diff --git a/env-llmeval/lib/python3.10/site-packages/torch/_lazy/__init__.py b/env-llmeval/lib/python3.10/site-packages/torch/_lazy/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..249ce9b1157829d47d8fd833068de7e4da54cf1c --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torch/_lazy/__init__.py @@ -0,0 +1,55 @@ +import threading + +import torch._C._lazy +from torch.utils._pytree import tree_flatten, tree_unflatten + +from .closure import add_step_closure, run_step_closures + + +def mark_step(device: str = "", wait=False): + """Triggers a mark step, which amounts to + - collecting a group of 'live' lazy tensors to index into the compilation cache + (lowering/compiling their IR graphs if not cached) + - kicking off execution of the compiled function + - (optionally, wait=True) waiting for cpu-side execution to complete (does not sync the accelerator) + """ + # TODO(whc) expand this to include backend hooks and align with XLA backend needs + torch._C._lazy._mark_step(device, [], wait=wait) + + run_step_closures() + + +def wait_device_ops(devices=None): + """Waits for all the async operations on the given devices to complete. + Args: + devices (string..., optional): The devices whose async ops need to be waited + for. If empty, all the local devices will be waited for. + """ + if devices is None: + devices = [] + torch._C._lazy._wait_device_ops(devices=devices) + + +def sync_multi(tensors, devices): + """ + Sync the list of lazy tensors so there IR get lowered for the activate backend + and the compiled computation graph get cached. + """ + torch._C._lazy._sync_multi(tensors, devices) + + +def get_tensor_id(tensor): + """Return a unique id of the lazy tensor maintained by LTC""" + return torch._C._lazy._get_tensor_id(tensor) + + +def to_cpu(tensors, devices=None): + devices = devices or ["lazy"] + + flattened, spec = tree_flatten(tensors) + sync_multi(flattened, devices) + return tree_unflatten([t.to("cpu") for t in flattened], spec) + + +def save(tensors, *args, **kwargs): + torch.save(to_cpu(tensors), *args, **kwargs) diff --git a/env-llmeval/lib/python3.10/site-packages/torch/_lazy/__pycache__/__init__.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/torch/_lazy/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..6dbef62700a762048a2368775fe62029545a6ccf Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/torch/_lazy/__pycache__/__init__.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/torch/_lazy/__pycache__/closure.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/torch/_lazy/__pycache__/closure.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..b6419fafc782284381ee056b64abf3d9733451ba Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/torch/_lazy/__pycache__/closure.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/torch/_lazy/__pycache__/computation.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/torch/_lazy/__pycache__/computation.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..b9a8710eb32931dc111904a4c58761b664299961 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/torch/_lazy/__pycache__/computation.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/torch/_lazy/__pycache__/config.cpython-310.pyc 
b/env-llmeval/lib/python3.10/site-packages/torch/_lazy/__pycache__/config.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..77c56e273081f0dc257bba036135a1d9cfe0a089 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/torch/_lazy/__pycache__/config.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/torch/_lazy/__pycache__/debug.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/torch/_lazy/__pycache__/debug.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..dc3c006f4b45d121ea2632f35fb28be852ccee11 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/torch/_lazy/__pycache__/debug.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/torch/_lazy/__pycache__/device_context.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/torch/_lazy/__pycache__/device_context.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..557a85bca139f834a6aacdaf4016e75293ccf75f Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/torch/_lazy/__pycache__/device_context.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/torch/_lazy/__pycache__/extract_compiled_graph.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/torch/_lazy/__pycache__/extract_compiled_graph.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..d5d666b87cb9402bde438536d5e1b067a18399a2 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/torch/_lazy/__pycache__/extract_compiled_graph.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/torch/_lazy/__pycache__/ir_cache.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/torch/_lazy/__pycache__/ir_cache.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..3b28142b1bf0c910865beef0d5d79160f0fefd9e Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/torch/_lazy/__pycache__/ir_cache.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/torch/_lazy/__pycache__/metrics.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/torch/_lazy/__pycache__/metrics.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..5369471bdfa1ce19bbc710fb27e85a9bc78f69d3 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/torch/_lazy/__pycache__/metrics.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/torch/_lazy/__pycache__/tensor_factory_functions.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/torch/_lazy/__pycache__/tensor_factory_functions.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..ea81e9c7088ce1a568cf2264a72c2bd925a2a5cc Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/torch/_lazy/__pycache__/tensor_factory_functions.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/torch/_lazy/__pycache__/ts_backend.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/torch/_lazy/__pycache__/ts_backend.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..0967697a17490bbf3a30ee88fc68c0538893c7f6 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/torch/_lazy/__pycache__/ts_backend.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/torch/_lazy/computation.py 
b/env-llmeval/lib/python3.10/site-packages/torch/_lazy/computation.py new file mode 100644 index 0000000000000000000000000000000000000000..27b73c42e5c0de39e5112f717796cfce5d808bc1 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torch/_lazy/computation.py @@ -0,0 +1,26 @@ +import torch._C._lazy +import torch._C._lazy_ts_backend + + +def get_tensors_ts_device_data_node(tensors): + """Return tensor ids and eager tensors for DeviceData nodes in the + IR for the passed in lazy tensors. + + TODO: This API is currently ts backend specific. We are working on + generalizing it to all backends including XLA. + """ + return torch._C._lazy_ts_backend._get_tensors_ts_device_data_node(tensors) + + +def get_graph_hash(tensors): + """Return the graph hash for the passed in lazy tensors""" + return torch._C._lazy._get_graph_hash(tensors) + + +def run_cached_graph(hash_str, graph_inputs): + """Running the cached computation graph with the given inputs + + TODO: This API is currently ts backend specific. We are working on + generalizing it to all backends including XLA. + """ + return torch._C._lazy_ts_backend._run_cached_graph(hash_str, graph_inputs) diff --git a/env-llmeval/lib/python3.10/site-packages/torch/_lazy/config.py b/env-llmeval/lib/python3.10/site-packages/torch/_lazy/config.py new file mode 100644 index 0000000000000000000000000000000000000000..e7a4d1dd24f8dbf505995982bbb33b8d90d3de2e --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torch/_lazy/config.py @@ -0,0 +1,16 @@ +import torch._C._lazy + + +def get_force_fallback(): + """Get the config used to force LTC fallback""" + return torch._C._lazy._get_force_fallback() + + +def set_force_fallback(configval): + """Set the config used to force LTC fallback""" + torch._C._lazy._set_force_fallback(configval) + + +def set_reuse_ir(val: bool): + """Set the config to reuse IR nodes for faster tracing""" + torch._C._lazy._set_reuse_ir(val) diff --git a/env-llmeval/lib/python3.10/site-packages/torch/_lazy/debug.py b/env-llmeval/lib/python3.10/site-packages/torch/_lazy/debug.py new file mode 100644 index 0000000000000000000000000000000000000000..286aa049280c9d9555f64042f35b4a5fd57d0059 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torch/_lazy/debug.py @@ -0,0 +1,21 @@ +import torch._C._lazy + + +def render_ir_graph(tensors): + """Return a text dump of the LTC IR graph in dot format for the tensors. + The text can be processed by tools like dot to be rendered in pdf,png etc.""" + return torch._C._lazy._get_tensors_dot(tensors) + + +def dump_ir(tensors, ir_format): + """Return a dump of the tensors in the specified format. 
+    Valid formats are:
+    - text: for LTC IR
+    - backend: for the active backend IR
+    """
+    if ir_format == "text":
+        return torch._C._lazy._get_tensors_text(tensors)
+    elif ir_format == "backend":
+        return torch._C._lazy._get_tensors_backend(tensors)
+    else:
+        raise RuntimeError(f"Unrecognized IR format: {ir_format}")
diff --git a/env-llmeval/lib/python3.10/site-packages/torch/_lazy/extract_compiled_graph.py b/env-llmeval/lib/python3.10/site-packages/torch/_lazy/extract_compiled_graph.py
new file mode 100644
index 0000000000000000000000000000000000000000..033d000c69d858aa1b8264d90c7d3e984229eb23
--- /dev/null
+++ b/env-llmeval/lib/python3.10/site-packages/torch/_lazy/extract_compiled_graph.py
@@ -0,0 +1,223 @@
+import copy
+import dataclasses
+import itertools
+import os
+from typing import Any, Callable, Dict, List
+
+import torch
+import torch._lazy as lazy
+import torch._lazy.metrics as metrics
+from torch import fx
+from torch._lazy import computation, debug as lazy_debug
+from torch._lazy.tensor_factory_functions import tensor_factory_functions
+
+debug = os.environ.get("debug_extract_compiled_graph") is not None
+
+
+@dataclasses.dataclass
+class GraphInputMatcher:
+    """
+    The GraphInputMatcher class sets up the graph inputs for future calls after lazy tracing.
+    Specifically, those graph inputs corresponding to method parameters should be replaced with the
+    arguments for the current call.
+
+    tensor_id_to_arg_idx maps the tensor id to the parameter index.
+    graph_input_tensor_ids, graph_input_ivalues list the tensor_id and ivalue for each of the
+    TS/XLA graph inputs.
+    """
+
+    tensor_id_to_arg_idx: Dict[int, int]
+    graph_input_tensor_ids: List[int]
+    # There are 2 categories of graph_input_tensors.
+    # Category 1: those whose ids are not found in tensor_id_to_arg_idx. These are
+    # most likely const tensors, and we can get their content from graph_input_tensors.
+    # Category 2: those whose ids are found in tensor_id_to_arg_idx. We should get
+    # the tensor from the method arguments.
+    graph_input_ivalues: List[Any]
+
+    # get the real graph input tensors
+    def __call__(self, args):
+        real_input = []
+        for tensor_id, traced_ivalue in zip(
+            self.graph_input_tensor_ids, self.graph_input_ivalues
+        ):
+            arg_idx = self.tensor_id_to_arg_idx.get(tensor_id, None)
+            if arg_idx is None:
+                inp = traced_ivalue
+            else:
+                inp = args[arg_idx]
+            real_input.append(inp)
+        return real_input
+
+
+class ReturnValueHandler:
+    r"""
+    When ltc_sync_multi is called on multiple tensors, the compiled graph
+    will contain output only for unique tensors - if a tensor appears multiple
+    times in the input to _ltc_sync_multi, only the first occurrence matters.
+
+    However, from the Python level we still expect multiple tensors to be returned,
+    with duplication, even if the TS graph dedups the output. E.g., for the method:
+
+      def forward(self, a):
+        return a, a
+
+    the TS graph captured by LTC will return a single tensor, but the Python method expects 2.
+
+    This class dedups the lazy tensors first to get the indices that will be used
+    to duplicate the eager tensors later.
+ """ + + def __init__(self, lazy_out_list): + self.index: List[List[int]] = [] + self.total_count = len(lazy_out_list) + + tensor_id_to_idx: Dict[int, int] = {} + for dup_idx, lazy_tensor in enumerate(lazy_out_list): + uniq_idx = tensor_id_to_idx.get(id(lazy_tensor), None) + if uniq_idx is not None: + self.index[uniq_idx].append(dup_idx) + else: + uniq_idx = len(self.index) + self.index.append([dup_idx]) + tensor_id_to_idx[id(lazy_tensor)] = uniq_idx + + def duplicate_eager_tensors(self, eager_tensor_list): + duplicated_list = [None] * self.total_count + assert len(eager_tensor_list) == len(self.index) + + for uniq_idx, eager_tensor in enumerate(eager_tensor_list): + for dup_idx in self.index[uniq_idx]: + duplicated_list[dup_idx] = eager_tensor + return duplicated_list + + +def force_lazy_device(model: fx.GraphModule): + """ + Factory methods in a Fx graph may create tensors for a specific eager devices. + If we take no actions, those eager tensors will be mixed with lazy tensors and + cause crash. This method overwrite those eager device to lazy device. + """ + + def tolazydevice(dev): + if isinstance(dev, torch.device): + return torch.device("lazy", index=dev.index) + return dev + + def hasDeviceArg(args, kwargs): + return any( + isinstance(arg, torch.device) + for arg in itertools.chain(args, kwargs.values()) + ) + + for nd in model.graph.nodes: + nd.args = tuple(tolazydevice(arg) for arg in nd.args) + nd.kwargs = {k: tolazydevice(v) for k, v in nd.kwargs.items()} + + # For torchbench like yolov3, hf_Bart, dynamo generates Fx graph that return + # eager tensors on the default device + # (check https://gist.github.com/shunting314/eabdf6c769c59bc384469717b8f9bb7f for yolove, + # and https://gist.github.com/shunting314/8d5e2d9348a3258959d3954186c48814 for hf_Bart). + # To force those tensors on the lazy device, we can not simply override + # the device argument since there is no explicit device argument. + # What we are doing here is, for the list of covered tensor factory methods + # we add a lazy device argument explicity. + # + # TODO: This solution is no ideal since we may miss some factory methods. In future + # when we support lazy mode, this method can be replaced by that. + if nd.target in tensor_factory_functions and not hasDeviceArg( + nd.args, nd.kwargs + ): + kwargs = dict(nd.kwargs) # nd.kwargs is immutable. make a mutable copy. + kwargs["device"] = torch.device("lazy") + nd.kwargs = kwargs + + model.recompile() + + +def get_fallback_ops(): + fallback_ops = [] + for opname in metrics.counter_names(): + if "aten::" not in opname: + continue + val = int(metrics.counter_value(opname)) + if val > 0: + fallback_ops.append(f"{opname}={val}") + + return fallback_ops + + +def extract_compiled_graph(model: fx.GraphModule, example_inputs) -> Callable: + """ + Optimize an eager model with LTC and returns a wrapper to execute the + compiled graph directly without retracing. It depends on other mechanisms + like TorchDynamo guards to guarantee the returned wrapper is only called + when it's safe. 
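    A minimal usage sketch, assuming a torch build where the lazy TorchScript backend is
    available and has been initialized via torch._lazy.ts_backend.init() (that module is not
    shown in this excerpt); the toy module and inputs below are illustrative only:

        import torch
        from torch import fx
        import torch._lazy.ts_backend
        from torch._lazy.extract_compiled_graph import extract_compiled_graph

        # Assumption: registers the TorchScript-based lazy backend before any lazy tracing.
        torch._lazy.ts_backend.init()

        class MulAdd(torch.nn.Module):
            def forward(self, x, y):
                return x * y + y

        example_inputs = [torch.rand(4, 4), torch.rand(4, 4)]
        gm = fx.symbolic_trace(MulAdd())

        # Trace and compile once; the returned callable replays the cached graph on new inputs
        # of the same shapes without retracing.
        optimized = extract_compiled_graph(gm, example_inputs)

        new_inputs = [torch.rand(4, 4), torch.rand(4, 4)]
        expected = MulAdd()(*new_inputs)
        # The wrapper returns only the outputs (args are updated in place if mutated);
        # index 0 picks the single output of this toy module.
        actual = optimized(*new_inputs)[0].to("cpu")
        torch.testing.assert_close(expected, actual)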
+ """ + lazy_args = [arg.to(device="lazy") for arg in example_inputs] + args_tensor_ids = [lazy.get_tensor_id(lazy_arg) for lazy_arg in lazy_args] + tensor_id_to_arg_idx = {tensor_id: i for i, tensor_id in enumerate(args_tensor_ids)} + lazy_model = copy.deepcopy(model).to(device=torch.device("lazy")) + force_lazy_device(lazy_model) + + # This line executes lazy tracing and enable us extracting compiled graph later + metrics.reset() + lazy_out = lazy_model(*lazy_args) + fallback_ops = get_fallback_ops() + metrics.reset() + + if len(fallback_ops) > 0: + raise RuntimeError( + f"Fail to extact the compiled graph because of fallback: {','.join(fallback_ops)}" + ) + + if not isinstance(lazy_out, (tuple, list)): + lazy_out = (lazy_out,) + + args_and_out = tuple(lazy_args) + tuple(lazy_out) + return_value_handler = ReturnValueHandler(args_and_out) + if debug: + print("Fx code:\n", model.code) + print("LTC IR:", lazy_debug.dump_ir(args_and_out, "text")) + + # TODO: this part is TS backend specific for now and will be generalized to + # support XLA + ( + graph_input_tensor_ids, + graph_input_ivalues, + ) = computation.get_tensors_ts_device_data_node(args_and_out) + assert len(graph_input_tensor_ids) == len(graph_input_ivalues) + graph_input_matcher = GraphInputMatcher( + tensor_id_to_arg_idx, graph_input_tensor_ids, graph_input_ivalues + ) + + graph_hash = computation.get_graph_hash(args_and_out) + + if debug: + print("graph_hash", graph_hash) + print(f"args_tensor_ids {args_tensor_ids}") + print("tensor ids from device data:", graph_input_tensor_ids) + + # sync the list of output tensors so the computation graph for these + # tensors will be cached. Those computation graphs can be retrieved + # by graph hash later. + lazy.sync_multi(args_and_out, []) + + def optimized_mod(*args): + if len(args_and_out) == 0: + return () + graph_input = graph_input_matcher(args) + res = return_value_handler.duplicate_eager_tensors( + computation.run_cached_graph(graph_hash, graph_input) + ) + + assert len(res) == len(args_and_out) + for i, arg in enumerate(args): + # only copy those tensors that get inplace updated + if arg is not res[i]: + arg.copy_(res[i]) + + # skip the args + return res[len(args) :] + + return optimized_mod diff --git a/env-llmeval/lib/python3.10/site-packages/torch/_lazy/tensor_factory_functions.py b/env-llmeval/lib/python3.10/site-packages/torch/_lazy/tensor_factory_functions.py new file mode 100644 index 0000000000000000000000000000000000000000..47aa9c500466daadf282633d43f0335e0a8c0b70 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torch/_lazy/tensor_factory_functions.py @@ -0,0 +1,48 @@ +import torch + +""" +tensor_factory_functions defines the list of torch functions that create tensors. +The list is grabbed by searching thru native_functions.yaml by the following +regular expression: + + cat native_functions.yaml | grep 'func:' | grep -v "Tensor.*->" | grep "[-]>.*Tensor" + +It's possible that new tensor factory functions are added making this list stale. +Use at your own risk or regenerate the list. 
+""" +tensor_factory_functions = ( + torch._cudnn_init_dropout_state, + torch.arange, + torch.bartlett_window, + torch.blackman_window, + torch._empty_affine_quantized, + torch.empty_strided, + torch.eye, + torch.full, + torch.from_file, + torch.hann_window, + torch.hamming_window, + torch.kaiser_window, + torch.linspace, + torch.logspace, + torch.ones, + torch.scalar_tensor, + torch.rand, + torch.randint, + torch.randn, + torch.randperm, + torch.range, + torch._efficientzerotensor, + torch.zeros, + torch.tril_indices, + torch.triu_indices, + # Note: the following functions match the regular expression search above but + # they are not available in the torch module. Comment out. + # torch._sparse_coo_tensor_with_dims, + # torch.fft_fftfreq, + # torch.fft_rfftfreq, +) + ( + # torch.tensor is special since it's not in native_functions.yaml + # add it separately + torch.tensor, +) diff --git a/env-llmeval/lib/python3.10/site-packages/torch/amp/__init__.py b/env-llmeval/lib/python3.10/site-packages/torch/amp/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..f080d3a978d32547789feaea0cf33a9145961340 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torch/amp/__init__.py @@ -0,0 +1 @@ +from .autocast_mode import _enter_autocast, _exit_autocast, autocast diff --git a/env-llmeval/lib/python3.10/site-packages/torch/amp/__pycache__/__init__.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/torch/amp/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..1b395f299935a17d5b59ae28ff93ae0d614967ab Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/torch/amp/__pycache__/__init__.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/torch/amp/__pycache__/autocast_mode.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/torch/amp/__pycache__/autocast_mode.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..b665ef58db4495e95cf28b7c1099155a226d19c9 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/torch/amp/__pycache__/autocast_mode.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/torch/amp/autocast_mode.py b/env-llmeval/lib/python3.10/site-packages/torch/amp/autocast_mode.py new file mode 100644 index 0000000000000000000000000000000000000000..30c6aefcf1bdaf24943b408694c59971a8033ca6 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torch/amp/autocast_mode.py @@ -0,0 +1,436 @@ +import functools +import warnings + +from typing import Any, Optional + +import torch +from torch.types import _dtype + +__all__ = ["autocast_decorator", "autocast"] + + +def autocast_decorator(autocast_instance, func): + @functools.wraps(func) + def decorate_autocast(*args, **kwargs): + with autocast_instance: + return func(*args, **kwargs) + + decorate_autocast.__script_unsupported = "@autocast() decorator is not supported in script mode" # type: ignore[attr-defined] + return decorate_autocast + + +class autocast: + r""" + Instances of :class:`autocast` serve as context managers or decorators that + allow regions of your script to run in mixed precision. + + In these regions, ops run in an op-specific dtype chosen by autocast + to improve performance while maintaining accuracy. + See the :ref:`Autocast Op Reference` for details. + + When entering an autocast-enabled region, Tensors may be any type. 
+ You should not call ``half()`` or ``bfloat16()`` on your model(s) or inputs when using autocasting. + + :class:`autocast` should wrap only the forward pass(es) of your network, including the loss + computation(s). Backward passes under autocast are not recommended. + Backward ops run in the same type that autocast used for corresponding forward ops. + + Example for CUDA Devices:: + + # Creates model and optimizer in default precision + model = Net().cuda() + optimizer = optim.SGD(model.parameters(), ...) + + for input, target in data: + optimizer.zero_grad() + + # Enables autocasting for the forward pass (model + loss) + with torch.autocast(device_type="cuda"): + output = model(input) + loss = loss_fn(output, target) + + # Exits the context manager before backward() + loss.backward() + optimizer.step() + + See the :ref:`CUDA Automatic Mixed Precision examples` for usage (along with gradient scaling) + in more complex scenarios (e.g., gradient penalty, multiple models/losses, custom autograd functions). + + :class:`autocast` can also be used as a decorator, e.g., on the ``forward`` method of your model:: + + class AutocastModel(nn.Module): + ... + @torch.autocast(device_type="cuda") + def forward(self, input): + ... + + Floating-point Tensors produced in an autocast-enabled region may be ``float16``. + After returning to an autocast-disabled region, using them with floating-point + Tensors of different dtypes may cause type mismatch errors. If so, cast the Tensor(s) + produced in the autocast region back to ``float32`` (or other dtype if desired). + If a Tensor from the autocast region is already ``float32``, the cast is a no-op, + and incurs no additional overhead. + CUDA Example:: + + # Creates some tensors in default dtype (here assumed to be float32) + a_float32 = torch.rand((8, 8), device="cuda") + b_float32 = torch.rand((8, 8), device="cuda") + c_float32 = torch.rand((8, 8), device="cuda") + d_float32 = torch.rand((8, 8), device="cuda") + + with torch.autocast(device_type="cuda"): + # torch.mm is on autocast's list of ops that should run in float16. + # Inputs are float32, but the op runs in float16 and produces float16 output. + # No manual casts are required. + e_float16 = torch.mm(a_float32, b_float32) + # Also handles mixed input types + f_float16 = torch.mm(d_float32, e_float16) + + # After exiting autocast, calls f_float16.float() to use with d_float32 + g_float32 = torch.mm(d_float32, f_float16.float()) + + CPU Training Example:: + + # Creates model and optimizer in default precision + model = Net() + optimizer = optim.SGD(model.parameters(), ...) + + for epoch in epochs: + for input, target in data: + optimizer.zero_grad() + + # Runs the forward pass with autocasting. + with torch.autocast(device_type="cpu", dtype=torch.bfloat16): + output = model(input) + loss = loss_fn(output, target) + + loss.backward() + optimizer.step() + + + CPU Inference Example:: + + # Creates model in default precision + model = Net().eval() + + with torch.autocast(device_type="cpu", dtype=torch.bfloat16): + for input in data: + # Runs the forward pass with autocasting. 
+ output = model(input) + + CPU Inference Example with Jit Trace:: + + class TestModel(nn.Module): + def __init__(self, input_size, num_classes): + super().__init__() + self.fc1 = nn.Linear(input_size, num_classes) + def forward(self, x): + return self.fc1(x) + + input_size = 2 + num_classes = 2 + model = TestModel(input_size, num_classes).eval() + + # For now, we suggest to disable the Jit Autocast Pass, + # As the issue: https://github.com/pytorch/pytorch/issues/75956 + torch._C._jit_set_autocast_mode(False) + + with torch.cpu.amp.autocast(cache_enabled=False): + model = torch.jit.trace(model, torch.randn(1, input_size)) + model = torch.jit.freeze(model) + # Models Run + for _ in range(3): + model(torch.randn(1, input_size)) + + Type mismatch errors *in* an autocast-enabled region are a bug; if this is what you observe, + please file an issue. + + ``autocast(enabled=False)`` subregions can be nested in autocast-enabled regions. + Locally disabling autocast can be useful, for example, if you want to force a subregion + to run in a particular ``dtype``. Disabling autocast gives you explicit control over + the execution type. In the subregion, inputs from the surrounding region + should be cast to ``dtype`` before use:: + + # Creates some tensors in default dtype (here assumed to be float32) + a_float32 = torch.rand((8, 8), device="cuda") + b_float32 = torch.rand((8, 8), device="cuda") + c_float32 = torch.rand((8, 8), device="cuda") + d_float32 = torch.rand((8, 8), device="cuda") + + with torch.autocast(device_type="cuda"): + e_float16 = torch.mm(a_float32, b_float32) + with torch.autocast(device_type="cuda", enabled=False): + # Calls e_float16.float() to ensure float32 execution + # (necessary because e_float16 was created in an autocasted region) + f_float32 = torch.mm(c_float32, e_float16.float()) + + # No manual casts are required when re-entering the autocast-enabled region. + # torch.mm again runs in float16 and produces float16 output, regardless of input types. + g_float16 = torch.mm(d_float32, f_float32) + + The autocast state is thread-local. If you want it enabled in a new thread, the context manager or decorator + must be invoked in that thread. This affects :class:`torch.nn.DataParallel` and + :class:`torch.nn.parallel.DistributedDataParallel` when used with more than one GPU per process + (see :ref:`Working with Multiple GPUs`). + + Args: + device_type(str, required): Device type to use. Possible values are: 'cuda', 'cpu', 'xpu' and 'hpu'. + The type is the same as the `type` attribute of a :class:`torch.device`. + Thus, you may obtain the device type of a tensor using `Tensor.device.type`. + enabled(bool, optional): Whether autocasting should be enabled in the region. + Default: ``True`` + dtype(torch_dtype, optional): Whether to use torch.float16 or torch.bfloat16. + cache_enabled(bool, optional): Whether the weight cache inside autocast should be enabled. 
+ Default: ``True`` + """ + + def __init__( + self, + device_type: str, + dtype: Optional[_dtype] = None, + enabled: bool = True, + cache_enabled: Optional[bool] = None, + ): + if torch._jit_internal.is_scripting(): + self._enabled = enabled + self.device = device_type + self.fast_dtype = dtype + # TODO: support get_autocast_gpu/cpu_dtype + assert dtype is not None + return + self.device = device_type + self.custom_backend_name = torch._C._get_privateuse1_backend_name() + if self.device == "cuda": + self.fast_dtype = torch.get_autocast_gpu_dtype() + elif self.device == "cpu": + self.fast_dtype = torch.get_autocast_cpu_dtype() + elif self.device == "xpu": + self.fast_dtype = torch.xpu.get_autocast_xpu_dtype() # type: ignore[attr-defined] + elif self.device == "ipu": + self.fast_dtype = torch.get_autocast_ipu_dtype() # type: ignore[attr-defined] + elif self.device == "hpu": + self.fast_dtype = torch.hpu.get_autocast_hpu_dtype() # type: ignore[attr-defined] + elif self.device == "xla": + self.fast_dtype = torch.get_autocast_xla_dtype() # type: ignore[attr-defined] + elif self.device == self.custom_backend_name: + necessary_funcs = [ + "is_autocast_enabled", + "set_autocast_enabled", + "get_autocast_dtype", + "set_autocast_dtype", + "get_amp_supported_dtype", + ] + message = f"Tried to use AMP with the `{self.custom_backend_name}` backend, but the backend has not " + message += "registered a module or the module miss some necessary funcs. The backend should register " + message += "a module by `torch._register_device_module`, and the module must have these funcs: \n" + message += "`is_autocast_enabled() -> bool`, `set_autocast_enabled(bool) -> None`, " + message += "`get_autocast_dtype() -> torch.dtype`, `set_autocast_dtype(torch.dtype) " + message += ( + "-> None` and `get_amp_supported_dtype() -> List[torch.dtype]`. \n" + ) + + assert hasattr(torch, self.custom_backend_name), message + self.custom_device_mod = getattr(torch, self.custom_backend_name) + for func in necessary_funcs: + assert hasattr(self.custom_device_mod, func), ( + message + f"But the func `{func}` is missing. \n" + ) + + self.fast_dtype = self.custom_device_mod.get_autocast_dtype() + else: + raise RuntimeError( + f"User specified an unsupported autocast device_type '{self.device}'" + ) + self._cache_enabled = torch.is_autocast_cache_enabled() + if ( + enabled + and torch.cuda.amp.common.amp_definitely_not_available() + and self.device == "cuda" + ): + warnings.warn( + "User provided device_type of 'cuda', but CUDA is not available. Disabling" + ) + enabled = False + if dtype is not None: + self.fast_dtype = dtype + if cache_enabled is not None: + self._cache_enabled = cache_enabled + + if self.device == "cpu": + supported_dtype = [torch.bfloat16, torch.float16] + if self.fast_dtype not in supported_dtype and enabled: + error_message = "In CPU autocast, but the target dtype is not supported. Disabling autocast.\n" + error_message += "CPU Autocast only supports dtype of " + error_message += ( + ", ".join(str(dtype) for dtype in supported_dtype) + " currently." + ) + warnings.warn(error_message) + enabled = False + elif self.device == "xpu": + supported_dtype = [torch.bfloat16, torch.float16] + if self.fast_dtype not in supported_dtype: + error_message = "In XPU autocast, but the target dtype is not supported. Disabling autocast.\n" + error_message += "XPU Autocast only supports dtypes of torch.bfloat16 and torch.float16 currently." 
+ warnings.warn(error_message) + enabled = False + elif self.device == "ipu": + supported_dtypes = [torch.bfloat16, torch.float16] + if self.fast_dtype not in supported_dtypes: + error_message = "In IPU autocast, but the target dtype is not supported. Disabling autocast.\n" + error_message += "IPU Autocast only supports dtypes of torch.bfloat16 and torch.float16 currently." + warnings.warn(error_message) + enabled = False + elif self.device == "hpu": + supported_dtype = [torch.bfloat16, torch.float16] + if self.fast_dtype not in supported_dtype: + error_message = "In HPU autocast, but the target dtype is not supported. Disabling autocast.\n" + error_message += "HPU Autocast only supports dtypes of torch.bfloat16 and torch.float16 currently." + warnings.warn(error_message) + enabled = False + elif self.device == self.custom_backend_name: + supported_dtype = self.custom_device_mod.get_amp_supported_dtype() + if self.fast_dtype not in supported_dtype: + error_message = f"In {self.custom_backend_name} autocast, but the target dtype is not supported. " + error_message += f"Disabling autocast.\n {self.custom_backend_name} Autocast only supports dtypes of " + error_message += ( + ", ".join(str(dtype) for dtype in supported_dtype) + " currently." + ) + warnings.warn(error_message) + enabled = False + elif self.device == "cuda": + if ( + enabled + and self.fast_dtype == torch.bfloat16 + and not torch.cuda.is_bf16_supported() + ): + raise RuntimeError( + "Current CUDA Device does not support bfloat16. Please switch dtype to float16." + ) + elif self.device == "xla": + supported_dtype = [torch.float16, torch.bfloat16] + if self.fast_dtype not in supported_dtype: + error_message = "In XLA autocast, but the target dtype is not supported. Disabling autocast.\n" + error_message += ( + "XLA Autocast only supports dtype of torch.bfloat16 currently." 
+ ) + warnings.warn(error_message) + enabled = False + self._enabled = enabled + + def __enter__(self): + if torch._jit_internal.is_scripting(): + assert self.fast_dtype is not None + return self + + self.prev_cache_enabled = torch.is_autocast_cache_enabled() + if self.device == "cpu": + self.prev = torch.is_autocast_cpu_enabled() + self.prev_fastdtype = torch.get_autocast_cpu_dtype() + torch.set_autocast_cpu_enabled(self._enabled) + torch.set_autocast_cpu_dtype(self.fast_dtype) # type: ignore[arg-type] + torch.autocast_increment_nesting() + elif self.device == "xpu": + self.prev = torch.xpu.is_autocast_xpu_enabled() # type: ignore[attr-defined] + self.prev_fastdtype = torch.xpu.get_autocast_xpu_dtype() # type: ignore[attr-defined] + torch.xpu.set_autocast_xpu_enabled(self._enabled) # type: ignore[attr-defined] + torch.xpu.set_autocast_xpu_dtype(self.fast_dtype) # type: ignore[attr-defined] + torch.autocast_increment_nesting() + elif self.device == "ipu": + self.prev = torch.is_autocast_ipu_enabled() # type: ignore[attr-defined] + self.prev_fastdtype = torch.get_autocast_ipu_dtype() # type: ignore[attr-defined] + torch.set_autocast_ipu_enabled(self._enabled) # type: ignore[attr-defined] + torch.set_autocast_ipu_dtype(self.fast_dtype) # type: ignore[attr-defined] + torch.autocast_increment_nesting() + elif self.device == "hpu": + self.prev = torch.hpu.is_autocast_hpu_enabled() # type: ignore[attr-defined] + self.prev_fastdtype = torch.hpu.get_autocast_hpu_dtype() # type: ignore[attr-defined] + torch.hpu.set_autocast_hpu_enabled(self._enabled) # type: ignore[attr-defined] + torch.hpu.set_autocast_hpu_dtype(self.fast_dtype) # type: ignore[attr-defined] + torch.autocast_increment_nesting() + elif self.device == "xla": + self.prev = torch.is_autocast_xla_enabled() # type: ignore[attr-defined] + self.prev_fastdtype = torch.get_autocast_xla_dtype() # type: ignore[attr-defined] + torch.set_autocast_xla_enabled(self._enabled) # type: ignore[attr-defined] + torch.set_autocast_xla_dtype(self.fast_dtype) # type: ignore[attr-defined] + torch.autocast_increment_nesting() + elif self.device == self.custom_backend_name: + self.prev = self.custom_device_mod.is_autocast_enabled() + self.prev_fastdtype = self.custom_device_mod.get_autocast_dtype() + self.custom_device_mod.set_autocast_enabled(self._enabled) + self.custom_device_mod.set_autocast_dtype(self.fast_dtype) + torch.autocast_increment_nesting() + else: + self.prev = torch.is_autocast_enabled() + self.prev_fastdtype = torch.get_autocast_gpu_dtype() + torch.set_autocast_gpu_dtype(self.fast_dtype) # type: ignore[arg-type] + torch.set_autocast_enabled(self._enabled) + torch.autocast_increment_nesting() + torch.set_autocast_cache_enabled(self._cache_enabled) + + def __exit__(self, exc_type: Any, exc_val: Any, exc_tb: Any): # type: ignore[override] + if torch._jit_internal.is_scripting(): + return + + # Drop the cache when we exit to a nesting level that's outside any instance of autocast. 
+ if self.device == "cpu": + if torch.autocast_decrement_nesting() == 0: + torch.clear_autocast_cache() + torch.set_autocast_cpu_enabled(self.prev) + torch.set_autocast_cpu_dtype(self.prev_fastdtype) + elif self.device == "xpu": + if torch.autocast_decrement_nesting() == 0: + torch.clear_autocast_cache() + torch.xpu.set_autocast_xpu_enabled(self.prev) # type: ignore[attr-defined] + torch.xpu.set_autocast_xpu_dtype(self.prev_fastdtype) # type: ignore[attr-defined] + elif self.device == "ipu": + if torch.autocast_decrement_nesting() == 0: + torch.clear_autocast_cache() + torch.set_autocast_ipu_enabled(self.prev) # type: ignore[attr-defined] + torch.set_autocast_ipu_dtype(self.prev_fastdtype) # type: ignore[attr-defined] + elif self.device == "hpu": + if torch.autocast_decrement_nesting() == 0: + torch.clear_autocast_cache() + torch.hpu.set_autocast_hpu_enabled(self.prev) # type: ignore[attr-defined] + torch.hpu.set_autocast_hpu_dtype(self.prev_fastdtype) # type: ignore[attr-defined] + elif self.device == "xla": + if torch.autocast_decrement_nesting() == 0: + torch.clear_autocast_cache() + torch.set_autocast_xla_enabled(self.prev) # type: ignore[attr-defined] + torch.set_autocast_xla_dtype(self.prev_fastdtype) # type: ignore[attr-defined] + elif self.device == self.custom_backend_name: + if torch.autocast_decrement_nesting() == 0: + torch.clear_autocast_cache() + self.custom_device_mod.set_autocast_enabled(self.prev) + self.custom_device_mod.set_autocast_dtype(self.prev_fastdtype) + else: + if torch.autocast_decrement_nesting() == 0: + torch.clear_autocast_cache() + torch.set_autocast_enabled(self.prev) + torch.set_autocast_gpu_dtype(self.prev_fastdtype) + torch.set_autocast_cache_enabled(self.prev_cache_enabled) + return False + + def __call__(self, func): + if torch._jit_internal.is_scripting(): + return func + return autocast_decorator(self, func) + + +# These functions aren't meant for public usage. +# They are what we trace into a graph during pre_dispatch tracing +# when we encounter an autocast context manager. +def _enter_autocast(*vals): + # For pre-dispatch tracing, if a TorchFunction mode is active, we'll want to trace this into a graph. + if torch._C._is_torch_function_mode_enabled(): + return torch.overrides.handle_torch_function( + torch.amp._enter_autocast, [], *vals + ) + mode = torch.amp.autocast(*vals) + mode.__enter__() + return mode + + +def _exit_autocast(mode): + if torch._C._is_torch_function_mode_enabled(): + return torch.overrides.handle_torch_function(torch.amp._exit_autocast, [], mode) + mode.__exit__(None, None, None) diff --git a/env-llmeval/lib/python3.10/site-packages/torch/cpu/__init__.py b/env-llmeval/lib/python3.10/site-packages/torch/cpu/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..14794627d752bead22c635390b38740848d47ce4 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torch/cpu/__init__.py @@ -0,0 +1,157 @@ +r""" +This package implements abstractions found in ``torch.cuda`` +to facilitate writing device-agnostic code. +""" + +from contextlib import AbstractContextManager +from typing import Any, Optional, Union + +import torch + +from .. import device as _device +from . 
import amp + +__all__ = [ + "is_available", + "synchronize", + "current_device", + "current_stream", + "stream", + "set_device", + "device_count", + "Stream", + "StreamContext", + "Event", +] + +_device_t = Union[_device, str, int, None] + + +def _is_cpu_support_vnni() -> bool: + r"""Returns a bool indicating if CPU supports VNNI.""" + return torch._C._cpu._is_cpu_support_vnni() + + +def is_available() -> bool: + r"""Returns a bool indicating if CPU is currently available. + + N.B. This function only exists to facilitate device-agnostic code + + """ + return True + + +def synchronize(device: _device_t = None) -> None: + r"""Waits for all kernels in all streams on the CPU device to complete. + + Args: + device (torch.device or int, optional): ignored, there's only one CPU device. + + N.B. This function only exists to facilitate device-agnostic code. + """ + pass + + +class Stream: + """ + N.B. This class only exists to facilitate device-agnostic code + """ + + def __init__(self, priority: int = -1): + pass + + def wait_stream(self, stream) -> None: + pass + + +class Event: + def query(self) -> bool: + return True + + def record(self, stream=None): + pass + + def synchronize(self): + pass + + def wait(self, stream=None): + pass + + +_default_cpu_stream = Stream() +_current_stream = _default_cpu_stream + + +def current_stream(device: _device_t = None) -> Stream: + r"""Returns the currently selected :class:`Stream` for a given device. + + Args: + device (torch.device or int, optional): Ignored. + + N.B. This function only exists to facilitate device-agnostic code + + """ + return _current_stream + + +class StreamContext(AbstractContextManager): + r"""Context-manager that selects a given stream. + + N.B. This class only exists to facilitate device-agnostic code + + """ + cur_stream: Optional[Stream] + + def __init__(self, stream): + self.stream = stream + self.prev_stream = _default_cpu_stream + + def __enter__(self): + cur_stream = self.stream + if cur_stream is None: + return + + global _current_stream + self.prev_stream = _current_stream + _current_stream = cur_stream + + def __exit__(self, type: Any, value: Any, traceback: Any): + cur_stream = self.stream + if cur_stream is None: + return + + global _current_stream + _current_stream = self.prev_stream + + +def stream(stream: Stream) -> AbstractContextManager: + r"""Wrapper around the Context-manager StreamContext that + selects a given stream. + + N.B. This function only exists to facilitate device-agnostic code + """ + return StreamContext(stream) + + +def device_count() -> int: + r"""Returns number of CPU devices (not cores). Always 1. + + N.B. This function only exists to facilitate device-agnostic code + """ + return 1 + + +def set_device(device: _device_t) -> None: + r"""Sets the current device, in CPU we do nothing. + + N.B. This function only exists to facilitate device-agnostic code + """ + pass + + +def current_device() -> str: + r"""Returns current device for cpu. Always 'cpu'. + + N.B. 
This function only exists to facilitate device-agnostic code + """ + return "cpu" diff --git a/env-llmeval/lib/python3.10/site-packages/torch/fx/experimental/__pycache__/__init__.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/torch/fx/experimental/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..28368fdbe81f3f8aa559c2c13b9af49fae747a32 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/torch/fx/experimental/__pycache__/__init__.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/torch/fx/experimental/__pycache__/_config.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/torch/fx/experimental/__pycache__/_config.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..4ca2f995a08ccf095c6e64663dd042d46385eb09 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/torch/fx/experimental/__pycache__/_config.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/torch/fx/experimental/__pycache__/const_fold.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/torch/fx/experimental/__pycache__/const_fold.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..20e47067129d4e2ec66f348eb9d24e87e6245716 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/torch/fx/experimental/__pycache__/const_fold.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/torch/fx/experimental/__pycache__/graph_gradual_typechecker.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/torch/fx/experimental/__pycache__/graph_gradual_typechecker.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..a61e8c1b5166cd78f818c6460fe60c1640f586a9 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/torch/fx/experimental/__pycache__/graph_gradual_typechecker.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/torch/fx/experimental/__pycache__/normalize.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/torch/fx/experimental/__pycache__/normalize.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..45dcf6cc48b7976dc546e31447f321cfe8686cda Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/torch/fx/experimental/__pycache__/normalize.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/torch/fx/experimental/__pycache__/optimization.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/torch/fx/experimental/__pycache__/optimization.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..8481bd4236094ec94923c3a2e4cfd85fa604232a Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/torch/fx/experimental/__pycache__/optimization.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/torch/fx/experimental/__pycache__/partitioner_utils.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/torch/fx/experimental/__pycache__/partitioner_utils.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..1d0df76dc87f0ee95541c92db5aaa04123f66a43 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/torch/fx/experimental/__pycache__/partitioner_utils.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/torch/fx/experimental/__pycache__/refinement_types.cpython-310.pyc 
b/env-llmeval/lib/python3.10/site-packages/torch/fx/experimental/__pycache__/refinement_types.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..4adb14840454eb10c00cd2523939757465f3ce5f Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/torch/fx/experimental/__pycache__/refinement_types.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/torch/fx/experimental/__pycache__/rewriter.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/torch/fx/experimental/__pycache__/rewriter.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..21f492c1a31a15d84343081cc6b4451c5868c50d Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/torch/fx/experimental/__pycache__/rewriter.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/torch/fx/experimental/__pycache__/schema_type_annotation.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/torch/fx/experimental/__pycache__/schema_type_annotation.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..971876af8d7301300918f9de33a2b7f825bf9e35 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/torch/fx/experimental/__pycache__/schema_type_annotation.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/torch/fx/experimental/__pycache__/symbolic_shapes.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/torch/fx/experimental/__pycache__/symbolic_shapes.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..72a91608b6043a47814840984d981f46f2e641c9 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/torch/fx/experimental/__pycache__/symbolic_shapes.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/torch/fx/experimental/__pycache__/unify_refinements.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/torch/fx/experimental/__pycache__/unify_refinements.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..0f197b6d4825db29b9a6a1ae443d06536208c7d8 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/torch/fx/experimental/__pycache__/unify_refinements.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/torch/fx/experimental/__pycache__/validator.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/torch/fx/experimental/__pycache__/validator.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..0f26e786265e873073d8913df708d3a1666c0b0a Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/torch/fx/experimental/__pycache__/validator.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/torch/fx/experimental/migrate_gradual_types/__pycache__/__init__.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/torch/fx/experimental/migrate_gradual_types/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..1ec3993088b4d657e757a73bdadf31a3dc2ba782 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/torch/fx/experimental/migrate_gradual_types/__pycache__/__init__.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/torch/fx/experimental/migrate_gradual_types/__pycache__/constraint.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/torch/fx/experimental/migrate_gradual_types/__pycache__/constraint.cpython-310.pyc 
new file mode 100644 index 0000000000000000000000000000000000000000..e5edf5c62b16b64e12a049d4d3d4be34c664dfd4 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/torch/fx/experimental/migrate_gradual_types/__pycache__/constraint.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/torch/fx/experimental/migrate_gradual_types/__pycache__/constraint_generator.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/torch/fx/experimental/migrate_gradual_types/__pycache__/constraint_generator.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..671251b088c072efc7efe8e20d66916de96dbe52 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/torch/fx/experimental/migrate_gradual_types/__pycache__/constraint_generator.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/torch/fx/experimental/migrate_gradual_types/__pycache__/constraint_transformation.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/torch/fx/experimental/migrate_gradual_types/__pycache__/constraint_transformation.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..f856695f5b81626ad454a9e1cf3541d9e807d654 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/torch/fx/experimental/migrate_gradual_types/__pycache__/constraint_transformation.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/torch/fx/experimental/migrate_gradual_types/__pycache__/operation.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/torch/fx/experimental/migrate_gradual_types/__pycache__/operation.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..d8c42ec05cd8c0f357cca7461a0d4944956112de Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/torch/fx/experimental/migrate_gradual_types/__pycache__/operation.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/torch/fx/experimental/migrate_gradual_types/__pycache__/transform_to_z3.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/torch/fx/experimental/migrate_gradual_types/__pycache__/transform_to_z3.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..aced7fb8cda7b547f93e38b1af6f7e9db3060df6 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/torch/fx/experimental/migrate_gradual_types/__pycache__/transform_to_z3.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/torch/fx/experimental/unification/core.py b/env-llmeval/lib/python3.10/site-packages/torch/fx/experimental/unification/core.py new file mode 100644 index 0000000000000000000000000000000000000000..560ceb588924d69e0721f261c107d17ee494ef95 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torch/fx/experimental/unification/core.py @@ -0,0 +1,118 @@ +from collections.abc import Iterator # type: ignore[import] +from functools import partial + +from .unification_tools import assoc # type: ignore[import] +from .utils import transitive_get as walk +from .variable import isvar +from .dispatch import dispatch + +__all__ = ["reify", "unify"] + +############### +# Reification # +############### + +@dispatch(Iterator, dict) +def _reify(t, s): + return map(partial(reify, s=s), t) + # return (reify(arg, s) for arg in t) +_reify + +@dispatch(tuple, dict) # type: ignore[no-redef] +def _reify(t, s): + return tuple(reify(iter(t), s)) +_reify + +@dispatch(list, dict) # type: ignore[no-redef] +def _reify(t, 
s): + return list(reify(iter(t), s)) +_reify + +@dispatch(dict, dict) # type: ignore[no-redef] +def _reify(d, s): + return {k: reify(v, s) for k, v in d.items()} +_reify + +@dispatch(object, dict) # type: ignore[no-redef] +def _reify(o, s): + return o # catch all, just return the object + +def reify(e, s): + """ Replace variables of expression with substitution + >>> # xdoctest: +SKIP + >>> x, y = var(), var() + >>> e = (1, x, (3, y)) + >>> s = {x: 2, y: 4} + >>> reify(e, s) + (1, 2, (3, 4)) + >>> e = {1: x, 3: (y, 5)} + >>> reify(e, s) + {1: 2, 3: (4, 5)} + """ + if isvar(e): + return reify(s[e], s) if e in s else e + return _reify(e, s) + +############### +# Unification # +############### + +seq = tuple, list, Iterator + +@dispatch(seq, seq, dict) +def _unify(u, v, s): + if len(u) != len(v): + return False + for uu, vv in zip(u, v): # avoiding recursion + s = unify(uu, vv, s) + if s is False: + return False + return s +# +# @dispatch((set, frozenset), (set, frozenset), dict) +# def _unify(u, v, s): +# i = u & v +# u = u - i +# v = v - i +# return _unify(sorted(u), sorted(v), s) +# +# +# @dispatch(dict, dict, dict) +# def _unify(u, v, s): +# if len(u) != len(v): +# return False +# for key, uval in iteritems(u): +# if key not in v: +# return False +# s = unify(uval, v[key], s) +# if s is False: +# return False +# return s +# +# +# @dispatch(object, object, dict) +# def _unify(u, v, s): +# return False # catch all + + +@dispatch(object, object, dict) +def unify(u, v, s): # no check at the moment + """ Find substitution so that u == v while satisfying s + >>> x = var('x') + >>> unify((1, x), (1, 2), {}) + {~x: 2} + """ + u = walk(u, s) + v = walk(v, s) + if u == v: + return s + if isvar(u): + return assoc(s, u, v) + if isvar(v): + return assoc(s, v, u) + return _unify(u, v, s) +unify + +@dispatch(object, object) # type: ignore[no-redef] +def unify(u, v): + return unify(u, v, {}) diff --git a/env-llmeval/lib/python3.10/site-packages/torch/fx/experimental/unification/dispatch.py b/env-llmeval/lib/python3.10/site-packages/torch/fx/experimental/unification/dispatch.py new file mode 100644 index 0000000000000000000000000000000000000000..93039ce75070fec8da52d03067d5c0b851a79b50 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torch/fx/experimental/unification/dispatch.py @@ -0,0 +1,6 @@ +from functools import partial +from .multipledispatch import dispatch # type: ignore[import] + +namespace = {} # type: ignore[var-annotated] + +dispatch = partial(dispatch, namespace=namespace) diff --git a/env-llmeval/lib/python3.10/site-packages/torch/fx/experimental/unification/more.py b/env-llmeval/lib/python3.10/site-packages/torch/fx/experimental/unification/more.py new file mode 100644 index 0000000000000000000000000000000000000000..2b074235f14a2adc56a07eac9959a67e49f614e2 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torch/fx/experimental/unification/more.py @@ -0,0 +1,117 @@ +from .core import unify, reify # type: ignore[attr-defined] +from .dispatch import dispatch + + +def unifiable(cls): + """ Register standard unify and reify operations on class + This uses the type and __dict__ or __slots__ attributes to define the + nature of the term + See Also: + >>> # xdoctest: +SKIP + >>> class A(object): + ... def __init__(self, a, b): + ... self.a = a + ... 
self.b = b + >>> unifiable(A) + + >>> x = var('x') + >>> a = A(1, 2) + >>> b = A(1, x) + >>> unify(a, b, {}) + {~x: 2} + """ + _unify.add((cls, cls, dict), unify_object) + _reify.add((cls, dict), reify_object) + + return cls + + +######### +# Reify # +######### + + +def reify_object(o, s): + """ Reify a Python object with a substitution + >>> # xdoctest: +SKIP + >>> class Foo(object): + ... def __init__(self, a, b): + ... self.a = a + ... self.b = b + ... def __str__(self): + ... return "Foo(%s, %s)"%(str(self.a), str(self.b)) + >>> x = var('x') + >>> f = Foo(1, x) + >>> print(f) + Foo(1, ~x) + >>> print(reify_object(f, {x: 2})) + Foo(1, 2) + """ + if hasattr(o, '__slots__'): + return _reify_object_slots(o, s) + else: + return _reify_object_dict(o, s) + + +def _reify_object_dict(o, s): + obj = object.__new__(type(o)) + d = reify(o.__dict__, s) + if d == o.__dict__: + return o + obj.__dict__.update(d) + return obj + + +def _reify_object_slots(o, s): + attrs = [getattr(o, attr) for attr in o.__slots__] + new_attrs = reify(attrs, s) + if attrs == new_attrs: + return o + else: + newobj = object.__new__(type(o)) + for slot, attr in zip(o.__slots__, new_attrs): + setattr(newobj, slot, attr) + return newobj + + +@dispatch(slice, dict) +def _reify(o, s): + """ Reify a Python ``slice`` object """ + return slice(*reify((o.start, o.stop, o.step), s)) + + +######### +# Unify # +######### + + +def unify_object(u, v, s): + """ Unify two Python objects + Unifies their type and ``__dict__`` attributes + >>> # xdoctest: +SKIP + >>> class Foo(object): + ... def __init__(self, a, b): + ... self.a = a + ... self.b = b + ... def __str__(self): + ... return "Foo(%s, %s)"%(str(self.a), str(self.b)) + >>> x = var('x') + >>> f = Foo(1, x) + >>> g = Foo(1, 2) + >>> unify_object(f, g, {}) + {~x: 2} + """ + if type(u) != type(v): + return False + if hasattr(u, '__slots__'): + return unify([getattr(u, slot) for slot in u.__slots__], + [getattr(v, slot) for slot in v.__slots__], + s) + else: + return unify(u.__dict__, v.__dict__, s) + + +@dispatch(slice, slice, dict) +def _unify(u, v, s): + """ Unify a Python ``slice`` object """ + return unify((u.start, u.stop, u.step), (v.start, v.stop, v.step), s) diff --git a/env-llmeval/lib/python3.10/site-packages/torch/fx/experimental/unification/unification_tools.py b/env-llmeval/lib/python3.10/site-packages/torch/fx/experimental/unification/unification_tools.py new file mode 100644 index 0000000000000000000000000000000000000000..ae159b937ec079a085f24ee3d5aac6fe7f6b67e8 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torch/fx/experimental/unification/unification_tools.py @@ -0,0 +1,395 @@ +import collections +import operator +from functools import reduce +from collections.abc import Mapping + +__all__ = ('merge', 'merge_with', 'valmap', 'keymap', 'itemmap', + 'valfilter', 'keyfilter', 'itemfilter', + 'assoc', 'dissoc', 'assoc_in', 'update_in', 'get_in') + + +def _get_factory(f, kwargs): + factory = kwargs.pop('factory', dict) + if kwargs: + raise TypeError(f"{f.__name__}() got an unexpected keyword argument '{kwargs.popitem()[0]}'") + return factory + + +def merge(*dicts, **kwargs): + """ Merge a collection of dictionaries + + >>> merge({1: 'one'}, {2: 'two'}) + {1: 'one', 2: 'two'} + + Later dictionaries have precedence + + >>> merge({1: 2, 3: 4}, {3: 3, 4: 4}) + {1: 2, 3: 3, 4: 4} + + See Also: + merge_with + """ + if len(dicts) == 1 and not isinstance(dicts[0], Mapping): + dicts = dicts[0] + factory = _get_factory(merge, kwargs) + + rv = factory() + for d in 
dicts: + rv.update(d) + return rv + + +def merge_with(func, *dicts, **kwargs): + """ Merge dictionaries and apply function to combined values + + A key may occur in more than one dict, and all values mapped from the key + will be passed to the function as a list, such as func([val1, val2, ...]). + + >>> merge_with(sum, {1: 1, 2: 2}, {1: 10, 2: 20}) + {1: 11, 2: 22} + + >>> merge_with(first, {1: 1, 2: 2}, {2: 20, 3: 30}) # doctest: +SKIP + {1: 1, 2: 2, 3: 30} + + See Also: + merge + """ + if len(dicts) == 1 and not isinstance(dicts[0], Mapping): + dicts = dicts[0] + factory = _get_factory(merge_with, kwargs) + + result = factory() + for d in dicts: + for k, v in d.items(): + if k not in result: + result[k] = [v] + else: + result[k].append(v) + return valmap(func, result, factory) + + +def valmap(func, d, factory=dict): + """ Apply function to values of dictionary + + >>> bills = {"Alice": [20, 15, 30], "Bob": [10, 35]} + >>> valmap(sum, bills) # doctest: +SKIP + {'Alice': 65, 'Bob': 45} + + See Also: + keymap + itemmap + """ + rv = factory() + rv.update(zip(d.keys(), map(func, d.values()))) + return rv + + +def keymap(func, d, factory=dict): + """ Apply function to keys of dictionary + + >>> bills = {"Alice": [20, 15, 30], "Bob": [10, 35]} + >>> keymap(str.lower, bills) # doctest: +SKIP + {'alice': [20, 15, 30], 'bob': [10, 35]} + + See Also: + valmap + itemmap + """ + rv = factory() + rv.update(zip(map(func, d.keys()), d.values())) + return rv + + +def itemmap(func, d, factory=dict): + """ Apply function to items of dictionary + + >>> accountids = {"Alice": 10, "Bob": 20} + >>> itemmap(reversed, accountids) # doctest: +SKIP + {10: "Alice", 20: "Bob"} + + See Also: + keymap + valmap + """ + rv = factory() + rv.update(map(func, d.items())) + return rv + + +def valfilter(predicate, d, factory=dict): + """ Filter items in dictionary by value + + >>> iseven = lambda x: x % 2 == 0 + >>> d = {1: 2, 2: 3, 3: 4, 4: 5} + >>> valfilter(iseven, d) + {1: 2, 3: 4} + + See Also: + keyfilter + itemfilter + valmap + """ + rv = factory() + for k, v in d.items(): + if predicate(v): + rv[k] = v + return rv + + +def keyfilter(predicate, d, factory=dict): + """ Filter items in dictionary by key + + >>> iseven = lambda x: x % 2 == 0 + >>> d = {1: 2, 2: 3, 3: 4, 4: 5} + >>> keyfilter(iseven, d) + {2: 3, 4: 5} + + See Also: + valfilter + itemfilter + keymap + """ + rv = factory() + for k, v in d.items(): + if predicate(k): + rv[k] = v + return rv + + +def itemfilter(predicate, d, factory=dict): + """ Filter items in dictionary by item + + >>> def isvalid(item): + ... k, v = item + ... return k % 2 == 0 and v < 4 + + >>> d = {1: 2, 2: 3, 3: 4, 4: 5} + >>> itemfilter(isvalid, d) + {2: 3} + + See Also: + keyfilter + valfilter + itemmap + """ + rv = factory() + for item in d.items(): + if predicate(item): + k, v = item + rv[k] = v + return rv + + +def assoc(d, key, value, factory=dict): + """ Return a new dict with new key value pair + + New dict has d[key] set to value. Does not modify the initial dictionary. + + >>> assoc({'x': 1}, 'x', 2) + {'x': 2} + >>> assoc({'x': 1}, 'y', 3) # doctest: +SKIP + {'x': 1, 'y': 3} + """ + d2 = factory() + d2.update(d) + d2[key] = value + return d2 + + +def dissoc(d, *keys, **kwargs): + """ Return a new dict with the given key(s) removed. + + New dict has d[key] deleted for each supplied key. + Does not modify the initial dictionary. 
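# A small usage sketch, not part of the patched file, showing how the dict
# helpers defined above compose. The import path is the module added by this
# patch; the config keys are arbitrary example data.
from torch.fx.experimental.unification.unification_tools import (
    assoc, keyfilter, merge, valmap,
)

defaults = {"lr": 0.1, "momentum": 0.9}
overrides = {"lr": 0.01}
cfg = merge(defaults, overrides)        # later dicts win: {'lr': 0.01, 'momentum': 0.9}
cfg = assoc(cfg, "nesterov", True)      # returns a new dict; the input is not mutated
doubled = valmap(lambda v: v * 2, {"a": 1, "b": 2})    # {'a': 2, 'b': 4}
no_momentum = keyfilter(lambda k: k != "momentum", cfg)
print(cfg, doubled, no_momentum)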
+ + >>> dissoc({'x': 1, 'y': 2}, 'y') + {'x': 1} + >>> dissoc({'x': 1, 'y': 2}, 'y', 'x') + {} + >>> dissoc({'x': 1}, 'y') # Ignores missing keys + {'x': 1} + """ + factory = _get_factory(dissoc, kwargs) + d2 = factory() + + if len(keys) < len(d) * .6: + d2.update(d) + for key in keys: + if key in d2: + del d2[key] + else: + remaining = set(d) + remaining.difference_update(keys) + for k in remaining: + d2[k] = d[k] + return d2 + + +def assoc_in(d, keys, value, factory=dict): + """ Return a new dict with new, potentially nested, key value pair + + >>> purchase = {'name': 'Alice', + ... 'order': {'items': ['Apple', 'Orange'], + ... 'costs': [0.50, 1.25]}, + ... 'credit card': '5555-1234-1234-1234'} + >>> assoc_in(purchase, ['order', 'costs'], [0.25, 1.00]) # doctest: +SKIP + {'credit card': '5555-1234-1234-1234', + 'name': 'Alice', + 'order': {'costs': [0.25, 1.00], 'items': ['Apple', 'Orange']}} + """ + return update_in(d, keys, lambda x: value, value, factory) + + +def update_in(d, keys, func, default=None, factory=dict): + """ Update value in a (potentially) nested dictionary + + inputs: + d - dictionary on which to operate + keys - list or tuple giving the location of the value to be changed in d + func - function to operate on that value + + If keys == [k0,..,kX] and d[k0]..[kX] == v, update_in returns a copy of the + original dictionary with v replaced by func(v), but does not mutate the + original dictionary. + + If k0 is not a key in d, update_in creates nested dictionaries to the depth + specified by the keys, with the innermost value set to func(default). + + >>> inc = lambda x: x + 1 + >>> update_in({'a': 0}, ['a'], inc) + {'a': 1} + + >>> transaction = {'name': 'Alice', + ... 'purchase': {'items': ['Apple', 'Orange'], + ... 'costs': [0.50, 1.25]}, + ... 'credit card': '5555-1234-1234-1234'} + >>> update_in(transaction, ['purchase', 'costs'], sum) # doctest: +SKIP + {'credit card': '5555-1234-1234-1234', + 'name': 'Alice', + 'purchase': {'costs': 1.75, 'items': ['Apple', 'Orange']}} + + >>> # updating a value when k0 is not in d + >>> update_in({}, [1, 2, 3], str, default="bar") + {1: {2: {3: 'bar'}}} + >>> update_in({1: 'foo'}, [2, 3, 4], inc, 0) + {1: 'foo', 2: {3: {4: 1}}} + """ + ks = iter(keys) + k = next(ks) + + rv = inner = factory() + rv.update(d) + + for key in ks: + if k in d: + d = d[k] + dtemp = factory() + dtemp.update(d) + else: + d = dtemp = factory() + + inner[k] = inner = dtemp + k = key + + if k in d: + inner[k] = func(d[k]) + else: + inner[k] = func(default) + return rv + + +def get_in(keys, coll, default=None, no_default=False): + """ Returns coll[i0][i1]...[iX] where [i0, i1, ..., iX]==keys. + + If coll[i0][i1]...[iX] cannot be found, returns ``default``, unless + ``no_default`` is specified, then it raises KeyError or IndexError. + + ``get_in`` is a generalization of ``operator.getitem`` for nested data + structures such as dictionaries and lists. + + >>> transaction = {'name': 'Alice', + ... 'purchase': {'items': ['Apple', 'Orange'], + ... 'costs': [0.50, 1.25]}, + ... 'credit card': '5555-1234-1234-1234'} + >>> get_in(['purchase', 'items', 0], transaction) + 'Apple' + >>> get_in(['name'], transaction) + 'Alice' + >>> get_in(['purchase', 'total'], transaction) + >>> get_in(['purchase', 'items', 'apple'], transaction) + >>> get_in(['purchase', 'items', 10], transaction) + >>> get_in(['purchase', 'total'], transaction, 0) + 0 + >>> get_in(['y'], {}, no_default=True) + Traceback (most recent call last): + ... 
+ KeyError: 'y' + + See Also: + itertoolz.get + operator.getitem + """ + try: + return reduce(operator.getitem, keys, coll) + except (KeyError, IndexError, TypeError): + if no_default: + raise + return default + + +def getter(index): + if isinstance(index, list): + if len(index) == 1: + index = index[0] + return lambda x: (x[index],) + elif index: + return operator.itemgetter(*index) + else: + return lambda x: () + else: + return operator.itemgetter(index) + + +def groupby(key, seq): + """ Group a collection by a key function + + >>> names = ['Alice', 'Bob', 'Charlie', 'Dan', 'Edith', 'Frank'] + >>> groupby(len, names) # doctest: +SKIP + {3: ['Bob', 'Dan'], 5: ['Alice', 'Edith', 'Frank'], 7: ['Charlie']} + + >>> iseven = lambda x: x % 2 == 0 + >>> groupby(iseven, [1, 2, 3, 4, 5, 6, 7, 8]) # doctest: +SKIP + {False: [1, 3, 5, 7], True: [2, 4, 6, 8]} + + Non-callable keys imply grouping on a member. + + >>> groupby('gender', [{'name': 'Alice', 'gender': 'F'}, + ... {'name': 'Bob', 'gender': 'M'}, + ... {'name': 'Charlie', 'gender': 'M'}]) # doctest:+SKIP + {'F': [{'gender': 'F', 'name': 'Alice'}], + 'M': [{'gender': 'M', 'name': 'Bob'}, + {'gender': 'M', 'name': 'Charlie'}]} + + Not to be confused with ``itertools.groupby`` + + See Also: + countby + """ + if not callable(key): + key = getter(key) + d = collections.defaultdict(lambda: [].append) # type: ignore[var-annotated] + for item in seq: + d[key(item)](item) + rv = {} + for k, v in d.items(): + rv[k] = v.__self__ # type: ignore[var-annotated, attr-defined] + return rv + + +def first(seq): + """ The first element in a sequence + + >>> first('ABC') + 'A' + """ + return next(iter(seq)) diff --git a/env-llmeval/lib/python3.10/site-packages/torch/masked/__init__.py b/env-llmeval/lib/python3.10/site-packages/torch/masked/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e0193416ed2f572b476e3fabfa8668c7c4b651dd --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torch/masked/__init__.py @@ -0,0 +1,37 @@ +from .maskedtensor.core import is_masked_tensor, MaskedTensor +from .maskedtensor.creation import as_masked_tensor, masked_tensor +from ._ops import ( + _canonical_dim, + _generate_docstring, + _reduction_identity, + _where, + _input_mask, + _output_mask, + _combine_input_and_mask, + sum, + prod, + cumsum, + cumprod, + amax, + amin, + argmax, + argmin, + mean, + median, + logsumexp, + logaddexp, + norm, + var, + std, + softmax, + log_softmax, + softmin, + normalize, +) + +__all__ = [ + "as_masked_tensor", + "is_masked_tensor", + "masked_tensor", + "MaskedTensor", +] diff --git a/env-llmeval/lib/python3.10/site-packages/torch/masked/__pycache__/__init__.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/torch/masked/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..3352301479eda8c11cca23f703ef0946184e7167 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/torch/masked/__pycache__/__init__.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/torch/masked/__pycache__/_docs.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/torch/masked/__pycache__/_docs.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..3b43a5c9fcb0d82d72cf69461f363d1cbfe6fee0 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/torch/masked/__pycache__/_docs.cpython-310.pyc differ diff --git 
a/env-llmeval/lib/python3.10/site-packages/torch/masked/__pycache__/_ops.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/torch/masked/__pycache__/_ops.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..3ac350c14b19503dad3419adccb58101b7fca7e9 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/torch/masked/__pycache__/_ops.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/torch/masked/_docs.py b/env-llmeval/lib/python3.10/site-packages/torch/masked/_docs.py new file mode 100644 index 0000000000000000000000000000000000000000..bf96b49e3e8271cd93b5e84f74f5f333e56fda98 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torch/masked/_docs.py @@ -0,0 +1,1177 @@ +# This file is generated, do not modify it! +# +# To update this file, run the update masked docs script as follows: +# +# python tools/update_masked_docs.py +# +# The script must be called from an environment where the development +# version of torch package can be imported and is functional. +# + +amax_docstring = """amax(input, dim, *, keepdim=False, dtype=None, mask=None) -> Tensor + +Returns maximum of all the elements in the :attr:`input` +tensor along the given dimension(s) :attr:`dim` while the :attr:`input` +elements are masked out according to the boolean tensor +:attr:`mask`. + +The identity value of maximum operation, which is used to start the +reduction, depends on input dtype. For instance, for float32, uint8, +and int32 dtypes, the identity values are ``-inf``, ``0``, and ``-2147483648``, respectively. + +If :attr:`keepdim` is ``True``, the output tensor is of the same size +as :attr:`input` except in the dimension(s) :attr:`dim` where it is of +size 1. Otherwise, :attr:`dim` is squeezed (see +:func:`torch.squeeze`), resulting in the output tensor having 1 (or +``len(dim)``) fewer dimension(s). + +The boolean tensor :attr:`mask` defines the "validity" of +:attr:`input` tensor elements: if :attr:`mask` element is True +then the corresponding element in :attr:`input` tensor will be +included in maximum computation, otherwise the element is +ignored. + +When all elements of :attr:`input` along the given dimension +:attr:`dim` are ignored (fully masked-out), the corresponding element +of the output tensor will have undefined value: it may or may not +correspond to the identity value of maximum operation; the +choice may correspond to the value that leads to the most efficient +storage of :attr:`output` tensor. + +The mask of the output tensor can be computed as +``torch.any(torch.broadcast_to(mask, input.shape), dim, keepdim=keepdim, +dtype=torch.bool)``. + +The shapes of the :attr:`mask` tensor and the :attr:`input` tensor +don't need to match, but they must be :ref:`broadcastable +` and the dimensionality of the :attr:`mask` +tensor must not be greater than of the :attr:`input` tensor. + +Args: + input (Tensor): the input tensor + dim (int or tuple of ints, optional): the dimension or dimensions to reduce. + Default: None that is equivalent to ``tuple(range(input.ndim))``. + +Keyword args: + keepdim (bool, optional): whether the output tensor has + :attr:`dim` retained or not. Default: False. + dtype (:class:`torch.dtype`, optional): the desired data type + of returned tensor. If specified, the input tensor is + casted to :attr:`dtype` before the operation is + performed. Default: None. + mask (:class:`torch.Tensor`, optional): the boolean tensor + containing the binary mask of validity of input tensor + elements. 
+ Default: None that is equivalent to ``torch.ones(input.shape, dtype=torch.bool)``. + +Example:: + + >>> input = tensor([[-3, -2, -1], [ 0, 1, 2]]) + >>> input + tensor([[-3, -2, -1], + [ 0, 1, 2]]) + >>> mask = tensor([[ True, False, True], [False, False, False]]) + >>> mask + tensor([[ True, False, True], + [False, False, False]]) + >>> torch.masked._ops.amax(input, 1, mask=mask) + tensor([ -1, -9223372036854775808]) +""" + +amin_docstring = """amin(input, dim, *, keepdim=False, dtype=None, mask=None) -> Tensor + +Returns minimum of all the elements in the :attr:`input` +tensor along the given dimension(s) :attr:`dim` while the :attr:`input` +elements are masked out according to the boolean tensor +:attr:`mask`. + +The identity value of minimum operation, which is used to start the +reduction, depends on input dtype. For instance, for float32, uint8, +and int32 dtypes, the identity values are ``inf``, ``255``, and ``2147483647``, respectively. + +If :attr:`keepdim` is ``True``, the output tensor is of the same size +as :attr:`input` except in the dimension(s) :attr:`dim` where it is of +size 1. Otherwise, :attr:`dim` is squeezed (see +:func:`torch.squeeze`), resulting in the output tensor having 1 (or +``len(dim)``) fewer dimension(s). + +The boolean tensor :attr:`mask` defines the "validity" of +:attr:`input` tensor elements: if :attr:`mask` element is True +then the corresponding element in :attr:`input` tensor will be +included in minimum computation, otherwise the element is +ignored. + +When all elements of :attr:`input` along the given dimension +:attr:`dim` are ignored (fully masked-out), the corresponding element +of the output tensor will have undefined value: it may or may not +correspond to the identity value of minimum operation; the +choice may correspond to the value that leads to the most efficient +storage of :attr:`output` tensor. + +The mask of the output tensor can be computed as +``torch.any(torch.broadcast_to(mask, input.shape), dim, keepdim=keepdim, +dtype=torch.bool)``. + +The shapes of the :attr:`mask` tensor and the :attr:`input` tensor +don't need to match, but they must be :ref:`broadcastable +` and the dimensionality of the :attr:`mask` +tensor must not be greater than of the :attr:`input` tensor. + +Args: + input (Tensor): the input tensor + dim (int or tuple of ints, optional): the dimension or dimensions to reduce. + Default: None that is equivalent to ``tuple(range(input.ndim))``. + +Keyword args: + keepdim (bool, optional): whether the output tensor has + :attr:`dim` retained or not. Default: False. + dtype (:class:`torch.dtype`, optional): the desired data type + of returned tensor. If specified, the input tensor is + casted to :attr:`dtype` before the operation is + performed. Default: None. + mask (:class:`torch.Tensor`, optional): the boolean tensor + containing the binary mask of validity of input tensor + elements. + Default: None that is equivalent to ``torch.ones(input.shape, dtype=torch.bool)``. 
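# A small sketch, not part of the patched file, of the "mask of the output
# tensor" formula quoted in the amax/amin docstrings above, using the same
# example tensors.
import torch

input = torch.tensor([[-3, -2, -1], [0, 1, 2]])
mask = torch.tensor([[True, False, True], [False, False, False]])
out_mask = torch.any(torch.broadcast_to(mask, input.shape), dim=1)
print(out_mask)  # tensor([ True, False]); the second row is fully masked-out,
                 # so its reduced value is undefined per the docstring.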
+ +Example:: + + >>> input = tensor([[-3, -2, -1], [ 0, 1, 2]]) + >>> input + tensor([[-3, -2, -1], + [ 0, 1, 2]]) + >>> mask = tensor([[ True, False, True], [False, False, False]]) + >>> mask + tensor([[ True, False, True], + [False, False, False]]) + >>> torch.masked._ops.amin(input, 1, mask=mask) + tensor([ -3, 9223372036854775807]) +""" + +argmax_docstring = """argmax(input, dim, *, keepdim=False, dtype=None, mask=None) -> Tensor +Returns argmax of all the elements in the :attr:`input` +tensor along the given dimension(s) :attr:`dim` while the :attr:`input` +elements are masked out according to the boolean tensor +:attr:`mask`. +The identity value of argmax operation, which is used to start the +reduction, depends on input dtype. For instance, for float32, uint8, +and int32 dtypes, the identity values are ``-inf``, ``0``, and ``-2147483648``, respectively. +If :attr:`keepdim` is ``True``, the output tensor is of the same size +as :attr:`input` except in the dimension(s) :attr:`dim` where it is of +size 1. Otherwise, :attr:`dim` is squeezed (see +:func:`torch.squeeze`), resulting in the output tensor having 1 (or +``len(dim)``) fewer dimension(s). + +The boolean tensor :attr:`mask` defines the "validity" of +:attr:`input` tensor elements: if :attr:`mask` element is True +then the corresponding element in :attr:`input` tensor will be +included in argmax computation, otherwise the element is +ignored. + +When all elements of :attr:`input` along the given dimension +:attr:`dim` are ignored (fully masked-out), the corresponding element +of the output tensor will have undefined value: it may or may not +correspond to the identity value of argmax operation; the +choice may correspond to the value that leads to the most efficient +storage of :attr:`output` tensor. + +The mask of the output tensor can be computed as +``torch.any(torch.broadcast_to(mask, input.shape), dim, keepdim=keepdim, +dtype=torch.bool)``. + +The shapes of the :attr:`mask` tensor and the :attr:`input` tensor +don't need to match, but they must be :ref:`broadcastable +` and the dimensionality of the :attr:`mask` +tensor must not be greater than of the :attr:`input` tensor. + +Args: + input (Tensor): the input tensor + dim (int): the dimension along which argmax is computed. + +Keyword args: + keepdim (bool, optional): whether the output tensor has + :attr:`dim` retained or not. Default: False. + dtype (:class:`torch.dtype`, optional): the desired data type + of returned tensor. If specified, the input tensor is + casted to :attr:`dtype` before the operation is + performed. Default: None. + mask (:class:`torch.Tensor`, optional): the boolean tensor + containing the binary mask of validity of input tensor + elements. + Default: None that is equivalent to ``torch.ones(input.shape, dtype=torch.bool)``. +Example:: + + >>> input = tensor([[-3, -2, -1], [ 0, 1, 2]]) + >>> input + tensor([[-3, -2, -1], + [ 0, 1, 2]]) + >>> mask = tensor([[ True, False, True], [False, False, False]]) + >>> mask + tensor([[ True, False, True], + [False, False, False]]) + >>> torch.masked._ops.argmax(input, 1, mask=mask) + tensor([2, 0]) +""" + +argmin_docstring = """argmin(input, dim, *, keepdim=False, dtype=None, mask=None) -> Tensor +Returns argmin of all the elements in the :attr:`input` +tensor along the given dimension(s) :attr:`dim` while the :attr:`input` +elements are masked out according to the boolean tensor +:attr:`mask`. +The identity value of argmin operation, which is used to start the +reduction, depends on input dtype. 
For instance, for float32, uint8, +and int32 dtypes, the identity values are ``inf``, ``255``, and ``2147483647``, respectively. +If :attr:`keepdim` is ``True``, the output tensor is of the same size +as :attr:`input` except in the dimension(s) :attr:`dim` where it is of +size 1. Otherwise, :attr:`dim` is squeezed (see +:func:`torch.squeeze`), resulting in the output tensor having 1 (or +``len(dim)``) fewer dimension(s). + +The boolean tensor :attr:`mask` defines the "validity" of +:attr:`input` tensor elements: if :attr:`mask` element is True +then the corresponding element in :attr:`input` tensor will be +included in argmin computation, otherwise the element is +ignored. + +When all elements of :attr:`input` along the given dimension +:attr:`dim` are ignored (fully masked-out), the corresponding element +of the output tensor will have undefined value: it may or may not +correspond to the identity value of argmin operation; the +choice may correspond to the value that leads to the most efficient +storage of :attr:`output` tensor. + +The mask of the output tensor can be computed as +``torch.any(torch.broadcast_to(mask, input.shape), dim, keepdim=keepdim, +dtype=torch.bool)``. + +The shapes of the :attr:`mask` tensor and the :attr:`input` tensor +don't need to match, but they must be :ref:`broadcastable +` and the dimensionality of the :attr:`mask` +tensor must not be greater than of the :attr:`input` tensor. + +Args: + input (Tensor): the input tensor + dim (int): the dimension along which argmin is computed. + +Keyword args: + keepdim (bool, optional): whether the output tensor has + :attr:`dim` retained or not. Default: False. + dtype (:class:`torch.dtype`, optional): the desired data type + of returned tensor. If specified, the input tensor is + casted to :attr:`dtype` before the operation is + performed. Default: None. + mask (:class:`torch.Tensor`, optional): the boolean tensor + containing the binary mask of validity of input tensor + elements. + Default: None that is equivalent to ``torch.ones(input.shape, dtype=torch.bool)``. +Example:: + + >>> input = tensor([[-3, -2, -1], [ 0, 1, 2]]) + >>> input + tensor([[-3, -2, -1], + [ 0, 1, 2]]) + >>> mask = tensor([[ True, False, True], [False, False, False]]) + >>> mask + tensor([[ True, False, True], + [False, False, False]]) + >>> torch.masked._ops.argmin(input, 1, mask=mask) + tensor([0, 0]) +""" + +cumprod_docstring = """cumprod(input, dim, *, dtype=None, mask=None) -> Tensor + +Returns cumulative_prod of all the slices in the :attr:`input` tensor +along :attr:`dim` while the :attr:`input` elements are masked out +according to the boolean tensor :attr:`mask`. + +Let ``x`` be a sequence of unmasked elements of one-dimensional slice +of the :attr:`input` tensor. Cumsum of i-th element in ``x`` is +defined as ``prod(x[:i])``. + +The boolean tensor :attr:`mask` defines the "validity" of +:attr:`input` tensor elements: if :attr:`mask` element is True then +the corresponding element in :attr:`input` tensor will be included in +cumulative_prod computation, otherwise the element is ignored. + +The values of masked-out elements of the output tensor have undefined +value: it may or may not be set to zero or nan; the choice may correspond to +the value that leads to the most efficient storage of :attr:`output` +tensor. + +The mask of the cumulative_prod output tensor can be computed as +``torch.broadcast_to(mask, input.shape)``. 
+ +The shapes of the :attr:`mask` tensor and the :attr:`input` tensor +don't need to match, but they must be :ref:`broadcastable +` and the dimensionality of the :attr:`mask` +tensor must not be greater than of the :attr:`input` tensor. + +Args: + input (Tensor): the input tensor + dim (int): the dimension along which cumulative_prod is computed. + +Keyword args: + dtype (:class:`torch.dtype`, optional): the desired data type + of returned tensor. If specified, the input tensor is + casted to :attr:`dtype` before the operation is + performed. Default: None. + mask (:class:`torch.Tensor`, optional): the boolean tensor + containing the binary mask of validity of input tensor + elements. + Default: None that is equivalent to ``torch.ones(input.shape, dtype=torch.bool)``. + +Example:: + + >>> input = tensor([[-3., -2., -1.], [ 0., 1., 2.]]) + >>> input + tensor([[-3., -2., -1.], + [ 0., 1., 2.]]) + >>> mask = tensor([[ True, False, True], [False, False, False]]) + >>> mask + tensor([[ True, False, True], + [False, False, False]]) + >>> torch.masked._ops.cumprod(input, 1, mask=mask) + tensor([[-3., -3., 3.], + [ 1., 1., 1.]]) +""" + +cumsum_docstring = """cumsum(input, dim, *, dtype=None, mask=None) -> Tensor + +Returns cumulative_sum of all the slices in the :attr:`input` tensor +along :attr:`dim` while the :attr:`input` elements are masked out +according to the boolean tensor :attr:`mask`. + +Let ``x`` be a sequence of unmasked elements of one-dimensional slice +of the :attr:`input` tensor. Cumsum of i-th element in ``x`` is +defined as ``sum(x[:i])``. + +The boolean tensor :attr:`mask` defines the "validity" of +:attr:`input` tensor elements: if :attr:`mask` element is True then +the corresponding element in :attr:`input` tensor will be included in +cumulative_sum computation, otherwise the element is ignored. + +The values of masked-out elements of the output tensor have undefined +value: it may or may not be set to zero or nan; the choice may correspond to +the value that leads to the most efficient storage of :attr:`output` +tensor. + +The mask of the cumulative_sum output tensor can be computed as +``torch.broadcast_to(mask, input.shape)``. + +The shapes of the :attr:`mask` tensor and the :attr:`input` tensor +don't need to match, but they must be :ref:`broadcastable +` and the dimensionality of the :attr:`mask` +tensor must not be greater than of the :attr:`input` tensor. + +Args: + input (Tensor): the input tensor + dim (int): the dimension along which cumulative_sum is computed. + +Keyword args: + dtype (:class:`torch.dtype`, optional): the desired data type + of returned tensor. If specified, the input tensor is + casted to :attr:`dtype` before the operation is + performed. Default: None. + mask (:class:`torch.Tensor`, optional): the boolean tensor + containing the binary mask of validity of input tensor + elements. + Default: None that is equivalent to ``torch.ones(input.shape, dtype=torch.bool)``. 
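# A sketch, not part of the patched file, reproducing the documented masked
# cumsum for the valid positions by zero-filling masked-out elements (0 is the
# additive identity); values at masked-out output positions are undefined per
# the docstring above.
import torch

input = torch.tensor([[-3., -2., -1.], [0., 1., 2.]])
mask = torch.tensor([[True, False, True], [False, False, False]])
out = torch.where(mask, input, torch.zeros_like(input)).cumsum(dim=1)
print(out)  # tensor([[-3., -3., -4.],
            #         [ 0.,  0.,  0.]])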
+ +Example:: + + >>> input = tensor([[-3., -2., -1.], [ 0., 1., 2.]]) + >>> input + tensor([[-3., -2., -1.], + [ 0., 1., 2.]]) + >>> mask = tensor([[ True, False, True], [False, False, False]]) + >>> mask + tensor([[ True, False, True], + [False, False, False]]) + >>> torch.masked._ops.cumsum(input, 1, mask=mask) + tensor([[-3., -3., -4.], + [ 0., 0., 0.]]) +""" + +log_softmax_docstring = """log_softmax(input, dim, *, dtype=None, mask=None) -> Tensor + +Returns log_softmax of all the slices in the :attr:`input` tensor +along :attr:`dim` while the :attr:`input` elements are masked out +according to the boolean tensor :attr:`mask`. + +Let ``x`` be a sequence of unmasked elements of one-dimensional slice +of the :attr:`input` tensor. LogSoftmax of i-th element in ``x`` is +defined as ``log(exp(x[i])/sum(exp(x)))``. + +The boolean tensor :attr:`mask` defines the "validity" of +:attr:`input` tensor elements: if :attr:`mask` element is True then +the corresponding element in :attr:`input` tensor will be included in +log_softmax computation, otherwise the element is ignored. + +The values of masked-out elements of the output tensor have undefined +value: it may or may not be set to zero or nan; the choice may correspond to +the value that leads to the most efficient storage of :attr:`output` +tensor. + +The mask of the log_softmax output tensor can be computed as +``torch.broadcast_to(mask, input.shape)``. + +The shapes of the :attr:`mask` tensor and the :attr:`input` tensor +don't need to match, but they must be :ref:`broadcastable +` and the dimensionality of the :attr:`mask` +tensor must not be greater than of the :attr:`input` tensor. + +Args: + input (Tensor): the input tensor + dim (int): the dimension along which log_softmax is computed. + +Keyword args: + dtype (:class:`torch.dtype`, optional): the desired data type + of returned tensor. If specified, the input tensor is + casted to :attr:`dtype` before the operation is + performed. Default: None. + mask (:class:`torch.Tensor`, optional): the boolean tensor + containing the binary mask of validity of input tensor + elements. + Default: None that is equivalent to ``torch.ones(input.shape, dtype=torch.bool)``. + +Example:: + + >>> input = tensor([[-3., -2., -1.], [ 0., 1., 2.]]) + >>> input + tensor([[-3., -2., -1.], + [ 0., 1., 2.]]) + >>> mask = tensor([[ True, False, True], [False, False, False]]) + >>> mask + tensor([[ True, False, True], + [False, False, False]]) + >>> torch.masked._ops.log_softmax(input, 1, mask=mask) + tensor([[-2.1269, -inf, -0.1269], + [ nan, nan, nan]]) +""" + +logsumexp_docstring = """logsumexp(input, dim, *, keepdim=False, dtype=None, mask=None) -> Tensor + +Returns logsumexp of all the elements in the :attr:`input` +tensor along the given dimension(s) :attr:`dim` while the :attr:`input` +elements are masked out according to the boolean tensor +:attr:`mask`. + +The identity value of logsumexp operation, which is used to start the reduction, is ``-2147483648``. + +If :attr:`keepdim` is ``True``, the output tensor is of the same size +as :attr:`input` except in the dimension(s) :attr:`dim` where it is of +size 1. Otherwise, :attr:`dim` is squeezed (see +:func:`torch.squeeze`), resulting in the output tensor having 1 (or +``len(dim)``) fewer dimension(s). + +The boolean tensor :attr:`mask` defines the "validity" of +:attr:`input` tensor elements: if :attr:`mask` element is True +then the corresponding element in :attr:`input` tensor will be +included in logsumexp computation, otherwise the element is +ignored. 
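# A sketch, not part of the patched file: for floating-point input, a masked
# logsumexp can be written with plain torch ops by filling masked-out
# positions with -inf. A fully masked-out row then yields -inf rather than the
# unspecified value described in the docstring.
import torch

x = torch.tensor([[-3., -2., -1.], [0., 1., 2.]])
mask = torch.tensor([[True, False, True], [False, False, False]])
out = torch.logsumexp(x.masked_fill(~mask, float("-inf")), dim=1)
print(out)  # first row: log(exp(-3) + exp(-1)) ~ -0.8731; second row: -inf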
+ +When all elements of :attr:`input` along the given dimension +:attr:`dim` are ignored (fully masked-out), the corresponding element +of the output tensor will have undefined value: it may or may not +correspond to the identity value of logsumexp operation; the +choice may correspond to the value that leads to the most efficient +storage of :attr:`output` tensor. + +The mask of the output tensor can be computed as +``torch.any(torch.broadcast_to(mask, input.shape), dim, keepdim=keepdim, +dtype=torch.bool)``. + +The shapes of the :attr:`mask` tensor and the :attr:`input` tensor +don't need to match, but they must be :ref:`broadcastable +` and the dimensionality of the :attr:`mask` +tensor must not be greater than of the :attr:`input` tensor. + +Args: + input (Tensor): the input tensor + dim (int or tuple of ints, optional): the dimension or dimensions to reduce. + Default: None that is equivalent to ``tuple(range(input.ndim))``. + +Keyword args: + keepdim (bool, optional): whether the output tensor has + :attr:`dim` retained or not. Default: False. + dtype (:class:`torch.dtype`, optional): the desired data type + of returned tensor. If specified, the input tensor is + casted to :attr:`dtype` before the operation is + performed. Default: None. + mask (:class:`torch.Tensor`, optional): the boolean tensor + containing the binary mask of validity of input tensor + elements. + Default: None that is equivalent to ``torch.ones(input.shape, dtype=torch.bool)``. + +Example:: + + >>> input = tensor([[-3, -2, -1], [ 0, 1, 2]]) + >>> input + tensor([[-3, -2, -1], + [ 0, 1, 2]]) + >>> mask = tensor([[ True, False, True], [False, False, False]]) + >>> mask + tensor([[ True, False, True], + [False, False, False]]) + >>> torch.masked._ops.logsumexp(input, 1, mask=mask) + tensor([ 0, -9223372036854775808]) +""" + +mean_docstring = """mean(input, dim, *, keepdim=False, dtype=None, mask=None) -> Tensor + +Returns mean of all the elements in the :attr:`input` +tensor along the given dimension(s) :attr:`dim` while the :attr:`input` +elements are masked out according to the boolean tensor +:attr:`mask`. + +By definition, the identity value of a mean operation is the mean +value of the tensor. If all elements of the input tensor along given +dimension(s) :attr:`dim` are masked-out, the identity value of the +mean is undefined. Due to this ambiguity, the elements of output +tensor with strided layout, that correspond to fully masked-out +elements, have ``nan`` values. + +If :attr:`keepdim` is ``True``, the output tensor is of the same size +as :attr:`input` except in the dimension(s) :attr:`dim` where it is of +size 1. Otherwise, :attr:`dim` is squeezed (see +:func:`torch.squeeze`), resulting in the output tensor having 1 (or +``len(dim)``) fewer dimension(s). + +The boolean tensor :attr:`mask` defines the "validity" of +:attr:`input` tensor elements: if :attr:`mask` element is True +then the corresponding element in :attr:`input` tensor will be +included in mean computation, otherwise the element is +ignored. + +When all elements of :attr:`input` along the given dimension +:attr:`dim` are ignored (fully masked-out), the corresponding element +of the output tensor will have undefined value: it may or may not +correspond to the identity value of mean operation; the +choice may correspond to the value that leads to the most efficient +storage of :attr:`output` tensor. + +The mask of the output tensor can be computed as +``torch.any(torch.broadcast_to(mask, input.shape), dim, keepdim=keepdim, +dtype=torch.bool)``. 
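# A sketch, not part of the patched file, of why fully masked-out rows come
# out as nan for the masked mean: the sum over valid elements is divided by
# the count of valid elements, and 0 / 0 is nan.
import torch

x = torch.tensor([[-3., -2., -1.], [0., 1., 2.]])
mask = torch.tensor([[True, False, True], [False, False, False]])
count = mask.sum(dim=1)              # tensor([2, 0])
out = (x * mask).sum(dim=1) / count  # tensor([-2., nan])
print(out)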
+ +The shapes of the :attr:`mask` tensor and the :attr:`input` tensor +don't need to match, but they must be :ref:`broadcastable +` and the dimensionality of the :attr:`mask` +tensor must not be greater than of the :attr:`input` tensor. + +Args: + input (Tensor): the input tensor + dim (int or tuple of ints, optional): the dimension or dimensions to reduce. + Default: None that is equivalent to ``tuple(range(input.ndim))``. + +Keyword args: + keepdim (bool, optional): whether the output tensor has + :attr:`dim` retained or not. Default: False. + dtype (:class:`torch.dtype`, optional): the desired data type + of returned tensor. If specified, the input tensor is + casted to :attr:`dtype` before the operation is + performed. Default: None. + mask (:class:`torch.Tensor`, optional): the boolean tensor + containing the binary mask of validity of input tensor + elements. + Default: None that is equivalent to ``torch.ones(input.shape, dtype=torch.bool)``. + +Example:: + + >>> input = tensor([[-3, -2, -1], [ 0, 1, 2]]) + >>> input + tensor([[-3, -2, -1], + [ 0, 1, 2]]) + >>> mask = tensor([[ True, False, True], [False, False, False]]) + >>> mask + tensor([[ True, False, True], + [False, False, False]]) + >>> torch.masked._ops.mean(input, 1, mask=mask) + tensor([-2., nan]) +""" + +median_docstring = """median(input, dim, *, keepdim=False, dtype=None, mask=None) -> Tensor +Returns median of all the elements in the :attr:`input` +tensor along the given dimension(s) :attr:`dim` while the :attr:`input` +elements are masked out according to the boolean tensor +:attr:`mask`. +By definition, the identity value of a median operation is the median +value of the tensor. If all elements of the input tensor along given +dimension(s) :attr:`dim` are masked-out, the identity value of the +median is undefined. Due to this ambiguity, the elements of output +tensor with strided layout, that correspond to fully masked-out +elements, have ``nan`` values. +If :attr:`keepdim` is ``True``, the output tensor is of the same size +as :attr:`input` except in the dimension(s) :attr:`dim` where it is of +size 1. Otherwise, :attr:`dim` is squeezed (see +:func:`torch.squeeze`), resulting in the output tensor having 1 (or +``len(dim)``) fewer dimension(s). + +The boolean tensor :attr:`mask` defines the "validity" of +:attr:`input` tensor elements: if :attr:`mask` element is True +then the corresponding element in :attr:`input` tensor will be +included in median computation, otherwise the element is +ignored. + +When all elements of :attr:`input` along the given dimension +:attr:`dim` are ignored (fully masked-out), the corresponding element +of the output tensor will have undefined value: it may or may not +correspond to the identity value of median operation; the +choice may correspond to the value that leads to the most efficient +storage of :attr:`output` tensor. + +The mask of the output tensor can be computed as +``torch.any(torch.broadcast_to(mask, input.shape), dim, keepdim=keepdim, +dtype=torch.bool)``. + +The shapes of the :attr:`mask` tensor and the :attr:`input` tensor +don't need to match, but they must be :ref:`broadcastable +` and the dimensionality of the :attr:`mask` +tensor must not be greater than of the :attr:`input` tensor. + +Args: + input (Tensor): the input tensor + dim (int): the dimension along which median is computed. + +Keyword args: + keepdim (bool, optional): whether the output tensor has + :attr:`dim` retained or not. Default: False. 
+ dtype (:class:`torch.dtype`, optional): the desired data type + of returned tensor. If specified, the input tensor is + casted to :attr:`dtype` before the operation is + performed. Default: None. + mask (:class:`torch.Tensor`, optional): the boolean tensor + containing the binary mask of validity of input tensor + elements. + Default: None that is equivalent to ``torch.ones(input.shape, dtype=torch.bool)``. +Example:: + + >>> input = tensor([[-3., -2., -1.], [ 0., 1., 2.]]) + >>> input + tensor([[-3., -2., -1.], + [ 0., 1., 2.]]) + >>> mask = tensor([[ True, False, True], [False, False, False]]) + >>> mask + tensor([[ True, False, True], + [False, False, False]]) + >>> torch.masked._ops.median(input, 1, mask=mask) + tensor([-3., nan]) +""" + +norm_docstring = """norm(input, ord, dim, *, keepdim=False, dtype=None, mask=None) -> Tensor + +Returns norm of all the elements in the :attr:`input` +tensor along the given dimension(s) :attr:`dim` while the :attr:`input` +elements are masked out according to the boolean tensor +:attr:`mask`. + +The identity value of norm operation, which is used to start the +reduction, is ``0.0``, except for ``ord=-inf`` it is +``inf``. + +If :attr:`keepdim` is ``True``, the output tensor is of the same size +as :attr:`input` except in the dimension(s) :attr:`dim` where it is of +size 1. Otherwise, :attr:`dim` is squeezed (see +:func:`torch.squeeze`), resulting in the output tensor having 1 (or +``len(dim)``) fewer dimension(s). + +The boolean tensor :attr:`mask` defines the "validity" of +:attr:`input` tensor elements: if :attr:`mask` element is True +then the corresponding element in :attr:`input` tensor will be +included in norm computation, otherwise the element is +ignored. + +When all elements of :attr:`input` along the given dimension +:attr:`dim` are ignored (fully masked-out), the corresponding element +of the output tensor will have undefined value: it may or may not +correspond to the identity value of norm operation; the +choice may correspond to the value that leads to the most efficient +storage of :attr:`output` tensor. + +The mask of the output tensor can be computed as +``torch.any(torch.broadcast_to(mask, input.shape), dim, keepdim=keepdim, +dtype=torch.bool)``. + +The shapes of the :attr:`mask` tensor and the :attr:`input` tensor +don't need to match, but they must be :ref:`broadcastable +` and the dimensionality of the :attr:`mask` +tensor must not be greater than of the :attr:`input` tensor. + +Args: + input (Tensor): the input tensor + ord (int, float, optional): the order of vector norm. Default: 2. + See :func:`torch.linalg.vector_norm` for a list of supported norms. + dim (int or tuple of ints, optional): the dimension or dimensions to reduce. + Default: None that is equivalent to ``tuple(range(input.ndim))``. + +Keyword args: + keepdim (bool, optional): whether the output tensor has + :attr:`dim` retained or not. Default: False. + dtype (:class:`torch.dtype`, optional): the desired data type + of returned tensor. If specified, the input tensor is + casted to :attr:`dtype` before the operation is + performed. Default: None. + mask (:class:`torch.Tensor`, optional): the boolean tensor + containing the binary mask of validity of input tensor + elements. + Default: None that is equivalent to ``torch.ones(input.shape, dtype=torch.bool)``. 
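# A sketch, not part of the patched file: for ord values whose identity is 0.0
# (such as the default ord=2), the masked norm can be reproduced by
# zero-filling masked-out elements before torch.linalg.vector_norm. This does
# not carry over to ord=-inf, whose identity is inf as noted above.
import torch

x = torch.tensor([[-3., -2., -1.], [0., 1., 2.]])
mask = torch.tensor([[True, False, True], [False, False, False]])
out = torch.linalg.vector_norm(x.masked_fill(~mask, 0.0), ord=2.0, dim=1)
print(out)  # tensor([3.1623, 0.0000])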
+ +Example:: + + >>> input = tensor([[-3., -2., -1.], [ 0., 1., 2.]]) + >>> input + tensor([[-3., -2., -1.], + [ 0., 1., 2.]]) + >>> mask = tensor([[ True, False, True], [False, False, False]]) + >>> mask + tensor([[ True, False, True], + [False, False, False]]) + >>> torch.masked._ops.norm(input, 2.0, 1, mask=mask) + tensor([3.1623, 0.0000]) +""" + +normalize_docstring = """normalize(input, ord, dim, *, eps=1e-12, dtype=None, mask=None) -> Tensor + +Returns normalize of all the slices in the :attr:`input` tensor +along :attr:`dim` while the :attr:`input` elements are masked out +according to the boolean tensor :attr:`mask`. + +Let ``x`` be a sequence of unmasked elements of one-dimensional slice +of the :attr:`input` tensor. Normalize of i-th element in ``x`` is +defined as ``x[i]/max(norm(x, p), eps)``. + +The boolean tensor :attr:`mask` defines the "validity" of +:attr:`input` tensor elements: if :attr:`mask` element is True then +the corresponding element in :attr:`input` tensor will be included in +normalize computation, otherwise the element is ignored. + +The values of masked-out elements of the output tensor have undefined +value: it may or may not be set to zero or nan; the choice may correspond to +the value that leads to the most efficient storage of :attr:`output` +tensor. + +The mask of the normalize output tensor can be computed as +``torch.broadcast_to(mask, input.shape)``. + +The shapes of the :attr:`mask` tensor and the :attr:`input` tensor +don't need to match, but they must be :ref:`broadcastable +` and the dimensionality of the :attr:`mask` +tensor must not be greater than of the :attr:`input` tensor. + +Args: + input (Tensor): the input tensor + ord (int, float): the order of vector norm. Default: 2. + See :func:`torch.linalg.vector_norm` for a list of supported norms. + dim (int): the dimension along which normalize is computed. + +Keyword args: + eps (float, optional): small value to avoid division by zero. Default: 1e-12. + dtype (:class:`torch.dtype`, optional): the desired data type + of returned tensor. If specified, the input tensor is + casted to :attr:`dtype` before the operation is + performed. Default: None. + mask (:class:`torch.Tensor`, optional): the boolean tensor + containing the binary mask of validity of input tensor + elements. + Default: None that is equivalent to ``torch.ones(input.shape, dtype=torch.bool)``. + +Example:: + + >>> input = tensor([[-3., -2., -1.], [ 0., 1., 2.]]) + >>> input + tensor([[-3., -2., -1.], + [ 0., 1., 2.]]) + >>> mask = tensor([[ True, False, True], [False, False, False]]) + >>> mask + tensor([[ True, False, True], + [False, False, False]]) + >>> torch.masked._ops.normalize(input, 2.0, 1, mask=mask) + tensor([[-0.9487, 0.0000, -0.3162], + [ 0.0000, 0.0000, 0.0000]]) +""" + +prod_docstring = """prod(input, dim, *, keepdim=False, dtype=None, mask=None) -> Tensor + +Returns product of all the elements in the :attr:`input` +tensor along the given dimension(s) :attr:`dim` while the :attr:`input` +elements are masked out according to the boolean tensor +:attr:`mask`. + +The identity value of product operation, which is used to start the reduction, is ``1``. + +If :attr:`keepdim` is ``True``, the output tensor is of the same size +as :attr:`input` except in the dimension(s) :attr:`dim` where it is of +size 1. Otherwise, :attr:`dim` is squeezed (see +:func:`torch.squeeze`), resulting in the output tensor having 1 (or +``len(dim)``) fewer dimension(s). 
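# A sketch, not part of the patched file, illustrating the identity value
# mentioned above: filling masked-out elements with 1 (the identity of the
# product) and reducing with torch.prod reproduces the documented result.
import torch

x = torch.tensor([[-3, -2, -1], [0, 1, 2]])
mask = torch.tensor([[True, False, True], [False, False, False]])
out = x.masked_fill(~mask, 1).prod(dim=1)
print(out)  # tensor([3, 1])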
+ +The boolean tensor :attr:`mask` defines the "validity" of +:attr:`input` tensor elements: if :attr:`mask` element is True +then the corresponding element in :attr:`input` tensor will be +included in product computation, otherwise the element is +ignored. + +When all elements of :attr:`input` along the given dimension +:attr:`dim` are ignored (fully masked-out), the corresponding element +of the output tensor will have undefined value: it may or may not +correspond to the identity value of product operation; the +choice may correspond to the value that leads to the most efficient +storage of :attr:`output` tensor. + +The mask of the output tensor can be computed as +``torch.any(torch.broadcast_to(mask, input.shape), dim, keepdim=keepdim, +dtype=torch.bool)``. + +The shapes of the :attr:`mask` tensor and the :attr:`input` tensor +don't need to match, but they must be :ref:`broadcastable +` and the dimensionality of the :attr:`mask` +tensor must not be greater than of the :attr:`input` tensor. + +Args: + input (Tensor): the input tensor + dim (int or tuple of ints, optional): the dimension or dimensions to reduce. + Default: None that is equivalent to ``tuple(range(input.ndim))``. + +Keyword args: + keepdim (bool, optional): whether the output tensor has + :attr:`dim` retained or not. Default: False. + dtype (:class:`torch.dtype`, optional): the desired data type + of returned tensor. If specified, the input tensor is + casted to :attr:`dtype` before the operation is + performed. Default: None. + mask (:class:`torch.Tensor`, optional): the boolean tensor + containing the binary mask of validity of input tensor + elements. + Default: None that is equivalent to ``torch.ones(input.shape, dtype=torch.bool)``. + +Example:: + + >>> input = tensor([[-3, -2, -1], [ 0, 1, 2]]) + >>> input + tensor([[-3, -2, -1], + [ 0, 1, 2]]) + >>> mask = tensor([[ True, False, True], [False, False, False]]) + >>> mask + tensor([[ True, False, True], + [False, False, False]]) + >>> torch.masked._ops.prod(input, 1, mask=mask) + tensor([3, 1]) +""" + +softmax_docstring = """softmax(input, dim, *, dtype=None, mask=None) -> Tensor + +Returns softmax of all the slices in the :attr:`input` tensor +along :attr:`dim` while the :attr:`input` elements are masked out +according to the boolean tensor :attr:`mask`. + +Let ``x`` be a sequence of unmasked elements of one-dimensional slice +of the :attr:`input` tensor. Softmax of i-th element in ``x`` is +defined as ``exp(x[i])/sum(exp(x))``. + +The boolean tensor :attr:`mask` defines the "validity" of +:attr:`input` tensor elements: if :attr:`mask` element is True then +the corresponding element in :attr:`input` tensor will be included in +softmax computation, otherwise the element is ignored. + +The values of masked-out elements of the output tensor have undefined +value: it may or may not be set to zero or nan; the choice may correspond to +the value that leads to the most efficient storage of :attr:`output` +tensor. + +The mask of the softmax output tensor can be computed as +``torch.broadcast_to(mask, input.shape)``. + +The shapes of the :attr:`mask` tensor and the :attr:`input` tensor +don't need to match, but they must be :ref:`broadcastable +` and the dimensionality of the :attr:`mask` +tensor must not be greater than of the :attr:`input` tensor. + +Args: + input (Tensor): the input tensor + dim (int): the dimension along which softmax is computed. + +Keyword args: + dtype (:class:`torch.dtype`, optional): the desired data type + of returned tensor. 
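# A sketch, not part of the patched file: the masked softmax can be reproduced
# for the valid positions by filling masked-out elements with -inf so that
# they receive zero weight; a fully masked-out row comes out as nan.
import torch

x = torch.tensor([[-3., -2., -1.], [0., 1., 2.]])
mask = torch.tensor([[True, False, True], [False, False, False]])
out = torch.softmax(x.masked_fill(~mask, float("-inf")), dim=1)
print(out)  # tensor([[0.1192, 0.0000, 0.8808],
            #         [   nan,    nan,    nan]])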
If specified, the input tensor is + casted to :attr:`dtype` before the operation is + performed. Default: None. + mask (:class:`torch.Tensor`, optional): the boolean tensor + containing the binary mask of validity of input tensor + elements. + Default: None that is equivalent to ``torch.ones(input.shape, dtype=torch.bool)``. + +Example:: + + >>> input = tensor([[-3., -2., -1.], [ 0., 1., 2.]]) + >>> input + tensor([[-3., -2., -1.], + [ 0., 1., 2.]]) + >>> mask = tensor([[ True, False, True], [False, False, False]]) + >>> mask + tensor([[ True, False, True], + [False, False, False]]) + >>> torch.masked._ops.softmax(input, 1, mask=mask) + tensor([[0.1192, 0.0000, 0.8808], + [ nan, nan, nan]]) +""" + +softmin_docstring = """softmin(input, dim, *, dtype=None, mask=None) -> Tensor + +Returns softmin of all the slices in the :attr:`input` tensor +along :attr:`dim` while the :attr:`input` elements are masked out +according to the boolean tensor :attr:`mask`. + +Let ``x`` be a sequence of unmasked elements of one-dimensional slice +of the :attr:`input` tensor. Softmin of i-th element in ``x`` is +defined as ``exp(-x[i])/sum(exp(-x))``. + +The boolean tensor :attr:`mask` defines the "validity" of +:attr:`input` tensor elements: if :attr:`mask` element is True then +the corresponding element in :attr:`input` tensor will be included in +softmin computation, otherwise the element is ignored. + +The values of masked-out elements of the output tensor have undefined +value: it may or may not be set to zero or nan; the choice may correspond to +the value that leads to the most efficient storage of :attr:`output` +tensor. + +The mask of the softmin output tensor can be computed as +``torch.broadcast_to(mask, input.shape)``. + +The shapes of the :attr:`mask` tensor and the :attr:`input` tensor +don't need to match, but they must be :ref:`broadcastable +` and the dimensionality of the :attr:`mask` +tensor must not be greater than of the :attr:`input` tensor. + +Args: + input (Tensor): the input tensor + dim (int): the dimension along which softmin is computed. + +Keyword args: + dtype (:class:`torch.dtype`, optional): the desired data type + of returned tensor. If specified, the input tensor is + casted to :attr:`dtype` before the operation is + performed. Default: None. + mask (:class:`torch.Tensor`, optional): the boolean tensor + containing the binary mask of validity of input tensor + elements. + Default: None that is equivalent to ``torch.ones(input.shape, dtype=torch.bool)``. + +Example:: + + >>> input = tensor([[-3., -2., -1.], [ 0., 1., 2.]]) + >>> input + tensor([[-3., -2., -1.], + [ 0., 1., 2.]]) + >>> mask = tensor([[ True, False, True], [False, False, False]]) + >>> mask + tensor([[ True, False, True], + [False, False, False]]) + >>> torch.masked._ops.softmin(input, 1, mask=mask) + tensor([[0.8808, 0.0000, 0.1192], + [ nan, nan, nan]]) +""" + +std_docstring = """std(input, dim, unbiased, *, keepdim=False, dtype=None, mask=None) -> Tensor +Returns standard_deviation of all the elements in the :attr:`input` +tensor along the given dimension(s) :attr:`dim` while the :attr:`input` +elements are masked out according to the boolean tensor +:attr:`mask`. +The identity value of sample standard deviation operation is undefined. The +elements of output tensor with strided layout, that correspond to +fully masked-out elements, have ``nan`` values. +If :attr:`keepdim` is ``True``, the output tensor is of the same size +as :attr:`input` except in the dimension(s) :attr:`dim` where it is of +size 1. 
Otherwise, :attr:`dim` is squeezed (see +:func:`torch.squeeze`), resulting in the output tensor having 1 (or +``len(dim)``) fewer dimension(s). + +The boolean tensor :attr:`mask` defines the "validity" of +:attr:`input` tensor elements: if :attr:`mask` element is True +then the corresponding element in :attr:`input` tensor will be +included in standard_deviation computation, otherwise the element is +ignored. + +When all elements of :attr:`input` along the given dimension +:attr:`dim` are ignored (fully masked-out), the corresponding element +of the output tensor will have undefined value: it may or may not +correspond to the identity value of standard_deviation operation; the +choice may correspond to the value that leads to the most efficient +storage of :attr:`output` tensor. + +The mask of the output tensor can be computed as +``torch.any(torch.broadcast_to(mask, input.shape), dim, keepdim=keepdim, +dtype=torch.bool)``. + +The shapes of the :attr:`mask` tensor and the :attr:`input` tensor +don't need to match, but they must be :ref:`broadcastable +` and the dimensionality of the :attr:`mask` +tensor must not be greater than of the :attr:`input` tensor. + +Args: + input (Tensor): the input tensor + dim (int or tuple of ints, optional): the dimension or dimensions to reduce. + Default: None that is equivalent to ``tuple(range(input.ndim))``. + unbiased (bool): when True, use Bessel’s correction, otherwise, compute + the uncorrected sample variance. + +Keyword args: + keepdim (bool, optional): whether the output tensor has + :attr:`dim` retained or not. Default: False. + dtype (:class:`torch.dtype`, optional): the desired data type + of returned tensor. If specified, the input tensor is + casted to :attr:`dtype` before the operation is + performed. Default: None. + mask (:class:`torch.Tensor`, optional): the boolean tensor + containing the binary mask of validity of input tensor + elements. + Default: None that is equivalent to ``torch.ones(input.shape, dtype=torch.bool)``. +Example:: + + >>> input = tensor([[-3, -2, -1], [ 0, 1, 2]]) + >>> input + tensor([[-3, -2, -1], + [ 0, 1, 2]]) + >>> mask = tensor([[ True, False, True], [False, False, False]]) + >>> mask + tensor([[ True, False, True], + [False, False, False]]) + >>> torch.masked._ops.std(input, 1, False, mask=mask) + tensor([1., nan]) +""" + +sum_docstring = """sum(input, dim, *, keepdim=False, dtype=None, mask=None) -> Tensor + +Returns sum of all the elements in the :attr:`input` +tensor along the given dimension(s) :attr:`dim` while the :attr:`input` +elements are masked out according to the boolean tensor +:attr:`mask`. + +The identity value of sum operation, which is used to start the reduction, is ``0``. + +If :attr:`keepdim` is ``True``, the output tensor is of the same size +as :attr:`input` except in the dimension(s) :attr:`dim` where it is of +size 1. Otherwise, :attr:`dim` is squeezed (see +:func:`torch.squeeze`), resulting in the output tensor having 1 (or +``len(dim)``) fewer dimension(s). + +The boolean tensor :attr:`mask` defines the "validity" of +:attr:`input` tensor elements: if :attr:`mask` element is True +then the corresponding element in :attr:`input` tensor will be +included in sum computation, otherwise the element is +ignored. 
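# A sketch, not part of the patched file: because ignored elements contribute
# the identity value 0, the masked sum equals an ordinary sum over the
# zero-filled input.
import torch

x = torch.tensor([[-3, -2, -1], [0, 1, 2]])
mask = torch.tensor([[True, False, True], [False, False, False]])
out = (x * mask).sum(dim=1)
print(out)  # tensor([-4, 0])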
+ +When all elements of :attr:`input` along the given dimension +:attr:`dim` are ignored (fully masked-out), the corresponding element +of the output tensor will have undefined value: it may or may not +correspond to the identity value of sum operation; the +choice may correspond to the value that leads to the most efficient +storage of :attr:`output` tensor. + +The mask of the output tensor can be computed as +``torch.any(torch.broadcast_to(mask, input.shape), dim, keepdim=keepdim, +dtype=torch.bool)``. + +The shapes of the :attr:`mask` tensor and the :attr:`input` tensor +don't need to match, but they must be :ref:`broadcastable +` and the dimensionality of the :attr:`mask` +tensor must not be greater than of the :attr:`input` tensor. + +Args: + input (Tensor): the input tensor + dim (int or tuple of ints, optional): the dimension or dimensions to reduce. + Default: None that is equivalent to ``tuple(range(input.ndim))``. + +Keyword args: + keepdim (bool, optional): whether the output tensor has + :attr:`dim` retained or not. Default: False. + dtype (:class:`torch.dtype`, optional): the desired data type + of returned tensor. If specified, the input tensor is + casted to :attr:`dtype` before the operation is + performed. Default: None. + mask (:class:`torch.Tensor`, optional): the boolean tensor + containing the binary mask of validity of input tensor + elements. + Default: None that is equivalent to ``torch.ones(input.shape, dtype=torch.bool)``. + +Example:: + + >>> input = tensor([[-3, -2, -1], [ 0, 1, 2]]) + >>> input + tensor([[-3, -2, -1], + [ 0, 1, 2]]) + >>> mask = tensor([[ True, False, True], [False, False, False]]) + >>> mask + tensor([[ True, False, True], + [False, False, False]]) + >>> torch.masked._ops.sum(input, 1, mask=mask) + tensor([-4, 0]) +""" + +var_docstring = """var(input, dim, unbiased, *, keepdim=False, dtype=None, mask=None) -> Tensor +Returns variance of all the elements in the :attr:`input` +tensor along the given dimension(s) :attr:`dim` while the :attr:`input` +elements are masked out according to the boolean tensor +:attr:`mask`. +The identity value of sample variance operation is undefined. The +elements of output tensor with strided layout, that correspond to +fully masked-out elements, have ``nan`` values. +If :attr:`keepdim` is ``True``, the output tensor is of the same size +as :attr:`input` except in the dimension(s) :attr:`dim` where it is of +size 1. Otherwise, :attr:`dim` is squeezed (see +:func:`torch.squeeze`), resulting in the output tensor having 1 (or +``len(dim)``) fewer dimension(s). + +The boolean tensor :attr:`mask` defines the "validity" of +:attr:`input` tensor elements: if :attr:`mask` element is True +then the corresponding element in :attr:`input` tensor will be +included in variance computation, otherwise the element is +ignored. + +When all elements of :attr:`input` along the given dimension +:attr:`dim` are ignored (fully masked-out), the corresponding element +of the output tensor will have undefined value: it may or may not +correspond to the identity value of variance operation; the +choice may correspond to the value that leads to the most efficient +storage of :attr:`output` tensor. + +The mask of the output tensor can be computed as +``torch.any(torch.broadcast_to(mask, input.shape), dim, keepdim=keepdim, +dtype=torch.bool)``. 
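# A sketch, not part of the patched file, spelling out the biased
# (unbiased=False) masked variance from the definitions above. A fully
# masked-out row already has a nan mean (0 divided by 0), so its variance is
# nan as well.
import torch

x = torch.tensor([[-3., -2., -1.], [0., 1., 2.]])
mask = torch.tensor([[True, False, True], [False, False, False]])
count = mask.sum(dim=1, keepdim=True)               # tensor([[2], [0]])
mean = (x * mask).sum(dim=1, keepdim=True) / count
var = (((x - mean) * mask) ** 2).sum(dim=1) / count.squeeze(1)
print(var)  # tensor([1., nan])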
+ +The shapes of the :attr:`mask` tensor and the :attr:`input` tensor +don't need to match, but they must be :ref:`broadcastable +` and the dimensionality of the :attr:`mask` +tensor must not be greater than of the :attr:`input` tensor. + +Args: + input (Tensor): the input tensor + dim (int or tuple of ints, optional): the dimension or dimensions to reduce. + Default: None that is equivalent to ``tuple(range(input.ndim))``. + unbiased (bool): when True, use Bessel’s correction, otherwise, compute + the uncorrected sample variance. + +Keyword args: + keepdim (bool, optional): whether the output tensor has + :attr:`dim` retained or not. Default: False. + dtype (:class:`torch.dtype`, optional): the desired data type + of returned tensor. If specified, the input tensor is + casted to :attr:`dtype` before the operation is + performed. Default: None. + mask (:class:`torch.Tensor`, optional): the boolean tensor + containing the binary mask of validity of input tensor + elements. + Default: None that is equivalent to ``torch.ones(input.shape, dtype=torch.bool)``. +Example:: + + >>> input = tensor([[-3, -2, -1], [ 0, 1, 2]]) + >>> input + tensor([[-3, -2, -1], + [ 0, 1, 2]]) + >>> mask = tensor([[ True, False, True], [False, False, False]]) + >>> mask + tensor([[ True, False, True], + [False, False, False]]) + >>> torch.masked._ops.var(input, 1, False, mask=mask) + tensor([1., nan]) +""" diff --git a/env-llmeval/lib/python3.10/site-packages/torch/masked/_ops.py b/env-llmeval/lib/python3.10/site-packages/torch/masked/_ops.py new file mode 100644 index 0000000000000000000000000000000000000000..083af67f12099a888b4f56662f9a6a3695fe27d5 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torch/masked/_ops.py @@ -0,0 +1,1796 @@ + +import warnings + +# A workaround to support both TorchScript and MyPy: +from typing import Any, List, Optional, Tuple, TYPE_CHECKING, Union + +import torch +from torch import Tensor +from torch.masked import as_masked_tensor, is_masked_tensor, MaskedTensor +from . import _docs +from torch._prims_common import corresponding_real_dtype +from torch import sym_float + +if TYPE_CHECKING: + from torch.types import _dtype as DType + + DimOrDims = Optional[Union[int, Tuple[int], List[int]]] +else: + # The JIT doesn't understand Union, nor torch.dtype here + DType = int + DimOrDims = Optional[Tuple[int]] + + +__all__ = [] + +# All masked reduction/normalization operations have the same +# signatures. Here we introduce docstring templates that are applied +# to docstrings of reduction/normalization functions via +# _apply_docstring_templates decorator. + + +def _apply_docstring_templates(func): + """Decorator that applies docstring templates to function docstring + and returns the function instance. + """ + + doc_string = getattr(_docs, f"{func.__name__}_docstring", None) + if doc_string is None: + warnings.warn( + f"No documentation string available for {func.__name__}." + " PyTorch team should run `python tools/update_masked_docs.py`" + " to generate the missing docstrings." 
+ ) + else: + func.__doc__ = doc_string + + # Expose function as public symbol + __all__.append(func.__name__) + + return func + + +def _generate_docstring(func): + """A utility function called from tools/update_masked_docs.py + script to update the module torch.masked._docs.py + """ + docstring_templates = dict( + reduction_signature="""\ +{function_name}(input, {operation_args}, *, {operation_kwargs}) -> Tensor""", + reduction_descr="""\ +Returns {operation name} of all the elements in the :attr:`input` +tensor along the given dimension(s) :attr:`dim` while the :attr:`input` +elements are masked out according to the boolean tensor +:attr:`mask`.""", + reduction_args="""\ +If :attr:`keepdim` is ``True``, the output tensor is of the same size +as :attr:`input` except in the dimension(s) :attr:`dim` where it is of +size 1. Otherwise, :attr:`dim` is squeezed (see +:func:`torch.squeeze`), resulting in the output tensor having 1 (or +``len(dim)``) fewer dimension(s). + +The boolean tensor :attr:`mask` defines the "validity" of +:attr:`input` tensor elements: if :attr:`mask` element is True +then the corresponding element in :attr:`input` tensor will be +included in {operation name} computation, otherwise the element is +ignored. + +When all elements of :attr:`input` along the given dimension +:attr:`dim` are ignored (fully masked-out), the corresponding element +of the output tensor will have undefined value: it may or may not +correspond to the identity value of {operation name} operation; the +choice may correspond to the value that leads to the most efficient +storage of :attr:`output` tensor. + +The mask of the output tensor can be computed as +``torch.any(torch.broadcast_to(mask, input.shape), dim, keepdim=keepdim, +dtype=torch.bool)``. + +The shapes of the :attr:`mask` tensor and the :attr:`input` tensor +don't need to match, but they must be :ref:`broadcastable +` and the dimensionality of the :attr:`mask` +tensor must not be greater than of the :attr:`input` tensor. + +Args: + input (Tensor): the input tensor + {args_declarations} + +Keyword args: + {kwargs_declarations}""", + reduction_example="""\ +Example:: + + >>> input = {example_input} + >>> input + {indent_example_input} + >>> mask = {example_mask} + >>> mask + {indent_example_mask} + >>> {full_function_name}(input, {example_args}, mask=mask) + {indent_example_output} +""", + reduction_identity="""\ +The identity value of {operation name} operation, which is used to start the reduction, is ``{identity_int32}``.""", + reduction_identity_dtype="""\ +The identity value of {operation name} operation, which is used to start the +reduction, depends on input dtype. For instance, for float32, uint8, +and int32 dtypes, the identity values are ``{identity_float32}``, ``{identity_uint8}``, and ``{identity_int32}``, respectively.""", + normalization_signature="""\ +{function_name}(input, {operation_args}, *, {operation_kwargs}) -> Tensor""", + normalization_descr="""\ +Returns {operation name} of all the slices in the :attr:`input` tensor +along :attr:`dim` while the :attr:`input` elements are masked out +according to the boolean tensor :attr:`mask`. + +{definition}""", + normalization_args="""\ +The boolean tensor :attr:`mask` defines the "validity" of +:attr:`input` tensor elements: if :attr:`mask` element is True then +the corresponding element in :attr:`input` tensor will be included in +{operation name} computation, otherwise the element is ignored. 
+ +The values of masked-out elements of the output tensor have undefined +value: it may or may not be set to zero or nan; the choice may correspond to +the value that leads to the most efficient storage of :attr:`output` +tensor. + +The mask of the {operation name} output tensor can be computed as +``torch.broadcast_to(mask, input.shape)``. + +The shapes of the :attr:`mask` tensor and the :attr:`input` tensor +don't need to match, but they must be :ref:`broadcastable +` and the dimensionality of the :attr:`mask` +tensor must not be greater than of the :attr:`input` tensor. + +Args: + input (Tensor): the input tensor + {args_declarations} + +Keyword args: + {kwargs_declarations}""", + normalization_example="""\ +Example:: + + >>> input = {example_input} + >>> input + {indent_example_input} + >>> mask = {example_mask} + >>> mask + {indent_example_mask} + >>> {full_function_name}(input, {example_args}, mask=mask) + {indent_example_output} +""", + ) + + args_and_kwargs = dict( + # argument name sufficies separated by double underscore will + # be removed in the final documentation string. + sum=(("dim",), ("keepdim=False", "dtype=None", "mask=None")), + prod=(("dim",), ("keepdim=False", "dtype=None", "mask=None")), + cumsum=(("dim__as_int",), ("dtype=None", "mask=None")), + cumprod=(("dim__as_int",), ("dtype=None", "mask=None")), + amin=(("dim",), ("keepdim=False", "dtype=None", "mask=None")), + amax=(("dim",), ("keepdim=False", "dtype=None", "mask=None")), + argmin=(("dim__as_int",), ("keepdim=False", "dtype=None", "mask=None")), + argmax=(("dim__as_int",), ("keepdim=False", "dtype=None", "mask=None")), + mean=(("dim",), ("keepdim=False", "dtype=None", "mask=None")), + median=(("dim__as_int",), ("keepdim=False", "dtype=None", "mask=None")), + norm=( + ( + "ord", + "dim", + ), + ("keepdim=False", "dtype=None", "mask=None"), + ), + var=(("dim", "unbiased"), ("keepdim=False", "dtype=None", "mask=None")), + std=(("dim", "unbiased"), ("keepdim=False", "dtype=None", "mask=None")), + logsumexp=(("dim",), ("keepdim=False", "dtype=None", "mask=None")), + softmax=(("dim__as_int",), ("dtype=None", "mask=None")), + log_softmax=(("dim__as_int",), ("dtype=None", "mask=None")), + softmin=(("dim__as_int",), ("dtype=None", "mask=None")), + normalize=( + ( + "ord__required", + "dim__as_int", + ), + ("eps=1e-12", "dtype=None", "mask=None"), + ), + ) + + argument_declarations = dict( + dim="""\ +dim (int or tuple of ints, optional): the dimension or dimensions to reduce. + Default: None that is equivalent to ``tuple(range(input.ndim))``.""", + dim__as_int="""\ +dim (int): the dimension along which {operation name} is computed.""", + ord="""\ +ord (int, float, optional): the order of vector norm. Default: 2. + See :func:`torch.linalg.vector_norm` for a list of supported norms.""", + ord__required="""\ +ord (int, float): the order of vector norm. Default: 2. + See :func:`torch.linalg.vector_norm` for a list of supported norms.""", + unbiased="""\ +unbiased (bool): when True, use Bessel’s correction, otherwise, compute + the uncorrected sample variance.""", + eps="""\ +eps (float, optional): small value to avoid division by zero. Default: {default}.""", + keepdim="""\ +keepdim (bool, optional): whether the output tensor has + :attr:`dim` retained or not. Default: {default}.""", + dtype="""\ +dtype (:class:`torch.dtype`, optional): the desired data type + of returned tensor. If specified, the input tensor is + casted to :attr:`dtype` before the operation is + performed. 
Default: {default}.""", + mask="""\ +mask (:class:`torch.Tensor`, optional): the boolean tensor + containing the binary mask of validity of input tensor + elements. + Default: None that is equivalent to ``torch.ones(input.shape, dtype=torch.bool)``.""", + ) + + definitions = dict( + softmax="""\ +Let ``x`` be a sequence of unmasked elements of one-dimensional slice +of the :attr:`input` tensor. Softmax of i-th element in ``x`` is +defined as ``exp(x[i])/sum(exp(x))``.""", + log_softmax="""\ +Let ``x`` be a sequence of unmasked elements of one-dimensional slice +of the :attr:`input` tensor. LogSoftmax of i-th element in ``x`` is +defined as ``log(exp(x[i])/sum(exp(x)))``.""", + softmin="""\ +Let ``x`` be a sequence of unmasked elements of one-dimensional slice +of the :attr:`input` tensor. Softmin of i-th element in ``x`` is +defined as ``exp(-x[i])/sum(exp(-x))``.""", + normalize="""\ +Let ``x`` be a sequence of unmasked elements of one-dimensional slice +of the :attr:`input` tensor. Normalize of i-th element in ``x`` is +defined as ``x[i]/max(norm(x, p), eps)``.""", + cumsum="""\ +Let ``x`` be a sequence of unmasked elements of one-dimensional slice +of the :attr:`input` tensor. Cumsum of i-th element in ``x`` is +defined as ``sum(x[:i])``.""", + cumprod="""\ +Let ``x`` be a sequence of unmasked elements of one-dimensional slice +of the :attr:`input` tensor. Cumsum of i-th element in ``x`` is +defined as ``prod(x[:i])``.""", + ) + + reduction_names = dict( + sum="sum", + prod="product", + amax="maximum", + amin="minimum", + argmax="argmax", + argmin="argmin", + mean="mean", + median="median", + norm="norm", + var="variance", + std="standard_deviation", + logsumexp="logsumexp", + ) + + normalization_names = dict( + softmax="softmax", + log_softmax="log_softmax", + softmin="softmin", + normalize="normalize", + cumsum="cumulative_sum", + cumprod="cumulative_prod", + ) + + operation_names = {} + operation_names.update(reduction_names) + operation_names.update(normalization_names) + + # Default example data: + example_dim = 1 + example_input = torch.tensor([[-3, -2, -1], [0, 1, 2]]) + example_mask = torch.tensor([[True, False, True], [False, False, False]]) + example_args: Tuple[Any, ...] + if func.__name__ in {"norm", "normalize"}: + example_args = (2.0, example_dim) + example_input = example_input.to(dtype=torch.float32) + elif func.__name__ in {"var", "std"}: + example_args = (example_dim, False) + elif func.__name__ == "median": + example_args = (example_dim,) + example_input = example_input.to(dtype=torch.float32) + else: + example_args = (example_dim,) + + operation_args: Tuple[str, ...] + operation_kwargs: Tuple[str, ...] + operation_args, operation_kwargs = args_and_kwargs[func.__name__] + arg_declarations = [ + "\n ".join( + argument_declarations.get(a, f'{a.split("__", 1)[0]}: TBD.').splitlines() + ) + for a in operation_args + ] + kwarg_declarations = [ + "\n ".join( + argument_declarations.get( + a.split("=", 1)[0], f'{a.split("__", 1)[0]}: TBD.' 
+ ) + .format(default=a.split("=", 1)[1]) + .splitlines() + ) + for a in operation_kwargs + ] + + if func.__name__ in reduction_names: + op_kind = "reduction" + doc_sections = ["signature", "descr", "identity", "args", "example"] + elif func.__name__ in normalization_names: + op_kind = "normalization" + doc_sections = ["signature", "descr", "args", "example"] + example_input = example_input.to(dtype=torch.float32) + else: + assert 0 # add function name to operation names dictionaries + example_output = func(example_input, *example_args, mask=example_mask) + + template_data = { + "function_name": func.__name__, + "full_function_name": func.__module__ + "." + func.__name__, + "operation name": operation_names[func.__name__], + "operation_args": ", ".join(a.split("__", 1)[0] for a in operation_args), + "operation_kwargs": ", ".join(a.split("__", 1)[0] for a in operation_kwargs), + # one-line representation of a tensor: + "example_input": " ".join(str(example_input).split()), + "example_args": ", ".join(map(str, example_args)), + "example_mask": " ".join(str(example_mask).split()), + # multi-line representation of a tensor with indent + "indent_example_input": ("\n ").join(str(example_input).splitlines()), + "indent_example_mask": ("\n ").join(str(example_mask).splitlines()), + "indent_example_output": ("\n ").join(str(example_output).splitlines()), + } + + if func.__name__ in reduction_names: + template_data.update( + identity_uint8=_reduction_identity( + func.__name__, torch.tensor(0, dtype=torch.uint8) + ), + identity_int32=_reduction_identity( + func.__name__, torch.tensor(0, dtype=torch.int32) + ), + identity_float32=_reduction_identity( + func.__name__, torch.tensor(0, dtype=torch.float32) + ), + ) + if func.__name__ == "norm": + template_data.update( + identity_ord_ninf=_reduction_identity( + func.__name__, torch.tensor(0, dtype=torch.float32), float("-inf") + ) + ) + elif func.__name__ in normalization_names: + template_data.update(definition=definitions[func.__name__]) + else: + assert 0 # add function name to operation names dictionaries + template_data.update( + args_declarations=("\n ".join(arg_declarations)).format_map(template_data) + ) + template_data.update( + kwargs_declarations=("\n ".join(kwarg_declarations)).format_map( + template_data + ) + ) + + # Apply function name info to docstring templates: + templates = { + k: v.format_map(template_data) + for k, v in docstring_templates.items() + if k.startswith(op_kind) + } + templates.update( + (k, v.format_map(template_data) if isinstance(v, str) else v) + for k, v in template_data.items() + ) + + # Apply docstring templates to function doctring: + if func.__doc__ is None: + doc_template = "\n\n".join([f"{{{op_kind}_{sec}}}" for sec in doc_sections]) + else: + doc_template = func.__doc__ + return doc_template.format_map(templates) + + +def _reduction_identity(op_name: str, input: Tensor, *args): + """Return identity value as scalar tensor of a reduction operation on + given input, or None, if the identity value cannot be uniquely + defined for the given input. + + The identity value of the operation is defined as the initial + value to reduction operation that has a property ``op(op_identity, + value) == value`` for any value in the domain of the operation. + Or put it another way, including or excluding the identity value in + a list of operands will not change the reduction result. + + See https://github.com/pytorch/rfcs/pull/27 for more information. 
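+
+    For instance, an informal check of this property (assuming a float input,
+    for which the ``sum`` identity is ``0`` and the ``amax`` identity is ``-inf``):
+
+        >>> x = torch.tensor([2.0, 3.0])
+        >>> torch.sum(torch.cat([x, torch.tensor([0.0])]))
+        tensor(5.)
+        >>> torch.amax(torch.cat([x, torch.tensor([-torch.inf])]), dim=0)
+        tensor(3.)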
+ + """ + dtype: DType = input.dtype + device = input.device + op_name = op_name.rsplit(".", 1)[-1] # lstrip module name when present + if op_name in {"sum", "cumsum"}: + return torch.tensor(0, dtype=dtype, device=device) + elif op_name in {"prod", "cumprod"}: + return torch.tensor(1, dtype=dtype, device=device) + elif op_name in {"amax", "argmax", "logsumexp"}: + if torch.is_floating_point(input): + return torch.tensor(-torch.inf, dtype=dtype, device=device) + elif torch.is_signed(input) or dtype == torch.uint8: + return torch.tensor(torch.iinfo(dtype).min, dtype=dtype, device=device) + elif op_name in {"amin", "argmin"}: + if torch.is_floating_point(input): + return torch.tensor(torch.inf, dtype=dtype, device=device) + elif torch.is_signed(input) or dtype == torch.uint8: + return torch.tensor(torch.iinfo(dtype).max, dtype=dtype, device=device) + elif op_name == "mean": + # Strictly speaking, the identity value of the mean operation + # is the mean of the input. Since the mean value depends on + # the dim argument and it may be a non-scalar tensor, we + # consider the identity value of the mean operation ambiguous. + # Moreover, the mean value of empty input is undefined. + return None + elif op_name == "norm": + ord = args[0] if args else 2 + if ord == float("-inf"): + assert torch.is_floating_point(input), input.dtype + return torch.tensor(torch.inf, dtype=dtype, device=device) + return torch.tensor(0, dtype=dtype, device=device) + elif op_name == "median": + # We use NaN for now because the implementation is currently using torch.nanmedian + # and NaN is the identity for that function since it gets ignored + dtype = input.dtype if torch.is_floating_point(input) else torch.float + return torch.tensor(torch.nan, dtype=dtype, device=device) + elif op_name in {"var", "std"}: + return None + raise NotImplementedError(f"identity of {op_name} on {dtype} input") + + +def _canonical_dim(dim: DimOrDims, ndim: int) -> Tuple[int, ...]: + """Return dim argument as a tuple of sorted dim values.""" + dims: List[int] = [] + if dim == (): + # Currently, `dim=()` in reductions operations means "reduce + # over all dimensions" while in future, it will read "no + # reduce". See https://github.com/pytorch/pytorch/issues/29137 + # When gh-29137 is resolved, this if-block must be deleted. + dim = None + if dim is None: + return tuple(range(ndim)) + ndim = max(ndim, 1) + dim_ = (dim,) if isinstance(dim, (int, torch.SymInt)) else dim + for d in dim_: + if d in dims: + raise RuntimeError(f"dim={d} appears multiple times in the list of dims") + if d >= ndim or d < -ndim: + raise IndexError( + f"Dimension out of range (expected to be in range of [{-ndim}, {ndim-1}], but got {d})" + ) + dims.append(d % ndim) + return tuple(sorted(dims)) + + +def _sparse_coo_flatten_indices(indices: Tensor, shape: tuple): + # Flatted N-D indices to 1-D indices + flat_indices = indices.new_zeros(indices.size(1)) + for d, sz in enumerate(shape): + flat_indices.mul_(sz) + flat_indices.add_(indices[d]) + return flat_indices + + +def _any(input: Tensor, dim: tuple, keepdim: bool): + # Support torch.any with tuple dim argument. + # Workaround of https://github.com/pytorch/pytorch/issues/56586 + r = input + for d in reversed(dim): + r = r.any(dim=d, keepdim=keepdim) + return r + + +def _sparse_coo_where(mask: Tensor, input: Tensor, fill_value: Tensor) -> Tensor: + """Sparse variant of torch.where. Supports sparse COO and hybrid sparse COO tensors. 
+ + _sparse_coo_where implements the following invariant: + + _sparse_coo_where(mask, input, fill_value).to_dense(fill_value) == + torch.where(mask.to_dense(), input.to_dense(), torch.full(input.shape, fill_value)) + + where `a == b` means `assertEqual(a, b)`, mask is boolean sparse + tensor, and `to_dense(fill_value)` is like `to_dense()` except + that the unspecified elements are mapped to `fill_value` rather + than to `0`. + + Returns a sparse COO tensor with the following features: + + - all specified elements correspond to masked-in elements that + have the values of the input tensor. If there exists a masked-in + element (as specified by mask) that is not specified in the + input, in the result tensor, the corresponding element has value + 0. In the dense part of the sparse tensor, the masked-out + elements are replaced with fill_value. + + - all unspecified elements correspond to masked-out elements. + """ + + assert input.layout == torch.sparse_coo + assert mask.layout == input.layout + assert mask.shape == input.shape + assert mask.dense_dim() == input.dense_dim() # TODO: eliminate this restriction + + input = input.coalesce() + + # For set operations on sparse tensor indices, we'll convert + # multi-dimensional indices to 1-D indices for efficiency. + input_flat_indices = _sparse_coo_flatten_indices( + input.indices(), input.shape[: input.sparse_dim()] + ) + mask_flat_indices = _sparse_coo_flatten_indices( + mask.indices(), mask.shape[: mask.sparse_dim()] + ) + + # the set of mask flat indices that define masked-in elements: + if mask.dense_dim() > 0: + mask_values = _any( + mask.values(), tuple(range(1, input.sparse_dim() + 1)), False + ) + else: + mask_values = mask.values() + maskin_flat_indices = mask_flat_indices[mask_values.nonzero()[:, 0]] + + def intersection(i1, i2): + union, counts = torch.cat([i1, i2]).unique(return_counts=True) + return union, torch.where(counts.gt(1)) + + def minus(i1, i2): + union, counts = torch.cat([i1, i2]).unique(return_counts=True) + return intersection(union[torch.where(counts.eq(1))], i1) + + def _apply(a): + obj, w = a + return obj[w] + + # the set of input flat indices of specified and masked-in elements: + maskin_input_flat_indices = _apply( + intersection(maskin_flat_indices, input_flat_indices) + ) + _, w = intersection(input_flat_indices, maskin_input_flat_indices) + + # the indices and values of masked-in elements + where_input_indices = input.indices()[(slice(None),) + w] + where_input_values = input.values()[w] + + if mask.dense_dim() > 0: + # apply mask to the dense part of the input values: + _, w1 = intersection(mask_flat_indices, maskin_input_flat_indices) + where_mask_values = mask.values()[w1] + where_input_values = torch.where( + where_mask_values, where_input_values, fill_value + ) + + # the set of flat indices of unspecified input and masked-in elements: + maskin_zero_flat_indices = _apply( + minus(maskin_flat_indices, maskin_input_flat_indices) + ) + + # the indices of masked-in zero elements + _, w = intersection(mask_flat_indices, maskin_zero_flat_indices) + where_zero_indices = mask.indices()[(slice(None),) + w] + + # construct result + n = where_zero_indices.size(1) + if n == 0: + # the input is coalesced, hence input_flat_indices are ordered + # and the result is guaranteed to be coalesced: + result = torch.sparse_coo_tensor( + where_input_indices, where_input_values, input.shape + ) + return result._coalesced_(True) + + where_indices = torch.cat([where_input_indices, where_zero_indices], dim=1) + where_values = 
torch.cat( + [ + where_input_values, + where_input_values.new_zeros((n,) + where_input_values.shape[1:]), + ] + ) + result = torch.sparse_coo_tensor(where_indices, where_values, input.shape) + + # appending zero elements leads to uncoalesced sparse tensor + return result.coalesce() + + +def _sparse_coo_scatter_reduction_helper( + op, + mask_input: Tensor, + dims: Tuple[int, ...], + keepdim: bool, + dtype: Optional[DType] = None, +) -> Tensor: + reduce = op.__name__ + valid_reductions = ["sum", "prod", "amax", "amin"] + if reduce not in valid_reductions: + raise ValueError( + f"op must be one of {' '.join(valid_reductions)}, but got {reduce} instead" + ) + + output_dtype = dtype + values, indices = mask_input._values(), mask_input._indices() + input_dims = mask_input.dim() + num_sparse_dims = mask_input.sparse_dim() + reduced_sparse_dims = [] + retained_sparse_dims = [] + reduced_dense_dims = [] + + # promote dtype if specified + if values.dtype != output_dtype: + values = values.to(output_dtype) + + if keepdim: + output_shape = tuple( + 1 if i in dims else si for (i, si) in enumerate(mask_input.shape) + ) + else: + output_shape = tuple( + si for (i, si) in enumerate(mask_input.shape) if i not in dims + ) + + for d in dims: + if d >= input_dims: + continue + + if d < num_sparse_dims: + reduced_sparse_dims.append(d) + else: + reduced_dense_dims.append(d + 1 - num_sparse_dims) + + # Reduce dense dimensions + if len(reduced_dense_dims) > 0: + if reduce == "sum": + new_values = values + new_values = op(new_values, dim=reduced_dense_dims, keepdim=bool(keepdim)) + else: + # FIXME: Implement reductions for dense dimensions for ops with non-zero reduction identities + return NotImplemented + else: + new_values = values.clone() + + # Reduce sparse dimensions + if len(reduced_sparse_dims) == num_sparse_dims: + if reduce in {"amax", "amin"} and new_values.size(0) == 0: + # IndexError: amax(): Expected reduction dim 0 to have non-zero size. 
+ # sum()/prod() return the reduction identity when dim has size 0 but amax()/amin() do not + # See https://github.com/pytorch/pytorch/issues/61901 + new_values = _reduction_identity(reduce, new_values) + else: + new_values = op(new_values, dim=0) + if keepdim: + for _ in range(num_sparse_dims): + new_values = new_values.unsqueeze(0) + return new_values.to(dtype=output_dtype).to_sparse() + else: + new_indices = indices.clone() + if keepdim: + # zero out reduced sparse dimensions if keepdim = True + # ensures that the call to torch.unique folds duplicated indices together while preserving the dimension + new_indices[reduced_sparse_dims, :] = 0 + else: + # remove reduced sparse dimensions if keepdim = False + if len(reduced_sparse_dims) > 0: + retained_sparse_dims = [ + i + for i in range(num_sparse_dims) + if i not in set(reduced_sparse_dims) + ] + new_indices = new_indices.index_select( + 0, torch.tensor(retained_sparse_dims).to(mask_input.device) + ) + + # Use scatter_reduce to reduce items in the new_values tensor that correspond to the same indices in new_indices + if new_indices.numel() > 0: + # lexsort indices and get index tensor for scatter reduction + new_indices, inverse_indices = torch.unique( + new_indices, return_inverse=True, dim=1 + ) + out_shape = list(new_values.shape) + out_shape[0] = new_indices.shape[1] + for _ in range(new_values.ndim - 1): + inverse_indices = inverse_indices.unsqueeze(-1) + scatter_indices = inverse_indices.expand(new_values.shape) + # FIXME: temporary workaround for issue with bfloat16/float16 remove when acctype is implemented for scatter_reduce + if output_dtype in {torch.bfloat16, torch.float16}: + new_values = new_values.to(torch.float) + out = new_values.new_empty(out_shape) + new_values = out.scatter_reduce_( + 0, scatter_indices, new_values, reduce=reduce, include_self=False + ) + new_values = new_values.to(dtype=output_dtype) + else: + out = new_values.new_empty(out_shape) + new_values = out.scatter_reduce_( + 0, scatter_indices, new_values, reduce=reduce, include_self=False + ) + + return torch.sparse_coo_tensor( + new_indices, + new_values, + output_shape, + dtype=output_dtype, + device=mask_input.device, + ) + + +def _sparse_csr_segment_reduction_helper( + op, + mask_input: Tensor, + dims: Tuple[int, ...], + keepdim: bool, + dtype: Optional[DType] = None, +) -> Tensor: + # Currently, while sparse CSR is always 2D with no dense dimensions keepdim must be True + # FIXME: when dense dimensions are implemented for CSR tensors + assert ( + keepdim + ), "reduction operations on CSR tensors with keepdim=False is unsupported" + reduce = op.__name__ + valid_reductions = ["sum", "prod", "mean", "amax", "amin"] + if reduce not in valid_reductions: + raise ValueError( + f"op must be one of {' '.join(valid_reductions)}, but got {reduce} instead" + ) + device = mask_input.device + output_dtype = dtype + values, crow_indices, col_indices = ( + mask_input.values(), + mask_input.crow_indices(), + mask_input.col_indices(), + ) + + # promote dtype if specified + if values.dtype != output_dtype: + values = values.to(output_dtype) + + if len(dims) == 0: + return mask_input + if len(dims) == 1: + if dims[0] == 0: + new_col_indices, scatter_indices = torch.unique( + col_indices, return_inverse=True + ) + new_nnz = new_col_indices.shape[0] + new_crow_indices = torch.tensor([0, new_nnz]) + new_values = values.new_empty(new_col_indices.shape) + new_values.scatter_reduce_( + 0, scatter_indices, values, reduce, include_self=False + ) + new_shape = [1, 
mask_input.size(1)] + else: + assert ( + dims[0] == 1 + ), "Sparse CSR tensors are 2D and only support reduction along dim 0 or 1." + # all intervals new_crow_indices[i] - new_crow_indices[i-1] are 1 + # except for where crow_indices[i] == crow_indices[i-1] where the interval remains as 0 + new_crow_indices = torch.cat( + ( + crow_indices.new_zeros(1), + torch.cumsum(torch.diff(crow_indices) != 0, 0), + ), + 0, + ) + new_nnz = new_crow_indices[-1] + new_col_indices = col_indices.new_zeros(new_nnz) + new_values = torch._segment_reduce(values, reduce, offsets=crow_indices) # type: ignore[attr-defined] + new_shape = [mask_input.size(0), 1] + else: + assert len(dims) == 2 + nnz = min(1, values.numel()) + if nnz == 1: + op_kwargs = {"keepdim": True, "dtype": output_dtype} + # amax and amin do not support dtype kwarg + if reduce in ["amax", "amin"]: + del op_kwargs["dtype"] + new_values = op(values, 0, **op_kwargs) + else: + new_values = torch.empty(0, dtype=output_dtype) + new_col_indices = col_indices.new_zeros(nnz) + new_crow_indices = torch.tensor([0, nnz]) + new_shape = [1, nnz] + + return torch.sparse_csr_tensor( + new_crow_indices, + new_col_indices, + new_values, + new_shape, + dtype=output_dtype, + device=device, + ) + + +def _sparse_csr_where(mask: Tensor, input: Tensor, fill_value: Tensor) -> Tensor: + """Sparse variant of torch.where. Supports sparse CSR tensors.""" + # TODO: implement sparse CSR specific where operator for efficiency + return _sparse_coo_where( + mask.to_sparse_coo(), input.to_sparse_coo(), fill_value + ).to_sparse_csr() + + +def _where(mask: Tensor, input: Tensor, fill_value: Tensor) -> Tensor: + """torch.where with sparse inputs support. + + _where implements the following invariant: + + _where(mask, input, fill_value).to_dense(fill_value) == + torch.where(mask.to_dense(), input.to_dense(), torch.full(input.shape, fill_value)) + + where `a == b` means `assertEqual(a, b)`, mask is boolean sparse + tensor, and `to_dense(fill_value)` is like `to_dense()` except + that the unspecified elements are mapped to `fill_value` rather + than to `0`. + + Returns a sparse tensor with the following features: + + - all specified elements correspond to masked-in elements that + have the values of the input tensor. If there exists a masked-in + element (as specified by mask) that is not specified in the + input, in the result tensor, the corresponding element has value + 0. In the dense part of the sparse tensor, the masked-out + elements are replaced with fill_value. + + - all unspecified elements correspond to masked-out elements. + """ + if mask.layout == torch.strided: + return torch.where(mask, input, fill_value) + elif mask.layout == torch.sparse_coo: + return _sparse_coo_where(mask, input, fill_value) + elif mask.layout == torch.sparse_csr: + return _sparse_csr_where(mask, input, fill_value) + else: + raise ValueError( + f"_where expects strided or sparse COO or sparse CSR tensor but got {mask.layout}" + ) + + +def _input_mask(input: Union[Tensor, MaskedTensor], *args, **kwargs) -> Tensor: + """Return canonical input mask. + + A canonical input mask is defined as a boolean mask tensor that + shape and layout matches with the shape and the layout of the + input. + + The canonical input mask is computed from the :attr:`mask` tensor + content to meet the following criteria: + + 1. The shape of the canonical input mask is the same as the shape + of :attr:`input` tensor. 
If the mask tensor has a smaller shape + than the shape of the :attr:`input`, broadcasting rules will be + applied. Downcasting of mask is not supported. + + 2. The layout of the canonical input mask is the same as the + layout of the :attr:`input` tensor. If the mask has different + layout, it will be converted to the expected layout. In the + case of sparse COO layout, the canonical input mask will be + coalesced. + + 3. The dtype of the canonical input mask is torch.bool. If the + mask dtype is not bool then it will be converted to bool dtype + using `.to(dtype=bool)` method call. + + 4. The elements of the canonical input mask have boolean values + copied from the content of the :attr:`mask` tensor (after + possible broadcasting and dtype conversion transforms). In + general, the sparsity pattern of the sparse canonical input + mask need not to be the same as the sparsity pattern of the + sparse :attr:`input` tensor. + + """ + if input.layout not in {torch.strided, torch.sparse_coo, torch.sparse_csr}: + raise ValueError( + f"_input_mask expects strided or sparse COO or sparse CSR tensor but got {input.layout}" + ) + + mask = kwargs.get("mask") + + # default mask + if mask is None: + raise ValueError("_input_mask requires explicit mask") + + # mask shape must match with input shape + if mask.shape != input.shape: + if mask.ndim > input.ndim: + raise IndexError( + "_input_mask expected broadcastable mask (got mask dimensionality higher than of the input)" + ) + if mask.layout == torch.strided: + mask = torch.broadcast_to(mask.clone(), input.shape).to(dtype=torch.bool) + elif mask.layout == torch.sparse_coo: + mask = torch._sparse_broadcast_to(mask, input.shape) + else: + assert mask.layout == torch.sparse_csr + # Broadcasting of CSR tensors is not implemented. Working + # around by using COO layout. 
+ mask = torch._sparse_broadcast_to( + mask.to_sparse(), input.shape + ).to_sparse_csr() + + # mask layout must match with input layout + if mask.layout != input.layout: + if input.layout == torch.strided: + mask = mask.to_dense() + elif input.layout == torch.sparse_coo: + if mask.layout == torch.strided: + mask = mask.to_sparse(input.sparse_dim()) + else: + mask = mask.to_sparse() + else: + assert input.layout == torch.sparse_csr + mask = mask.to_sparse_csr() + + # sparse mask must be coalesced + if mask.layout == torch.sparse_coo: + mask = mask.coalesce() + + # mask is a boolean tensor + mask = mask.to(dtype=torch.bool) + + return mask + + +def _output_mask(op, input: Tensor, *args, **kwargs) -> Tensor: + """Return output mask of masked operation applied to given arguments.""" + if callable(op): + is_reduction = op.__name__ in { + "sum", + "prod", + "amax", + "amin", + "argmax", + "argmin", + "mean", + "median", + "norm", + "var", + "std", + "logsumexp", + } + is_normalization = op.__name__ in { + "softmax", + "log_softmax", + "softmin", + "normalize", + "cumsum", + "cumprod", + } + if is_reduction: + if op.__name__ == "norm": + if args: + args = args[1:] # lstrip ord argument + dim = args[0] if args else kwargs.get("dim") + outmask = _input_mask(input, *args, **kwargs) + keepdim = kwargs.get("keepdim", False) + dim_ = _canonical_dim(dim, input.ndim) + return _any(outmask, dim_, bool(keepdim)) + elif is_normalization: + return _input_mask(input, *args, **kwargs) + else: + raise ValueError( + f"_output_mask expected masked operation (got callable {op.__module__}.{op.__name__})" + ) + else: + raise ValueError( + f"_output_mask expected masked operation (got {type(op).__name__} object)" + ) + + +def _combine_input_and_mask( + op, input: Union[MaskedTensor, Tensor], mask, *args +) -> Tensor: + def helper(input, mask): + if mask is None: + return input + canonical_mask = _input_mask(input, mask=mask) + if callable(op): + fill_value = _reduction_identity(op.__name__, input, *args) + return _where(canonical_mask, input, fill_value) + else: + raise ValueError( + f"_combine_input_and_mask expected masked operation (got {type(op).__name__} object)" + ) + + class Combine(torch.autograd.Function): + @staticmethod + def forward(ctx, input, mask): + """Return input with masked-out elements eliminated for the given operations.""" + ctx.save_for_backward(mask) + + if mask is not None: + ctx.mark_non_differentiable(mask) + + return helper(input, mask) + + @staticmethod + def backward(ctx, grad_output): + (mask,) = ctx.saved_tensors + grad_data = ( + grad_output.get_data() if is_masked_tensor(grad_output) else grad_output + ) + result = as_masked_tensor(grad_data, mask) + return result, None + + return ( + Combine.apply(input.get_data(), input.get_mask()) # type: ignore[union-attr] + if is_masked_tensor(input) + else helper(input, mask) + ) + + +@_apply_docstring_templates +def sum( + input: Union[Tensor, MaskedTensor], + dim: DimOrDims = None, + *, + keepdim: Optional[bool] = False, + dtype: Optional[DType] = None, + mask: Optional[Tensor] = None, +) -> Tensor: + # __doc__ is generated by _apply_docstring_templates decorator + if dtype is None: + # promote integer types to int64 when output dtype is not specified + if input.layout == torch.sparse_csr: + if input.dtype in { + torch.uint8, + torch.bool, + torch.int8, + torch.int16, + torch.int32, + }: + # csr.to(dtype=torch.int64) is not implemented, so + # using coo.to on input to ensure the promoted dtype + input = 
input.to_sparse_coo().to(dtype=torch.int64).to_sparse_csr() + else: + dtype = input.dtype + else: + dtype = input.dtype + if input.dtype in { + torch.uint8, + torch.bool, + torch.int8, + torch.int16, + torch.int32, + }: + dtype = torch.int64 + dim_ = _canonical_dim(dim, input.ndim) + mask_input = _combine_input_and_mask(sum, input, mask) + if mask_input.layout == torch.strided: + return torch.sum(mask_input, dim_, bool(keepdim), dtype=dtype) + elif mask_input.layout == torch.sparse_coo: + return _sparse_coo_scatter_reduction_helper( + torch.sum, mask_input, dim_, bool(keepdim), dtype + ) + elif mask_input.layout == torch.sparse_csr: + return torch._sparse_csr_sum( + mask_input, dim=list(dim_), keepdim=bool(keepdim), dtype=dtype + ) + else: + raise ValueError( + f"masked sum expects strided, sparse_coo or sparse_csr tensor (got {mask_input.layout} tensor)" + ) + + +@_apply_docstring_templates +def prod( + input: Union[Tensor, MaskedTensor], + dim: DimOrDims = None, + *, + keepdim: Optional[bool] = False, + dtype: Optional[DType] = None, + mask: Optional[Tensor] = None, +) -> Tensor: + # __doc__ is generated by _apply_docstring_templates decorator + if dtype is None: + # promote integer types to int64 when output dtype is not specified + if input.layout == torch.sparse_csr: + if input.dtype in { + torch.uint8, + torch.bool, + torch.int8, + torch.int16, + torch.int32, + }: + # csr.to(dtype=torch.int64) is not implemented, so + # using coo.to on input to ensure the promoted dtype + input = input.to_sparse_coo().to(dtype=torch.int64).to_sparse_csr() + else: + dtype = input.dtype + else: + dtype = input.dtype + if input.dtype in { + torch.uint8, + torch.bool, + torch.int8, + torch.int16, + torch.int32, + }: + dtype = torch.int64 + dim_ = _canonical_dim(dim, input.ndim) + mask_input = _combine_input_and_mask(prod, input, mask) + if mask_input.layout == torch.strided: + # Workaround https://github.com/pytorch/pytorch/issues/56586 + result = mask_input + result = result.to(dtype=dtype) + for d in reversed(dim_): + result = result.prod(dim=d, keepdim=bool(keepdim)) + return result + elif mask_input.layout == torch.sparse_coo: + if mask is None: + # See comment in the sparse_csr branch, the same issue arises for sparse_coo tensors + raise ValueError( + "masked prod expects explicit mask for sparse_coo tensor input" + ) + return _sparse_coo_scatter_reduction_helper( + torch.prod, mask_input, dim_, bool(keepdim), dtype + ) + elif mask_input.layout == torch.sparse_csr: + if mask is None: + # mask is None corresponds to all-True mask. The + # unspecified elements in the CSR tensor correspond to + # zero values. Hence, the prod reduction result is + # automatically zero unless all elements are specified. + # A semi-optimal way to take this into account is to use: + # + # masked_prod(csr, ..., mask=None) == torch._sparse_csr_prod(csr, ...) * all(csr.nonzero(), ...) + # + # but that requires implementing `all` and `nonzero` + # support for sparse csr tensors. 
+ raise ValueError( + "masked prod expects explicit mask for sparse_csr tensor input" + ) + return torch._sparse_csr_prod( + mask_input, dim=list(dim_), keepdim=bool(keepdim), dtype=dtype + ) + else: + raise ValueError( + f"masked prod expects strided, sparse_coo or sparse_csr tensor (got {mask_input.layout} tensor)" + ) + + +@_apply_docstring_templates +def cumsum( + input: Tensor, + dim: int, + *, + dtype: Optional[DType] = None, + mask: Optional[Tensor] = None, +) -> Tensor: + if dtype is None: + dtype = input.dtype + dim_ = _canonical_dim(dim, input.ndim)[0] + mask_input = _combine_input_and_mask(sum, input, mask) + if mask_input.layout == torch.strided: + return torch.cumsum(mask_input, dim_, dtype=dtype).to(dtype=dtype) + else: + raise ValueError( + f"masked cumsum expects strided tensor (got {mask_input.layout} tensor)" + ) + + +@_apply_docstring_templates +def cumprod( + input: Tensor, + dim: int, + *, + dtype: Optional[DType] = None, + mask: Optional[Tensor] = None, +) -> Tensor: + if dtype is None: + dtype = input.dtype + dim_ = _canonical_dim(dim, input.ndim)[0] + mask_input = _combine_input_and_mask(prod, input, mask) + if mask_input.layout == torch.strided: + return torch.cumprod(mask_input, dim_, dtype=dtype).to(dtype=dtype) + else: + raise ValueError( + f"masked cumprod expects strided tensor (got {mask_input.layout} tensor)" + ) + + +@_apply_docstring_templates +def amax( + input: Union[Tensor, MaskedTensor], + dim: DimOrDims = None, + *, + keepdim: Optional[bool] = False, + dtype: Optional[DType] = None, + mask: Optional[Tensor] = None, +) -> Tensor: + """\ +{reduction_signature} + +{reduction_descr} + +{reduction_identity_dtype} + +{reduction_args} + +{reduction_example}""" + if dtype is None: + dtype = input.dtype + + mask_input = _combine_input_and_mask(amax, input, mask) + dim_ = _canonical_dim(dim, mask_input.ndim) + if mask_input.layout == torch.strided: + return torch.amax(mask_input, dim_, bool(keepdim)).to(dtype=dtype) + elif mask_input.layout == torch.sparse_coo: + if mask is None: + # See comment in the sparse_csr branch of prod, a similar issue arises here + # where unspecified elements along a dimension may need to be reduced with the result + raise ValueError( + "masked amax expects explicit mask for sparse_coo tensor input" + ) + return _sparse_coo_scatter_reduction_helper( + torch.amax, mask_input, dim_, bool(keepdim), dtype + ) + elif mask_input.layout == torch.sparse_csr: + if mask is None: + raise ValueError( + "masked amax expects explicit mask for sparse_csr tensor input" + ) + return _sparse_csr_segment_reduction_helper( + torch.amax, mask_input, dim_, bool(keepdim), dtype + ) + else: + raise ValueError( + f"masked amax expects strided, sparse_coo or sparse_csr tensor (got {mask_input.layout} tensor)" + ) + + +@_apply_docstring_templates +def amin( + input: Union[Tensor, MaskedTensor], + dim: DimOrDims = None, + *, + keepdim: Optional[bool] = False, + dtype: Optional[DType] = None, + mask: Optional[Tensor] = None, +) -> Tensor: + """\ +{reduction_signature} + +{reduction_descr} + +{reduction_identity_dtype} + +{reduction_args} + +{reduction_example}""" + if dtype is None: + dtype = input.dtype + + mask_input = _combine_input_and_mask(amin, input, mask) + dim_ = _canonical_dim(dim, mask_input.ndim) + if mask_input.layout == torch.strided: + return torch.amin(mask_input, dim_, bool(keepdim)).to(dtype=dtype) + elif mask_input.layout == torch.sparse_coo: + if mask is None: + # See comment in the sparse_csr branch of prod, a similar issue arises here + # 
where unspecified elements along a dimension may need to be reduced with the result + raise ValueError( + "masked amax expects explicit mask for sparse_coo tensor input" + ) + return _sparse_coo_scatter_reduction_helper( + torch.amin, mask_input, dim_, bool(keepdim), dtype + ) + elif mask_input.layout == torch.sparse_csr: + if mask is None: + raise ValueError( + "masked amin expects explicit mask for sparse_csr tensor input" + ) + return _sparse_csr_segment_reduction_helper( + torch.amin, mask_input, dim_, bool(keepdim), dtype + ) + else: + raise ValueError( + f"masked amin expects strided, sparse_coo or sparse_csr tensor (got {mask_input.layout} tensor)" + ) + + +@_apply_docstring_templates +def argmax( + input: Union[Tensor, MaskedTensor], + dim: Optional[int] = None, + *, + keepdim: Optional[bool] = False, + dtype: Optional[DType] = None, + mask: Optional[Tensor] = None, +) -> Tensor: + """\ +{reduction_signature} +{reduction_descr} +{reduction_identity_dtype} +{reduction_args} +{reduction_example}""" + if dtype is None: + dtype = input.dtype + mask_input = _combine_input_and_mask(argmax, input, mask) + if mask_input.layout == torch.strided: + return torch.argmax(mask_input, dim, bool(keepdim)).to(dtype=dtype) + else: + raise ValueError( + f"masked argmax expects strided tensor (got {mask_input.layout} tensor)" + ) + + +@_apply_docstring_templates +def argmin( + input: Union[Tensor, MaskedTensor], + dim: Optional[int] = None, + *, + keepdim: Optional[bool] = False, + dtype: Optional[DType] = None, + mask: Optional[Tensor] = None, +) -> Tensor: + """\ +{reduction_signature} +{reduction_descr} +{reduction_identity_dtype} +{reduction_args} +{reduction_example}""" + if dtype is None: + dtype = input.dtype + mask_input = _combine_input_and_mask(argmin, input, mask) + if mask_input.layout == torch.strided: + return torch.argmin(mask_input, dim, bool(keepdim)).to(dtype=dtype) + else: + raise ValueError( + f"masked argmin expects strided tensor (got {mask_input.layout} tensor)" + ) + + +@_apply_docstring_templates +def mean( + input: Union[Tensor, MaskedTensor], + dim: DimOrDims = None, + *, + keepdim: Optional[bool] = False, + dtype: Optional[DType] = None, + mask: Optional[Tensor] = None, +) -> Tensor: + """\ +{reduction_signature} + +{reduction_descr} + +By definition, the identity value of a mean operation is the mean +value of the tensor. If all elements of the input tensor along given +dimension(s) :attr:`dim` are masked-out, the identity value of the +mean is undefined. Due to this ambiguity, the elements of output +tensor with strided layout, that correspond to fully masked-out +elements, have ``nan`` values. 
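+
+For instance (an informal sketch of the strided-layout computation, not the
+implementation itself), the masked mean amounts to a masked sum divided by the
+count of masked-in elements, so a fully masked-out row yields ``nan``:
+
+    >>> x = torch.tensor([[1., 3.], [3., 4.]])
+    >>> m = torch.tensor([[True, True], [False, False]])
+    >>> torch.sum(torch.where(m, x, torch.zeros_like(x)), 1) / m.sum(1)
+    tensor([2., nan])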
+ +{reduction_args} + +{reduction_example}""" + if dtype is None: + dtype = input.dtype + if input.layout == torch.strided: + if mask is None: + # TODO: compute count analytically + count = sum( + torch.ones(input.shape, dtype=torch.int64, device=input.device), + dim, + keepdim=keepdim, + ) + total = sum(input, dim, keepdim=keepdim, dtype=dtype) + else: + inmask = _input_mask(input, mask=mask) + count = sum( + inmask.new_ones(input.shape, dtype=torch.int64), + dim, + keepdim=keepdim, + mask=inmask, + ) + total = sum(input, dim, keepdim=keepdim, dtype=dtype, mask=inmask) + return total / count + elif input.layout == torch.sparse_csr: + mask_input = _combine_input_and_mask(mean, input, mask) + dim_ = _canonical_dim(dim, mask_input.ndim) + if mask is None: + raise ValueError( + "masked mean expects explicit mask for sparse_csr tensor input" + ) + return _sparse_csr_segment_reduction_helper( + torch.mean, mask_input, dim_, bool(keepdim), dtype + ) + else: + raise ValueError( + f"masked mean expects strided or sparse_csr tensor (got {input.layout} tensor)" + ) + + +@_apply_docstring_templates +def median( + input: Union[Tensor, MaskedTensor], + dim: int = -1, + *, + keepdim: bool = False, + dtype: Optional[DType] = None, + mask: Optional[Tensor] = None, +) -> Tensor: + + """\ +{reduction_signature} +{reduction_descr} +By definition, the identity value of a median operation is the median +value of the tensor. If all elements of the input tensor along given +dimension(s) :attr:`dim` are masked-out, the identity value of the +median is undefined. Due to this ambiguity, the elements of output +tensor with strided layout, that correspond to fully masked-out +elements, have ``nan`` values. +{reduction_args} +{reduction_example}""" + if dtype is None: + dtype = input.dtype + dim_ = _canonical_dim(dim, input.ndim)[0] + is_float = torch.is_floating_point(input) + if not is_float: + input = input.to(dtype=torch.float) + mask_input = _combine_input_and_mask(median, input, mask) + if mask_input.layout == torch.strided: + output = torch.nanmedian(mask_input, dim_, keepdim).values + if is_float: + return output + elif not is_float and not torch.isnan(output).any(): + return output.to(dtype=dtype) + else: + raise ValueError( + "masked median expects no fully masked out rows if dtype is not floating point" + ) + else: + raise ValueError( + f"masked median expects strided tensor (got {mask_input.layout} tensor)" + ) + + +@_apply_docstring_templates +def logsumexp( + input: Tensor, + dim: DimOrDims = None, + *, + keepdim: bool = False, + dtype: Optional[DType] = None, + mask: Optional[Tensor] = None, +) -> Tensor: + if dtype is None: + dtype = input.dtype + dim_ = _canonical_dim(dim, input.ndim) + mask_input = _combine_input_and_mask(logsumexp, input, mask) + if mask_input.layout == torch.strided: + return torch.logsumexp(mask_input, dim_, keepdim=keepdim).to(dtype=dtype) + else: + raise ValueError( + f"masked logsumexp expects strided tensor (got {mask_input.layout} tensor)" + ) + + +# Cannot use _apply_docstring_templates as it is only set up for reductions and normalizations +def logaddexp( + input: Union[Tensor, MaskedTensor], + other: Union[Tensor, MaskedTensor], + *, + dtype: Optional[DType] = None, + input_mask: Optional[Tensor] = None, + other_mask: Optional[Tensor] = None, +) -> Tensor: + """logaddexp(input, other, *, dtype=None, input_mask=None, other_mask=None) -> Tensor + +Returns logaddexp of all the elements in the :attr:`input` and the :attr:`other` +tensor. 
The :attr:`input` elements are masked out according to the boolean tensor +:attr:`input_mask` and the attr:`other` elements are masked out according to the boolean tensor +:attr:`other_mask`. + +The shapes of a mask tensor and the tensor to be masked +don't need to match, but they must be :ref:`broadcastable +` and the dimensionality of the mask +tensor must not be greater than of the tensor to be masked. + +Args: + input (Tensor): the input tensor + other (Tensor): the second input tensor + +Keyword args: + dtype (:class:`torch.dtype`, optional): the desired data type + of returned tensor. If specified, the output tensor is + casted to :attr:`dtype` after the operation is + performed. Default: None. + input_mask (:class:`torch.Tensor`, optional): the boolean tensor + containing the binary mask of validity of :attr:`input` tensor elements. + Default: None that is equivalent to ``torch.ones(input.shape, dtype=torch.bool)``. + other_mask (:class:`torch.Tensor`, optional): the boolean tensor + containing the binary mask of validity of :attr:`other` tensor elements. + Default: None that is equivalent to ``torch.ones(other.shape, dtype=torch.bool)``. + +Example:: + + >>> input = torch.tensor([-100.0, -200, -300]) + >>> input + tensor([-100., -200., -300.]) + >>> other = torch.tensor([-1.0, -2, -3]) + >>> other + tensor([-1., -2., -3.]) + >>> mask = torch.tensor([True, False, True]) + >>> mask + tensor([ True, False, True]) + >>> torch.masked._ops.logaddexp(input, other, input_mask=mask, other_mask=mask) + tensor([-1., -inf, -3.]) +""" + if dtype is None: + dtype = input.dtype + if input.layout == torch.strided and other.layout == torch.strided: + mask_input = _combine_input_and_mask(logsumexp, input, input_mask) + mask_other = _combine_input_and_mask(logsumexp, other, other_mask) + return torch.logaddexp(mask_input, mask_other).to(dtype=dtype) + else: + raise ValueError( + f"masked logaddexp expects strided tensors (got {input.layout} tensor for input, {other.layout} for other)" + ) + + +@_apply_docstring_templates +def norm( + input: Union[Tensor, MaskedTensor], + ord: Optional[float] = 2.0, + dim: DimOrDims = None, + *, + keepdim: Optional[bool] = False, + dtype: Optional[DType] = None, + mask: Optional[Tensor] = None, +) -> Tensor: + """\ +{reduction_signature} + +{reduction_descr} + +The identity value of norm operation, which is used to start the +reduction, is ``{identity_float32}``, except for ``ord=-inf`` it is +``{identity_ord_ninf}``. 
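+
+For instance (an informal check, assuming a floating-point input): appending an
+``inf`` entry, the ``ord=-inf`` identity, leaves the ``ord=-inf`` norm, i.e. the
+minimum absolute value, unchanged:
+
+    >>> x = torch.tensor([3.0, -4.0])
+    >>> torch.linalg.vector_norm(torch.cat([x, torch.tensor([torch.inf])]), ord=float("-inf"))
+    tensor(3.)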
+ +{reduction_args} + +{reduction_example}""" + if dtype is None: + dtype = input.dtype + mask_input = _combine_input_and_mask(norm, input, mask, ord) + if mask_input.layout == torch.strided: + dim_ = _canonical_dim(dim, input.ndim) + return torch.linalg.vector_norm( + mask_input, ord, dim_, bool(keepdim), dtype=dtype + ) + else: + raise ValueError( + f"masked norm expects strided tensor (got {mask_input.layout} tensor)" + ) + + +def _std_var( + input: Union[Tensor, MaskedTensor], + dim: DimOrDims, + unbiased: Optional[bool], + *, + correction_opt: Optional[Union[int, float]], + keepdim: Optional[bool], + dtype: Optional[DType], + mask: Optional[Tensor], + take_sqrt: Optional[bool], +) -> Tensor: + assert (unbiased is None or correction_opt is None), "Only one of unbiased and correction may be given" + correction = 1.0 + if unbiased is not None: + correction = 1.0 if unbiased else 0.0 + if correction_opt is not None: + correction = sym_float(correction_opt) + + if dtype is None: + dtype = input.dtype + if not (dtype.is_floating_point or dtype.is_complex): + dtype = torch.float32 + compute_dtype = dtype + if not (compute_dtype.is_floating_point or compute_dtype.is_complex): + compute_dtype = torch.float32 + if input.layout == torch.strided: + if mask is None: + # TODO: compute count analytically + count = sum( + torch.ones(input.shape, dtype=torch.int64, device=input.device), + dim, + keepdim=True, + ) + sample_total = sum(input, dim, keepdim=True, dtype=dtype) + else: + inmask = _input_mask(input, mask=mask) + count = sum( + inmask.new_ones(input.shape, dtype=torch.int64), + dim, + keepdim=True, + mask=inmask, + ) + sample_total = sum(input, dim, keepdim=True, dtype=dtype, mask=inmask) + # TODO: replace torch.subtract/divide/square/maximum with + # masked subtract/divide/square/maximum when these will be + # available. + sample_mean = torch.divide(sample_total, count) + x = torch.subtract(input, sample_mean) + if mask is None: + total = sum(x * x.conj(), dim, keepdim=keepdim, dtype=compute_dtype) + else: + total = sum( + x * x.conj(), dim, keepdim=keepdim, dtype=compute_dtype, mask=inmask + ) + if not keepdim: + count = count.reshape(total.shape) + if correction != 0: + real_dtype = (corresponding_real_dtype(compute_dtype) + if compute_dtype.is_complex else compute_dtype) + count = count.to(real_dtype) + count = torch.subtract(count, correction) + count = torch.maximum(count, count.new_zeros([])) + output = torch.divide(total, count).to(dtype=dtype) + if take_sqrt: + output = torch.sqrt(output) + return output + else: + raise ValueError( + f"masked std/var expects strided tensor (got {input.layout} tensor)" + ) + + +@_apply_docstring_templates +def var( + input: Union[Tensor, MaskedTensor], + dim: DimOrDims = None, + unbiased: Optional[bool] = None, + *, + correction: Optional[Union[int, float]] = None, + keepdim: Optional[bool] = False, + dtype: Optional[DType] = None, + mask: Optional[Tensor] = None, +) -> Tensor: + """\ +{reduction_signature} +{reduction_descr} +The identity value of sample variance operation is undefined. The +elements of output tensor with strided layout, that correspond to +fully masked-out elements, have ``nan`` values. 
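+
+For instance (an informal sketch of the strided-layout computation, not the
+implementation itself), the masked variance divides the masked sum of squared
+deviations by ``count - 1`` when ``unbiased=True`` (Bessel's correction) and by
+``count`` otherwise:
+
+    >>> x = torch.tensor([1., 2., 4.])
+    >>> m = torch.tensor([True, True, False])
+    >>> mu = (x * m).sum() / m.sum()
+    >>> ((x - mu) ** 2 * m).sum() / (m.sum() - 1)
+    tensor(0.5000)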
+{reduction_args} +{reduction_example}""" + return _std_var( + input=input, + dim=dim, + unbiased=unbiased, + correction_opt=correction, + keepdim=keepdim, + dtype=dtype, + mask=mask, + take_sqrt=False, + ) + + +@_apply_docstring_templates +def std( + input: Union[Tensor, MaskedTensor], + dim: DimOrDims = None, + unbiased: Optional[bool] = None, + *, + correction: Optional[int] = None, + keepdim: Optional[bool] = False, + dtype: Optional[DType] = None, + mask: Optional[Tensor] = None, +) -> Tensor: + """\ +{reduction_signature} +{reduction_descr} +The identity value of sample standard deviation operation is undefined. The +elements of output tensor with strided layout, that correspond to +fully masked-out elements, have ``nan`` values. +{reduction_args} +{reduction_example}""" + return _std_var( + input=input, + dim=dim, + unbiased=unbiased, + correction_opt=correction, + keepdim=keepdim, + dtype=dtype, + mask=mask, + take_sqrt=True, + ) + + +@_apply_docstring_templates +def softmax( + input: Union[Tensor, MaskedTensor], + dim: int, + *, + dtype: Optional[DType] = None, + mask: Optional[Tensor] = None, +) -> Tensor: + if dtype is None: + dtype = input.dtype + dim_ = _canonical_dim(dim, input.ndim)[0] + mask_input = _combine_input_and_mask(amax, input, mask) + if mask_input.layout == torch.strided: + return torch.nn.functional.softmax(mask_input, dim_, dtype=dtype) + else: + raise ValueError( + f"masked softmax expects strided tensor (got {mask_input.layout} tensor)" + ) + + +@_apply_docstring_templates +def log_softmax( + input: Union[Tensor, MaskedTensor], + dim: int, + *, + dtype: Optional[DType] = None, + mask: Optional[Tensor] = None, +) -> Tensor: + if dtype is None: + dtype = input.dtype + dim_ = _canonical_dim(dim, input.ndim)[0] + mask_input = _combine_input_and_mask(amax, input, mask) + if mask_input.layout == torch.strided: + return torch.nn.functional.log_softmax(mask_input, dim_, dtype=dtype) + else: + raise ValueError( + f"masked log_softmax expects strided tensor (got {mask_input.layout} tensor)" + ) + + +@_apply_docstring_templates +def softmin( + input: Union[Tensor, MaskedTensor], + dim: int, + *, + dtype: Optional[DType] = None, + mask: Optional[Tensor] = None, +) -> Tensor: + if dtype is None: + dtype = input.dtype + dim_ = _canonical_dim(dim, input.ndim)[0] + mask_input = _combine_input_and_mask(amin, input, mask) + if mask_input.layout == torch.strided: + return torch.nn.functional.softmin(mask_input, dim_, dtype=dtype) + else: + raise ValueError( + f"masked softmin expects strided tensor (got {mask_input.layout} tensor)" + ) + + +@_apply_docstring_templates +def normalize( + input: Union[Tensor, MaskedTensor], + ord: float, + dim: int, + *, + eps: float = 1e-12, + dtype: Optional[DType] = None, + mask: Optional[Tensor] = None, +) -> Tensor: + if dtype is None: + dtype = input.dtype + dim_ = _canonical_dim(dim, input.ndim)[0] + # TODO: eliminate mask_input as unnecessary when using masked divide. + mask_input = _combine_input_and_mask(sum, input, mask) + if mask_input.layout == torch.strided: + nrm_ = norm(input, ord, dim, keepdim=True, dtype=dtype, mask=mask) + # TODO: replace torch.maximum with masked maximum when available. + denom = torch.maximum(nrm_, nrm_.new_full([], eps)) + # TODO: replace torch.divide with masked divide when available. 
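+        # Informal sketch of the dense semantics: masked-out entries of
+        # mask_input are already 0 (the identity of the sum op used above),
+        # and every entry of a slice is divided by that slice's masked norm
+        # clamped from below by eps.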
+ return torch.divide(mask_input, denom) + else: + raise ValueError( + f"masked normalize expects strided tensor (got {mask_input.layout} tensor)" + ) diff --git a/env-llmeval/lib/python3.10/site-packages/torch/masked/maskedtensor/__init__.py b/env-llmeval/lib/python3.10/site-packages/torch/masked/maskedtensor/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e38e03c87086cf50d031dd5591f64f65399d6ac1 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torch/masked/maskedtensor/__init__.py @@ -0,0 +1,8 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates +# flake8: noqa + +from .binary import _apply_native_binary, _is_native_binary +from .core import is_masked_tensor, MaskedTensor +from .passthrough import _apply_pass_through_fn, _is_pass_through_fn +from .reductions import _apply_reduction, _is_reduction +from .unary import _apply_native_unary, _is_native_unary diff --git a/env-llmeval/lib/python3.10/site-packages/torch/masked/maskedtensor/__pycache__/__init__.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/torch/masked/maskedtensor/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..a95836da13a31d2141fdeb7dca86d826453fb6fa Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/torch/masked/maskedtensor/__pycache__/__init__.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/torch/masked/maskedtensor/__pycache__/_ops_refs.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/torch/masked/maskedtensor/__pycache__/_ops_refs.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..14e8502fad0f1ef4d4cbfaf6f17d127b57a77ca5 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/torch/masked/maskedtensor/__pycache__/_ops_refs.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/torch/masked/maskedtensor/__pycache__/binary.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/torch/masked/maskedtensor/__pycache__/binary.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..f4149bade98c9d7a8a32ce433ba3c9c6da3ed4a1 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/torch/masked/maskedtensor/__pycache__/binary.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/torch/masked/maskedtensor/__pycache__/core.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/torch/masked/maskedtensor/__pycache__/core.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..ac2ec58495db0efd52233babddddd9dad1e386b8 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/torch/masked/maskedtensor/__pycache__/core.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/torch/masked/maskedtensor/__pycache__/creation.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/torch/masked/maskedtensor/__pycache__/creation.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..34bbcbd6d3cacc1b0b10d6cf060a5bd3457eba55 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/torch/masked/maskedtensor/__pycache__/creation.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/torch/masked/maskedtensor/__pycache__/passthrough.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/torch/masked/maskedtensor/__pycache__/passthrough.cpython-310.pyc new file mode 100644 index 
0000000000000000000000000000000000000000..287e20159f17f229a827d57a4f7b7bd2c887cad4 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/torch/masked/maskedtensor/__pycache__/passthrough.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/torch/masked/maskedtensor/__pycache__/reductions.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/torch/masked/maskedtensor/__pycache__/reductions.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..1c50ac53231fede82bd8a8158f19575903dd1b74 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/torch/masked/maskedtensor/__pycache__/reductions.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/torch/masked/maskedtensor/__pycache__/unary.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/torch/masked/maskedtensor/__pycache__/unary.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..bb48b0cc37233bc53e3cd3cfeb0df7199ad6c07b Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/torch/masked/maskedtensor/__pycache__/unary.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/torch/masked/maskedtensor/_ops_refs.py b/env-llmeval/lib/python3.10/site-packages/torch/masked/maskedtensor/_ops_refs.py new file mode 100644 index 0000000000000000000000000000000000000000..81ca2bda65b9c79d7f693e9d2e88b2b7681d6f26 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torch/masked/maskedtensor/_ops_refs.py @@ -0,0 +1,473 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates + +from functools import partial +import torch + +from .binary import ( + _apply_native_binary, + NATIVE_BINARY_FNS, + NATIVE_INPLACE_BINARY_FNS, +) +from .core import is_masked_tensor, MaskedTensor, _get_data, _masks_match, _maybe_get_mask +from .passthrough import ( + _apply_pass_through_fn, + PASSTHROUGH_FNS +) +from .reductions import ( + _apply_reduction, + NATIVE_REDUCE_FNS, + TORCH_REDUCE_FNS, + TENSOR_REDUCE_FNS, +) +from .unary import ( + _apply_native_unary, + NATIVE_UNARY_FNS, + NATIVE_INPLACE_UNARY_FNS, +) + + +__all__ = [] # type: ignore[var-annotated] + + +def _check_args_kwargs_length(args, kwargs, error_prefix, len_args=None, len_kwargs=None): + if len_args is not None and len_args != len(args): + raise ValueError(f"{error_prefix}: len(args) must be {len_args} but got {len(args)}") + if len_kwargs is not None and len_kwargs != len(kwargs): + raise ValueError(f"{error_prefix}: len(kwargs) must be {len_kwargs} but got {len(kwargs)}") + + +class _MaskedContiguous(torch.autograd.Function): + @staticmethod + def forward(ctx, input): + if not is_masked_tensor(input): + raise ValueError("MaskedContiguous forward: input must be a MaskedTensor.") + + if input.is_contiguous(): + return input + + data = input.get_data() + mask = input.get_mask() + + return MaskedTensor(data.contiguous(), mask.contiguous()) + + @staticmethod + def backward(ctx, grad_output): + return grad_output + + +class _MaskedToDense(torch.autograd.Function): + @staticmethod + def forward(ctx, input): + if not is_masked_tensor(input): + raise ValueError("MaskedToDense forward: input must be a MaskedTensor.") + + if input.layout == torch.strided: + return input + + ctx.layout = input.layout + data = input.get_data() + mask = input.get_mask() + + return MaskedTensor(data.to_dense(), mask.to_dense()) + + @staticmethod + def backward(ctx, grad_output): + layout = ctx.layout + + if layout == torch.sparse_coo: + return 
grad_output.to_sparse_coo() + elif layout == torch.sparse_csr: + return grad_output.to_sparse_csr() + elif layout == torch.strided: + return grad_output.to_dense() + raise ValueError("to_dense: Unsupported input layout: ", layout) + + +class _MaskedToSparse(torch.autograd.Function): + @staticmethod + def forward(ctx, input): + if not is_masked_tensor(input): + raise ValueError("MaskedToSparse forward: input must be a MaskedTensor.") + + # Following the convention from sparse tensors that to_sparse always means that we convert to sparse_coo + if input.layout == torch.sparse_coo: + return input + + data = input.get_data() + mask = input.get_mask() + sparse_mask = mask.to_sparse_coo().coalesce() + sparse_data = data.sparse_mask(sparse_mask) + + return MaskedTensor(sparse_data, sparse_mask) + + @staticmethod + def backward(ctx, grad_output): + return grad_output.to_dense() + + +class _MaskedToSparseCsr(torch.autograd.Function): + @staticmethod + def forward(ctx, input): + if not is_masked_tensor(input): + raise ValueError("MaskedToSparseCsr forward: input must be a MaskedTensor.") + + if input._masked_data.ndim != 2: + raise ValueError(f"Only 2D tensors can be converted to the SparseCsr layout but got shape: {input._masked_data.size()}") + + if input.layout == torch.sparse_csr: + return input + + data = input.get_data() + mask = input.get_mask() + sparse_mask = mask.to_sparse_csr() + sparse_data = data.sparse_mask(sparse_mask) + + return MaskedTensor(sparse_data, sparse_mask) + + @staticmethod + def backward(ctx, grad_output): + return grad_output.to_dense() + + +class _MaskedWhere(torch.autograd.Function): + @staticmethod + def forward(ctx, cond, self, other): + ctx.mark_non_differentiable(cond) + ctx.save_for_backward(cond) + return torch.ops.aten.where(cond, self, other) + + @staticmethod + def backward(ctx, grad_output): + (cond,) = ctx.saved_tensors + + def masked_out_like(mt): + return MaskedTensor(mt.get_data(), torch.zeros_like(mt.get_mask()).bool()) + + return ( + None, + torch.ops.aten.where(cond, grad_output, masked_out_like(grad_output)), + torch.ops.aten.where(cond, masked_out_like(grad_output), grad_output), + ) + + +_MASKEDTENSOR_FUNCTION_TABLE = {} + +_function_fn_apply_map = { + (tuple(NATIVE_REDUCE_FNS), tuple(TORCH_REDUCE_FNS), tuple(TENSOR_REDUCE_FNS)): _apply_reduction, +} + +for fn_map_list, apply_fn in _function_fn_apply_map.items(): + for fn_map in fn_map_list: + for fn in fn_map: + _MASKEDTENSOR_FUNCTION_TABLE[fn] = partial(apply_fn, fn) + + +def register_function_func(ops): + """ + Used for registering a new __torch_function__ function to MaskedTensor + Called via _MASKEDTENSOR_FUNCTION_TABLE[func](*args, **kwargs) + + The code to register a new function looks like: + + @register_function_func(list_of_ops) + def foo(func, *args, **kwargs): + + """ + def wrapper(func): + for op in ops: + _MASKEDTENSOR_FUNCTION_TABLE[op] = partial(func, op) + return wrapper + + +@register_function_func(NATIVE_REDUCE_FNS + TORCH_REDUCE_FNS + TENSOR_REDUCE_FNS) +def _general_function_reductions(func, *args, **kwargs): + return _apply_reduction(func, *args, **kwargs) + + +@register_function_func([torch.Tensor.where, torch.where]) +def _function_where(func, *args, **kwargs): + _check_args_kwargs_length(args, kwargs, "__torch_function__, torch.where", len_args=3, len_kwargs=0) + return _MaskedWhere.apply(*args) + + +@register_function_func([torch.Tensor.contiguous]) +def _function_contiguous(func, *args, **kwargs): + return _MaskedContiguous.apply(args[0]) + + 
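+# Illustrative sketch (left commented out, not registered): the handler pattern
+# that register_function_func expects, per its docstring above. The op and the
+# handler name below are hypothetical examples only.
+#
+#     @register_function_func([torch.Tensor.flatten])
+#     def _function_flatten_example(func, *args, **kwargs):
+#         mt = args[0]
+#         return MaskedTensor(func(mt.get_data()), func(mt.get_mask()))
+
+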
+@register_function_func([torch.Tensor.to_dense]) +def _function_to_dense(func, *args, **kwargs): + return _MaskedToDense.apply(args[0]) + + +@register_function_func([torch.Tensor.to_sparse]) +def _function_to_sparse(func, *args, **kwargs): + return _MaskedToSparse.apply(args[0]) + + +@register_function_func([torch.Tensor.to_sparse_csr]) +def _function_to_sparse_csr(func, *args, **kwargs): + return _MaskedToSparseCsr.apply(args[0]) + + +_MASKEDTENSOR_DISPATCH_TABLE = {} + +def register_dispatch_func(aten_ops): + """ + Used for registering a new __torch_dispatch__ function to MaskedTensor + Called via _MASKEDTENSOR_DISPATCH_TABLE[func](*args, **kwargs) + + The code to register a new function looks like: + + @register_dispatch_func(list_of_ops) + def foo(func, *args, **kwargs): + + """ + def wrapper(func): + for aten_op in aten_ops: + _MASKEDTENSOR_DISPATCH_TABLE[aten_op] = partial(func, aten_op) + return wrapper + + +@register_dispatch_func(NATIVE_REDUCE_FNS + TORCH_REDUCE_FNS + TENSOR_REDUCE_FNS) +def _general_reduction(func, *args, **kwargs): + return _apply_reduction(func, *args, **kwargs) + + +@register_dispatch_func(PASSTHROUGH_FNS) +def _general_passthrough(func, *args, **kwargs): + return _apply_pass_through_fn(func, *args, **kwargs) + + +@register_dispatch_func(NATIVE_UNARY_FNS + NATIVE_INPLACE_UNARY_FNS) +def _general_unary(func, *args, **kwargs): + return _apply_native_unary(func, *args, **kwargs) + + +@register_dispatch_func(NATIVE_BINARY_FNS + NATIVE_INPLACE_BINARY_FNS) +def _general_binary(func, *args, **kwargs): + return _apply_native_binary(func, *args, **kwargs) + + +@register_dispatch_func([torch.ops.aten.stride]) +def stride(func, *args, **kwargs): + return None + + +@register_dispatch_func([torch.ops.aten.sym_stride]) +def sym_stride(func, *args, **kwargs): + return None + + +@register_dispatch_func([torch.ops.prim.layout]) +def layout(func, *args, **kwargs): + return _get_data(args[0]).layout + + +@register_dispatch_func([torch.ops.aten.is_contiguous]) +def is_contiguous(func, *args, **kwargs): + data = _get_data(args[0]) + if data.is_sparse: + raise ValueError( + "MaskedTensors with sparse data do not have is_contiguous" + ) + return func(data, *args[1:], **kwargs) + + +@register_dispatch_func([torch.ops.aten.is_strides_like_format]) +def is_strides_like_format(func, *args, **kwargs): + data = _get_data(args[0]) + if data.is_sparse: + raise ValueError( + "MaskedTensors with sparse data do not have is_strides_like_format" + ) + return func(data, *args[1:], **kwargs) + + +@register_dispatch_func([torch.ops.aten.is_non_overlapping_and_dense]) +def is_non_overlapping_and_dense(func, *args, **kwargs): + data = _get_data(args[0]) + if data.is_sparse: + raise ValueError( + "MaskedTensors with sparse data do not have is_non_overlapping_and_dense" + ) + return func(data, *args[1:], **kwargs) + + +@register_dispatch_func([torch.ops.aten.contiguous]) +def contiguous(func, *args, **kwargs): + if _get_data(args[0]).is_sparse: + raise ValueError( + "MaskedTensors with sparse data do not have contiguous" + ) + return _MaskedContiguous.apply(args[0]) + + +@register_dispatch_func([torch.ops.aten.new_empty_strided]) +def new_empty_strided(func, *args, **kwargs): + _check_args_kwargs_length(args, kwargs, f"__torch_dispatch__, {func}", len_args=3) + data = _get_data(args[0]) + mask = _maybe_get_mask(args[0]) + if tuple(args[1]) != tuple(data.size()): + raise ValueError(f"__torch_dispatch__, {func}: args[1] expected to be the same as data.size()") + if tuple(args[2]) != 
tuple(data.stride()): + raise ValueError(f"__torch_dispatch__, {func}: args[2] expected to be the same as data.stride()") + return MaskedTensor(func(data, args[1], args[2], **kwargs), mask) + + +@register_dispatch_func([torch.ops.aten._local_scalar_dense]) +def _local_scalar_dense(func, *args, **kwargs): + if not _maybe_get_mask(args[0]): + raise ValueError(f"__torch_dispatch__, {func}: expected a mask tensor") + return torch.ops.aten._local_scalar_dense(_get_data(args[0])) + + +@register_dispatch_func([torch.ops.aten.detach, torch.ops.aten.clone]) +def _apply_fn_on_data(func, *args, **kwargs): + return MaskedTensor(func(_get_data(args[0])), _maybe_get_mask(args[0])) + + +@register_dispatch_func([torch.ops.aten._to_copy]) +def _to_copy(func, *args, **kwargs): + new_data = func(_get_data(args[0]), *args[1:], **kwargs) + return MaskedTensor(new_data, _maybe_get_mask(args[0])) + + +@register_dispatch_func([torch.ops.aten._softmax]) +def _softmax(func, *args, **kwargs): + _check_args_kwargs_length(args, kwargs, f"__torch_dispatch__, {func}", len_args=3, len_kwargs=0) + data = _get_data(args[0]) + mask = _maybe_get_mask(args[0]) + result_data = torch.ops.aten._masked_softmax(data, ~mask, args[1], 2) + return MaskedTensor(result_data, mask) + + +@register_dispatch_func([torch.ops.aten.ones_like]) +def ones_like(func, *args, **kwargs): + _check_args_kwargs_length(args, kwargs, f"__torch_dispatch__, {func}", len_args=1) + result_data = func(_get_data(args[0]), **kwargs) + return MaskedTensor(result_data, _maybe_get_mask(args[0])) + + +@register_dispatch_func([torch.ops.aten._softmax_backward_data]) +def _softmax_backward_data(func, *args, **kwargs): + _check_args_kwargs_length(args, kwargs, f"__torch_dispatch__, {func}", len_args=4) + grad, output, dim, input_dtype = args + if is_masked_tensor(grad) and is_masked_tensor(output): + if not _masks_match(grad, output): + raise ValueError("__torch_dispatch__, {func}: expected the masks of grad and output to match") + grad_data = _get_data(grad) + new_grad_data = torch.ops.aten._masked_softmax_backward( + grad_data, + _get_data(output), + ~_maybe_get_mask(grad), + dim % grad_data.ndim, + ) + res = MaskedTensor(new_grad_data, _maybe_get_mask(grad)) + return res + else: + raise ValueError(f"__torch_dispatch__, {func}: grad and output must both be MaskedTensors") + + +@register_dispatch_func([torch.ops.aten.copy_]) +def copy_(func, *args, **kwargs): + _check_args_kwargs_length(args, kwargs, f"__torch_dispatch__, {func}", len_args=2) + if not _masks_match(_maybe_get_mask(args[0]), _maybe_get_mask(args[1])): + raise ValueError("args[0] mask and args[1] mask must match but do not") + func(_get_data(args[0]), _get_data(args[1])) + return args[0] + + +@register_dispatch_func([torch.ops.aten.where]) +def where(func, *args, **kwargs): + _check_args_kwargs_length(args, kwargs, f"__torch_dispatch__, {func}", len_args=3, len_kwargs=0) + if not torch.is_tensor(args[0]): + raise ValueError("__torch_dispatch__, {func}: expected args[0] to be a tensor") + mx = args[1] + my = args[2] + if not is_masked_tensor(mx): + mx = MaskedTensor(mx, torch.ones_like(mx, dtype=torch.bool)) + if not is_masked_tensor(my): + my = MaskedTensor(my, torch.ones_like(my, dtype=torch.bool)) + new_data = func(args[0], mx.get_data(), my.get_data()) + new_mask = func(args[0], mx.get_mask(), my.get_mask()) + return MaskedTensor(new_data, new_mask) + + +@register_dispatch_func([torch.ops.aten._to_sparse]) +def _to_sparse(func, *args, **kwargs): + _check_args_kwargs_length(args, kwargs, 
f"__torch_dispatch__, {func}", len_args=1, len_kwargs=0) + if not torch.is_tensor(args[0]): + raise TypeError("__torch_dispatch__, {func}: expected args[0] to be a tensor") + mt = args[0] + if not is_masked_tensor(mt): + mt = MaskedTensor(mt, torch.ones_like(mt, dtype=torch.bool)) + if mt.is_sparse_coo(): + return mt + new_mask = func(_maybe_get_mask(args[0])).coalesce() + new_data = _get_data(args[0]).sparse_mask(new_mask) + return MaskedTensor(new_data, new_mask) + + +@register_dispatch_func([torch.ops.aten._to_sparse_csr]) +def _to_sparse_csr(func, *args, **kwargs): + _check_args_kwargs_length(args, kwargs, f"__torch_dispatch__, {func}", len_args=1, len_kwargs=0) + if not torch.is_tensor(args[0]): + raise ValueError("__torch_dispatch__, {func}: expected args[0] to be a tensor") + mt = args[0] + if not is_masked_tensor(mt): + mt = MaskedTensor(mt, torch.ones_like(mt).bool()) + if mt.is_sparse_csr(): + return mt + new_mask = func(_maybe_get_mask(args[0])) + new_data = _get_data(args[0]).sparse_mask(new_mask) + return MaskedTensor(new_data, new_mask) + + +@register_dispatch_func([torch.ops.aten._to_dense]) +def _to_dense(func, *args, **kwargs): + _check_args_kwargs_length(args, kwargs, f"__torch_dispatch__, {func}", len_args=1, len_kwargs=0) + if not torch.is_tensor(args[0]): + raise ValueError("__torch_dispatch__, {func}: expected args[0] to be a tensor") + mt = args[0] + if not is_masked_tensor(mt): + mt = MaskedTensor(mt, torch.ones_like(mt).bool()) + new_data = func(_get_data(args[0])) + new_mask = func(_maybe_get_mask(args[0])) + return MaskedTensor(new_data, new_mask) + + +@register_dispatch_func([torch.ops.aten._indices]) +def _indices(func, *args, **kwargs): + # Assumes data is sparse + _check_args_kwargs_length(args, kwargs, f"__torch_dispatch__, {func}", len_args=1, len_kwargs=0) + data = _get_data(args[0]).indices() + return MaskedTensor(data, torch.ones_like(data).bool()) + + +@register_dispatch_func([torch.ops.aten._values]) +def _values(func, *args, **kwargs): + _check_args_kwargs_length(args, kwargs, f"__torch_dispatch__, {func}", len_args=1, len_kwargs=0) + data = _get_data(args[0]).values() + return MaskedTensor(data, torch.ones_like(data).bool()) + + +@register_dispatch_func([torch.ops.aten._sparse_coo_tensor_with_dims_and_tensors]) +def _sparse_coo_tensor_with_dims_and_tensors(func, *args, **kwargs): + new_args = list(args) + if is_masked_tensor(args[-1]): + new_args[-1] = args[-1].get_data() + if is_masked_tensor(args[-2]): + new_args[-2] = args[-2].get_data() + + new_data = func(*new_args, **kwargs) + new_args[-1] = torch.ones_like(new_args[-1]) + new_mask = func(*new_args, **kwargs).bool() + + return MaskedTensor(new_data, new_mask) + + +@register_dispatch_func([torch.ops.aten.is_same_size]) +def is_same_size(func, *args, **kwargs): + _check_args_kwargs_length(args, kwargs, f"__torch_dispatch__, {func}", len_args=2) + return _get_data(args[0]).is_same_size(_get_data(args[1])) diff --git a/env-llmeval/lib/python3.10/site-packages/torch/masked/maskedtensor/binary.py b/env-llmeval/lib/python3.10/site-packages/torch/masked/maskedtensor/binary.py new file mode 100644 index 0000000000000000000000000000000000000000..087ea95916e54ee925b50a6466693a735a8717d9 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torch/masked/maskedtensor/binary.py @@ -0,0 +1,192 @@ +# Copyright (c) Meta Platforms, Inc. 
and affiliates + +import torch + +from .core import _map_mt_args_kwargs, _masks_match, _tensors_match, _wrap_result, is_masked_tensor + +__all__ = [] # type: ignore[var-annotated] + +BINARY_NAMES = [ + "add", + "atan2", + "arctan2", + "bitwise_and", + "bitwise_or", + "bitwise_xor", + "bitwise_left_shift", + "bitwise_right_shift", + "div", + "divide", + "floor_divide", + "fmod", + "logaddexp", + "logaddexp2", + "mul", + "multiply", + "nextafter", + "remainder", + "sub", + "subtract", + "true_divide", + "eq", + "ne", + "le", + "ge", + "greater", + "greater_equal", + "gt", + "less_equal", + "lt", + "less", + "maximum", + "minimum", + "fmax", + "fmin", + "not_equal", +] + +INPLACE_BINARY_NAMES = [ + n + "_" + for n in ( + list( + set(BINARY_NAMES) + - { + "logaddexp", + "logaddexp2", + "equal", + "fmin", + "minimum", + "maximum", + "fmax", + } + ) + ) +] + + +def _get_at_least_one_mask(a, b): + if not is_masked_tensor(a) and not is_masked_tensor(b): + raise TypeError("At least one of `a` and `b` must be a MaskedTensor") + if not _masks_match(a, b): + raise ValueError("a and b must have matching masks") + if is_masked_tensor(a): + return a.get_mask() + return b.get_mask() + + +def _binary_helper(fn, args, kwargs, inplace): + if len(kwargs) != 0: + raise ValueError("len(kwargs) must equal 0") + for a in args[2:]: + if torch.is_tensor(a): + raise TypeError("MaskedTensor binary ops do not support Tensor arguments aside from the lhs and rhs") + + if not _masks_match(*args[:2]): + raise ValueError( + "Input masks must match. If you need support for this, please open an issue on Github." + ) + + data_args, data_kwargs = _map_mt_args_kwargs( + args, kwargs, lambda x: x.get_data() + ) + mask_args, mask_kwargs = _map_mt_args_kwargs( + args, kwargs, lambda x: x.get_mask() + ) + + args0_layout = data_args[0].layout + same_layout = ( + (torch.is_tensor(data_args[1]) or is_masked_tensor(data_args[1])) and + (args0_layout == data_args[1].layout) + ) + + if args0_layout == torch.sparse_coo: + if same_layout: + if not _tensors_match(data_args[0].indices(), data_args[1].indices()): + raise ValueError( + "sparse_coo indices must match. If you need support for this, please open an issue on Github." + ) + if data_args[0].size() != data_args[1].size(): + raise ValueError("input1 and input2 must have the same size for binary functions.") + + data_args[1] = data_args[1].values() + + i = data_args[0].indices() + size = data_args[0].size() + data_args[0] = data_args[0].values() + v = fn(*data_args) + result_data = torch.sparse_coo_tensor(i, v, size) + + elif args0_layout == torch.sparse_csr: + if same_layout: + if not ( + _tensors_match(data_args[0].crow_indices(), data_args[1].crow_indices()) + and _tensors_match( + data_args[0].col_indices(), data_args[1].col_indices() + ) + ): + raise ValueError( + "sparse_csr indices must match. If you need support for this, please open an issue on Github." 
+ ) + + data_args[1] = data_args[1].values() + + crow = data_args[0].crow_indices() + col = data_args[0].col_indices() + data_args[0] = data_args[0].values() + v = fn(*data_args) + result_data = torch.sparse_csr_tensor(crow, col, v) + + else: + result_data = fn(*data_args) + + if inplace: + args[0]._set_data_mask(result_data, mask_args[0]) + return args[0] + else: + result_mask = _get_at_least_one_mask(*args[:2]) + # sparse tensors don't have strides so we can only expand if the layout is strided + if args0_layout == torch.strided: + result_mask = result_mask.expand_as(result_data) + return _wrap_result(result_data, result_mask) + + +def _torch_binary(fn_name): + fn = getattr(torch.ops.aten, fn_name) + + def binary_fn(*args, **kwargs): + return _binary_helper(fn, args, kwargs, inplace=False) + + return binary_fn + + +def _torch_inplace_binary(fn_name): + fn = getattr(torch.ops.aten, fn_name) + + def binary_fn(*args, **kwargs): + return _binary_helper(fn, args, kwargs, inplace=True) + + return binary_fn + + +NATIVE_BINARY_MAP = { + getattr(torch.ops.aten, name): _torch_binary(name) for name in BINARY_NAMES +} +NATIVE_INPLACE_BINARY_MAP = { + getattr(torch.ops.aten, name): _torch_inplace_binary(name) + for name in INPLACE_BINARY_NAMES +} + +NATIVE_BINARY_FNS = list(NATIVE_BINARY_MAP.keys()) +NATIVE_INPLACE_BINARY_FNS = list(NATIVE_INPLACE_BINARY_MAP.keys()) + + +def _is_native_binary(fn): + return fn in NATIVE_BINARY_FNS or fn in NATIVE_INPLACE_BINARY_FNS + + +def _apply_native_binary(fn, *args, **kwargs): + if fn in NATIVE_BINARY_FNS: + return NATIVE_BINARY_MAP[fn](*args, **kwargs) + if fn in NATIVE_INPLACE_BINARY_FNS: + return NATIVE_INPLACE_BINARY_MAP[fn](*args, **kwargs) + return NotImplemented diff --git a/env-llmeval/lib/python3.10/site-packages/torch/masked/maskedtensor/core.py b/env-llmeval/lib/python3.10/site-packages/torch/masked/maskedtensor/core.py new file mode 100644 index 0000000000000000000000000000000000000000..d2002048edd995e0d3bcd28f8a2349548a2ba80e --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torch/masked/maskedtensor/core.py @@ -0,0 +1,336 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates + +import warnings + +import torch +from torch.overrides import get_default_nowrap_functions + + +__all__ = [ + "MaskedTensor", + "is_masked_tensor", +] + + +def is_masked_tensor(a): + r""" Returns True if the input is a MaskedTensor, else False + + Args: + a: any input + + Examples: + + >>> # xdoctest: +SKIP + >>> from torch.masked import MaskedTensor + >>> data = torch.arange(6).reshape(2,3) + >>> mask = torch.tensor([[True, False, False], [True, True, False]]) + >>> mt = MaskedTensor(data, mask) + >>> is_masked_tensor(mt) + True + """ + return isinstance(a, MaskedTensor) + + +def _tensors_match(a, b, exact=True, rtol=1e-05, atol=1e-08): + if is_masked_tensor(a) or is_masked_tensor(b): + raise ValueError("Neither `a` nor `b` can be a MaskedTensor.") + if a.layout != b.layout: + raise ValueError(f"`a` and `b` must have the same layout. 
Got {a.layout} and {b.layout}") + + if a.dtype != b.dtype: + b = b.type(a.dtype) + if a.layout == b.layout == torch.sparse_coo: + return _tensors_match(a.values(), b.values(), exact) and _tensors_match( + a.indices(), b.indices(), exact + ) + elif a.layout == b.layout == torch.sparse_csr: + return ( + _tensors_match(a.crow_indices(), b.crow_indices(), exact) + and _tensors_match(a.col_indices(), b.col_indices(), exact) + and _tensors_match(a.values(), b.values(), exact) + ) + if exact: + return (a.dim() == b.dim()) and torch.eq(a, b).all().item() + return (a.dim() == b.dim()) and torch.allclose(a, b, rtol=rtol, atol=atol) + + +def _masks_match(a, b): + if is_masked_tensor(a) and is_masked_tensor(b): + mask_a = a.get_mask() + mask_b = b.get_mask() + return _tensors_match(mask_a, mask_b, exact=True) + return True + + +def _map_mt_args_kwargs(args, kwargs, map_fn): + def _helper(a, map_fn): + if is_masked_tensor(a): + return map_fn(a) + elif torch.is_tensor(a): + return a + elif isinstance(a, list): + a_impl, _ = _map_mt_args_kwargs(a, {}, map_fn) + return a_impl + elif isinstance(a, tuple): + a_impl, _ = _map_mt_args_kwargs(a, {}, map_fn) + return tuple(a_impl) + else: + return a + + if kwargs is None: + kwargs = {} + impl_args = [] + for a in args: + impl_args.append(_helper(a, map_fn)) + impl_kwargs = {} + for k in kwargs.keys(): + impl_kwargs[k] = _helper(a, map_fn) + return impl_args, impl_kwargs + + +def _wrap_result(result_data, result_mask): + if isinstance(result_data, list): + return [_wrap_result(r, m) for (r, m) in zip(result_data, result_mask)] + if isinstance(result_data, tuple): + return tuple(_wrap_result(r, m) for (r, m) in zip(result_data, result_mask)) + if torch.is_tensor(result_data): + return MaskedTensor(result_data, result_mask) + # Expect result_data and result_mask to be Tensors only + return NotImplemented + + +def _masked_tensor_str(data, mask, formatter): + if data.layout in {torch.sparse_coo, torch.sparse_csr}: + data = data.to_dense() + mask = mask.to_dense() + if data.dim() == 1: + formatted_elements = [ + formatter.format(d.item()) if isinstance(d.item(), float) else str(d.item()) + for d in data + ] + max_len = max( + 8 if x[1] else len(x[0]) for x in zip(formatted_elements, ~mask) + ) + return ( + "[" + + ", ".join( + [ + "--".rjust(max_len) if m else e + for (e, m) in zip(formatted_elements, ~mask) + ] + ) + + "]" + ) + sub_strings = [_masked_tensor_str(d, m, formatter) for (d, m) in zip(data, mask)] + sub_strings = ["\n".join([" " + si for si in s.split("\n")]) for s in sub_strings] + return "[\n" + ",\n".join(sub_strings) + "\n]" + + +def _get_data(a): + if is_masked_tensor(a): + return a._masked_data + return a + + +def _maybe_get_mask(a): + if is_masked_tensor(a): + return a.get_mask() + return None + + +class MaskedTensor(torch.Tensor): + @staticmethod + def __new__(cls, data, mask, requires_grad=False): + if is_masked_tensor(data) or not torch.is_tensor(data): + raise TypeError("data must be a Tensor") + if is_masked_tensor(mask) or not torch.is_tensor(mask): + raise TypeError("mask must be a Tensor") + # Use a Tensor that of the give size for the wrapper. + kwargs = {} + kwargs["device"] = data.device + kwargs["dtype"] = data.dtype + kwargs["layout"] = data.layout + kwargs["requires_grad"] = requires_grad + kwargs["dispatch_sizes_strides_policy"] = "strides" + kwargs["dispatch_layout"] = True + warnings.warn(("The PyTorch API of MaskedTensors is in prototype stage " + "and will change in the near future. 
Please open a Github issue " + "for features requests and see our documentation on the torch.masked " + "module for further information about the project."), UserWarning) + if data.requires_grad: + warnings.warn("It is not recommended to create a MaskedTensor with a tensor that requires_grad. " + "To avoid this, you can use data.clone().detach()", UserWarning) + return torch.Tensor._make_wrapper_subclass(cls, data.size(), **kwargs) # type: ignore[attr-defined] + + def _preprocess_data(self, data, mask): + from .._ops import _sparse_coo_where, _sparse_csr_where + + if data.layout != mask.layout: + raise TypeError("data and mask must have the same layout.") + if data.layout == torch.sparse_coo: + data = data.coalesce() + mask = mask.coalesce() + if data._nnz() != mask._nnz(): + data = _sparse_coo_where(mask, data, torch.tensor(0)) + elif data.layout == torch.sparse_csr: + if data._nnz() != mask._nnz(): + data = _sparse_csr_where(mask, data, torch.tensor(0)) + + # Have to pick awkward names to not conflict with existing fields such as data + self._masked_data = data.clone() + self._masked_mask = mask.clone() + + def _validate_members(self): + data = self._masked_data + mask = self.get_mask() + if type(data) != type(mask): + raise TypeError(f"data and mask must have the same type. Got {type(data)} and {type(mask)}") + if data.layout not in {torch.strided, torch.sparse_coo, torch.sparse_csr}: + raise TypeError(f"data layout of {data.layout} is not supported.") + if data.layout == torch.sparse_coo: + if not _tensors_match(data.indices(), mask.indices(), exact=True): + raise ValueError("data and mask are both sparse COO tensors but do not have the same indices.") + elif data.layout == torch.sparse_csr: + if not _tensors_match( + data.crow_indices(), mask.crow_indices(), exact=True + ) or not _tensors_match(data.col_indices(), mask.col_indices(), exact=True): + raise ValueError("data and mask are both sparse CSR tensors but do not share either crow or col indices.") + if mask.dtype != torch.bool: + raise TypeError("mask must have dtype bool.") + if not ( + data.dtype == torch.float16 + or data.dtype == torch.float32 + or data.dtype == torch.float64 + or data.dtype == torch.bool + or data.dtype == torch.int8 + or data.dtype == torch.int16 + or data.dtype == torch.int32 + or data.dtype == torch.int64 + ): + raise TypeError(f"{data.dtype} is not supported in MaskedTensor.") + if data.dim() != mask.dim(): + raise ValueError("data.dim() must equal mask.dim()") + if data.size() != mask.size(): + raise ValueError("data.size() must equal mask.size()") + + def __init__(self, data, mask, requires_grad=False): + self._preprocess_data(data, mask) + self._validate_members() + + @staticmethod + def _from_values(data, mask): + """ Differentiable constructor for MaskedTensor """ + class Constructor(torch.autograd.Function): + @staticmethod + def forward(ctx, data, mask): + return MaskedTensor(data, mask) + + @staticmethod + def backward(ctx, grad_output): + return grad_output, None + + result = Constructor.apply(data, mask) + return result + + def _set_data_mask(self, data, mask): + self._masked_data = data + self._masked_mask = mask + self._validate_members() + + def __repr__(self): + formatter = "{0:8.4f}" + if self.dim() == 0: + scalar_data = self.get_data().item() + data_formatted = ( + formatter.format(scalar_data) + if isinstance(scalar_data, float) + else str(scalar_data) + ) + if not self.get_mask().item(): + data_formatted = "--" + return ( + "MaskedTensor(" + + data_formatted + + ", " + + 
str(self.get_mask().item()) + + ")" + ) + s = _masked_tensor_str(self.get_data(), self.get_mask(), formatter) + s = "\n".join(" " + si for si in s.split("\n")) + return "MaskedTensor(\n" + s + "\n)" + + # Seems like this needs to be defined before torch_dispatch to work + @classmethod + def __torch_function__(cls, func, types, args=(), kwargs=None): + kwargs = kwargs or {} + + from ._ops_refs import _MASKEDTENSOR_FUNCTION_TABLE + if func in _MASKEDTENSOR_FUNCTION_TABLE: + return _MASKEDTENSOR_FUNCTION_TABLE[func](*args, **kwargs) + + if not all(issubclass(cls, t) for t in types): + return NotImplemented + with torch._C.DisableTorchFunctionSubclass(): + ret = func(*args, **kwargs) + if func in get_default_nowrap_functions(): + return ret + else: + return torch._tensor._convert(ret, cls) + + @classmethod + def unary(cls, fn, data, mask): + return MaskedTensor(fn(data), mask) + + @classmethod + def __torch_dispatch__(cls, func, types, args, kwargs): + func = func.overloadpacket + + from ._ops_refs import _MASKEDTENSOR_DISPATCH_TABLE + if func in _MASKEDTENSOR_DISPATCH_TABLE: + return _MASKEDTENSOR_DISPATCH_TABLE[func](*args, **kwargs) + + msg = ( + f"{func.__name__} is not implemented in __torch_dispatch__ for MaskedTensor.\n" + "If you would like this operator to be supported, please file an issue for a feature request at " + "https://github.com/pytorch/maskedtensor/issues with a minimal reproducible code snippet.\n" + "In the case that the semantics for the operator are not trivial, it would be appreciated " + "to also include a proposal for the semantics." + ) + warnings.warn(msg) + return NotImplemented + + def __lt__(self, other): + if is_masked_tensor(other): + return MaskedTensor(self.get_data() < _get_data(other), self.get_mask()) + return MaskedTensor(self.get_data() < other, self.get_mask()) + + def to_tensor(self, value): + return self.get_data().masked_fill(~self.get_mask(), value) + + def get_data(self): + class GetData(torch.autograd.Function): + @staticmethod + def forward(ctx, self): + return self._masked_data + + @staticmethod + def backward(ctx, grad_output): + if is_masked_tensor(grad_output): + return grad_output + return MaskedTensor(grad_output, self.get_mask()) + + return GetData.apply(self) + + def get_mask(self): + return self._masked_mask + + def is_sparse_coo(self): + return self.layout == torch.sparse_coo + + def is_sparse_csr(self): + return self.layout == torch.sparse_csr + + # Update later to support more sparse layouts + @property + def is_sparse(self): + return self.is_sparse_coo() or self.is_sparse_csr() diff --git a/env-llmeval/lib/python3.10/site-packages/torch/masked/maskedtensor/creation.py b/env-llmeval/lib/python3.10/site-packages/torch/masked/maskedtensor/creation.py new file mode 100644 index 0000000000000000000000000000000000000000..861984a21e1c436ef738c71b96fb1b4534f61583 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torch/masked/maskedtensor/creation.py @@ -0,0 +1,21 @@ +# Copyright (c) Meta Platforms, Inc. 
and affiliates + +from .core import MaskedTensor + +__all__ = [ + "as_masked_tensor", + "masked_tensor", +] + + +"""" +These two factory functions are intended to mirror + torch.tensor - guaranteed to be a leaf node + torch.as_tensor - differentiable constructor that preserves the autograd history +""" + +def masked_tensor(data, mask, requires_grad=False): + return MaskedTensor(data, mask, requires_grad) + +def as_masked_tensor(data, mask): + return MaskedTensor._from_values(data, mask) diff --git a/env-llmeval/lib/python3.10/site-packages/torch/masked/maskedtensor/passthrough.py b/env-llmeval/lib/python3.10/site-packages/torch/masked/maskedtensor/passthrough.py new file mode 100644 index 0000000000000000000000000000000000000000..91c9e5f81830e953b2d7c6ebc58f05e4c7fe1ecf --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torch/masked/maskedtensor/passthrough.py @@ -0,0 +1,43 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates +""" +These are functions that should simply be applied to both mask and data. +Take select or stack as an example. This operation can be applied to +both the mask and data of a MaskedTensor and the result wrapped into +a new MaskedTensor as a result. +""" + +import torch + +from .core import _map_mt_args_kwargs, _wrap_result + +__all__ = [] # type: ignore[var-annotated] + + +PASSTHROUGH_FNS = [ + torch.ops.aten.select, + torch.ops.aten.transpose, + torch.ops.aten.split, + torch.ops.aten.t, + torch.ops.aten.slice, + torch.ops.aten.slice_backward, + torch.ops.aten.select_backward, + torch.ops.aten.index, + torch.ops.aten.expand, + torch.ops.aten.view, + torch.ops.aten._unsafe_view, + torch.ops.aten._reshape_alias, + torch.ops.aten.cat, + torch.ops.aten.unsqueeze, +] + + +def _is_pass_through_fn(fn): + return fn in PASSTHROUGH_FNS + + +def _apply_pass_through_fn(fn, *args, **kwargs): + data_args, data_kwargs = _map_mt_args_kwargs(args, kwargs, lambda x: x.get_data()) + result_data = fn(*data_args, **data_kwargs) + mask_args, mask_kwargs = _map_mt_args_kwargs(args, kwargs, lambda x: x.get_mask()) + result_mask = fn(*mask_args, **mask_kwargs) + return _wrap_result(result_data, result_mask) diff --git a/env-llmeval/lib/python3.10/site-packages/torch/masked/maskedtensor/reductions.py b/env-llmeval/lib/python3.10/site-packages/torch/masked/maskedtensor/reductions.py new file mode 100644 index 0000000000000000000000000000000000000000..737f4b240beb91bca5b8b5fe46cc45dd4dce9c63 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torch/masked/maskedtensor/reductions.py @@ -0,0 +1,173 @@ +# Copyright (c) Meta Platforms, Inc. 
and affiliates + +import warnings + +import torch + +from .core import is_masked_tensor +from .creation import as_masked_tensor, masked_tensor + +__all__ = [] # type: ignore[var-annotated] + + +def _masked_all_all(data, mask=None): + if mask is None: + return data.all() + return data.masked_fill(~mask, True).all() + + +def _masked_all_dim(data, dim, keepdim=False, mask=None): + if mask is None: + return torch.all(data, dim=dim, keepdim=keepdim) + return torch.all(data.masked_fill(~mask, True), dim=dim, keepdim=keepdim) + + +def _masked_all(*args, **kwargs): + if len(args) == 1 and len(kwargs) == 1: + return _masked_all_all(args[0], mask=kwargs["mask"]) + return _masked_all_dim(*args, **kwargs) + + +def _multidim_any(mask, dim, keepdim): + if isinstance(dim, int): + return _multidim_any(mask, [dim], keepdim) + for d in sorted(dim, reverse=True): + mask = torch.any(mask, dim=d, keepdim=keepdim) + return mask + + +def _get_masked_fn(fn): + if fn == "all": + return _masked_all + return getattr(torch.masked, fn) + + +def _torch_reduce_all(fn): + def reduce_all(self): + masked_fn = _get_masked_fn(fn) + data = self.get_data() + mask = self.get_mask().values() if self.is_sparse else self.get_mask() + # When reduction is "all", then torch.argmin/torch.argmax needs to return the index of the + # element corresponding to the min/max, but this operation isn't supported correctly for sparse layouts. + # Therefore, this implementation calculates it using the strides. + if fn == "all": + result_data = masked_fn(data, mask=mask) + + elif fn in {"argmin", "argmax"} and self.is_sparse_coo(): + sparse_idx = masked_fn(data.values(), mask=mask).to(dtype=torch.int) + indices = ( + data.to_sparse_coo().indices() + if not self.is_sparse_coo() + else data.indices() + ) + idx = indices.unbind(1)[sparse_idx] + stride = data.size().numel() / torch.tensor( + data.size(), device=data.device + ).cumprod(0) + result_data = torch.sum(idx * stride) + + # we simply pass in the values for sparse COO/CSR tensors + elif self.is_sparse: + result_data = masked_fn(masked_tensor(data.values(), mask)) + + else: + result_data = masked_fn(self, mask=mask) + + return as_masked_tensor(result_data, torch.any(mask)) + + return reduce_all + + +def _torch_reduce_dim(fn): + def reduce_dim(self, dim, keepdim=False, dtype=None): + if self.is_sparse: + msg = ( + f"The sparse version of {fn} is not implemented in reductions.\n" + "If you would like this operator to be supported, please file an issue for a feature request at " + "https://github.com/pytorch/maskedtensor/issues with a minimal reproducible code snippet.\n" + "In the case that the semantics for the operator are not trivial, it would be appreciated " + "to also include a proposal for the semantics." 
+ ) + warnings.warn(msg) + return NotImplemented + if not is_masked_tensor(self): + raise TypeError("Input to reduce_dim must be a MaskedTensor") + + masked_fn = _get_masked_fn(fn) + data = self.get_data() + mask = self.get_mask() + if fn == "all": + result_data = masked_fn(data, dim=dim, keepdim=keepdim, mask=mask) + else: + result_data = masked_fn( + self, dim=dim, keepdim=keepdim, dtype=dtype, mask=self.get_mask() + ) + return as_masked_tensor(result_data, _multidim_any(mask, dim, keepdim)) + + return reduce_dim + + +def _torch_reduce(fn): + def reduce_fn(*args, **kwargs): + if len(args) == 1 and len(kwargs) == 0: + return _torch_reduce_all(fn)(args[0]) + return _torch_reduce_dim(fn)(*args, **kwargs) + + return reduce_fn + + +def _reduce_dim_args(input, dim, keepdim=False, dtype=None): + return input, dim, keepdim, dtype + + +def _torch_grad_reduce(fn): + def grad_reduce(*args, **kwargs): + if len(args) == 1 and len(kwargs) == 0: + return _torch_reduce_all(fn)(args[0]) + # TODO: autograd.Function doesn't support kwarg + input, dim, keepdim, dtype = _reduce_dim_args(*args, **kwargs) + return _torch_reduce_dim(fn)(input, dim, keepdim, dtype) + + return grad_reduce + + +REDUCE_NAMES = [ + "sum", + "mean", + "amin", + "amax", + "argmin", + "argmax", + "prod", + "all", + "norm", + "var", + "std", +] + +NATIVE_REDUCE_MAP = { + getattr(torch.ops.aten, name): _torch_reduce(name) for name in REDUCE_NAMES +} +TORCH_REDUCE_MAP = { + getattr(torch, name): _torch_grad_reduce(name) for name in REDUCE_NAMES +} +TENSOR_REDUCE_MAP = { + getattr(torch.Tensor, name): _torch_grad_reduce(name) for name in REDUCE_NAMES +} + +NATIVE_REDUCE_FNS = list(NATIVE_REDUCE_MAP.keys()) +TORCH_REDUCE_FNS = list(TORCH_REDUCE_MAP.keys()) +TENSOR_REDUCE_FNS = list(TENSOR_REDUCE_MAP.keys()) + +def _is_reduction(fn): + return fn in NATIVE_REDUCE_MAP or fn in TORCH_REDUCE_MAP or fn in TENSOR_REDUCE_MAP + + +def _apply_reduction(fn, *args, **kwargs): + if fn in NATIVE_REDUCE_MAP: + return NATIVE_REDUCE_MAP[fn](*args, **kwargs) + if fn in TORCH_REDUCE_MAP: + return TORCH_REDUCE_MAP[fn](*args, **kwargs) + if fn in TENSOR_REDUCE_MAP: + return TENSOR_REDUCE_MAP[fn](*args, **kwargs) + return NotImplemented diff --git a/env-llmeval/lib/python3.10/site-packages/torch/masked/maskedtensor/unary.py b/env-llmeval/lib/python3.10/site-packages/torch/masked/maskedtensor/unary.py new file mode 100644 index 0000000000000000000000000000000000000000..b3d5c136bfd4149810d25c36fd18b34d7a0a67c5 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torch/masked/maskedtensor/unary.py @@ -0,0 +1,188 @@ +# Copyright (c) Meta Platforms, Inc. 
and affiliates + +import torch + +from .core import _map_mt_args_kwargs, _wrap_result + +__all__ = [] # type: ignore[var-annotated] + + +UNARY_NAMES = [ + "abs", + "absolute", + "acos", + "arccos", + "acosh", + "arccosh", + "angle", + "asin", + "arcsin", + "asinh", + "arcsinh", + "atan", + "arctan", + "atanh", + "arctanh", + "bitwise_not", + "ceil", + "clamp", + "clip", + "conj_physical", + "cos", + "cosh", + "deg2rad", + "digamma", + "erf", + "erfc", + "erfinv", + "exp", + "exp2", + "expm1", + "fix", + "floor", + "frac", + "lgamma", + "log", + "log10", + "log1p", + "log2", + "logit", + "i0", + "isnan", + "nan_to_num", + "neg", + "negative", + "positive", + "pow", + "rad2deg", + "reciprocal", + "round", + "rsqrt", + "sigmoid", + "sign", + "sgn", + "signbit", + "sin", + "sinc", + "sinh", + "sqrt", + "square", + "tan", + "tanh", + "trunc", +] + +INPLACE_UNARY_NAMES = [ + n + "_" + for n in (list(set(UNARY_NAMES) - {"angle", "positive", "signbit", "isnan"})) +] + +# Explicitly tracking functions we know are currently not supported +# This might be due to missing code gen or because of complex semantics +UNARY_NAMES_UNSUPPORTED = [ + "atan2", + "arctan2", + "bitwise_left_shift", + "bitwise_right_shift", + "copysign", + "float_power", + "fmod", + "frexp", + "gradient", + "imag", + "ldexp", + "lerp", + "logical_not", + "hypot", + "igamma", + "igammac", + "mvlgamma", + "nextafter", + "polygamma", + "real", + "remainder", + "true_divide", + "xlogy", +] + + +def _unary_helper(fn, args, kwargs, inplace): + if len(kwargs) != 0: + raise ValueError("MaskedTensor unary ops require that len(kwargs) == 0. " + "If you need support for this, please open an issue on Github.") + for a in args[1:]: + if torch.is_tensor(a): + raise TypeError("MaskedTensor unary ops do not support additional Tensor arguments") + + mask_args, mask_kwargs = _map_mt_args_kwargs( + args, kwargs, lambda x: x._masked_mask + ) + data_args, data_kwargs = _map_mt_args_kwargs( + args, kwargs, lambda x: x._masked_data + ) + + if args[0].layout == torch.sparse_coo: + data_args[0] = data_args[0].coalesce() + s = data_args[0].size() + i = data_args[0].indices() + data_args[0] = data_args[0].coalesce().values() + v = fn(*data_args) + result_data = torch.sparse_coo_tensor(i, v, size=s) + + elif args[0].layout == torch.sparse_csr: + crow = data_args[0].crow_indices() + col = data_args[0].col_indices() + data_args[0] = data_args[0].values() + v = fn(*data_args) + result_data = torch.sparse_csr_tensor(crow, col, v) + + else: + result_data = fn(*data_args) + + if inplace: + args[0]._set_data_mask(result_data, mask_args[0]) + return args[0] + else: + return _wrap_result(result_data, mask_args[0]) + + +def _torch_unary(fn_name): + fn = getattr(torch.ops.aten, fn_name) + + def unary_fn(*args, **kwargs): + return _unary_helper(fn, args, kwargs, inplace=False) + + return unary_fn + + +def _torch_inplace_unary(fn_name): + fn = getattr(torch.ops.aten, fn_name) + + def unary_fn(*args, **kwargs): + return _unary_helper(fn, args, kwargs, inplace=True) + + return unary_fn + + +NATIVE_UNARY_MAP = { + getattr(torch.ops.aten, name): _torch_unary(name) for name in UNARY_NAMES +} +NATIVE_INPLACE_UNARY_MAP = { + getattr(torch.ops.aten, name): _torch_inplace_unary(name) + for name in INPLACE_UNARY_NAMES +} + +NATIVE_UNARY_FNS = list(NATIVE_UNARY_MAP.keys()) +NATIVE_INPLACE_UNARY_FNS = list(NATIVE_INPLACE_UNARY_MAP.keys()) + + +def _is_native_unary(fn): + return fn in NATIVE_UNARY_FNS or fn in NATIVE_INPLACE_UNARY_FNS + + +def _apply_native_unary(fn, *args, 
**kwargs): + if fn in NATIVE_UNARY_FNS: + return NATIVE_UNARY_MAP[fn](*args, **kwargs) + if fn in NATIVE_INPLACE_UNARY_FNS: + return NATIVE_INPLACE_UNARY_MAP[fn](*args, **kwargs) + return NotImplemented diff --git a/env-llmeval/lib/python3.10/site-packages/torch/optim/adamw.py b/env-llmeval/lib/python3.10/site-packages/torch/optim/adamw.py new file mode 100644 index 0000000000000000000000000000000000000000..17d72c66c6db51ab9ad8572ce9936927f3581770 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torch/optim/adamw.py @@ -0,0 +1,687 @@ +import torch +from torch import Tensor +from .optimizer import (Optimizer, _use_grad_for_differentiable, _get_value, _dispatch_sqrt, + _stack_if_compiling, _capturable_doc, _differentiable_doc, _foreach_doc, + _fused_doc, _maximize_doc, _default_to_fused_or_foreach, ParamsT, _view_as_real) +from typing import List, Optional, Tuple, Union +from torch.utils._foreach_utils import _get_fused_kernels_supported_devices + +__all__ = ["AdamW", "adamw"] + + +class AdamW(Optimizer): + def __init__( + self, + params: ParamsT, + lr: Union[float, Tensor] = 1e-3, + betas: Tuple[float, float] = (0.9, 0.999), + eps: float = 1e-8, + weight_decay: float = 1e-2, + amsgrad: bool = False, + *, + maximize: bool = False, + foreach: Optional[bool] = None, + capturable: bool = False, + differentiable: bool = False, + fused: Optional[bool] = None, + ): + if not 0.0 <= lr: + raise ValueError(f"Invalid learning rate: {lr}") + if isinstance(lr, Tensor) and foreach and not capturable: + raise ValueError("lr as a Tensor is not supported for capturable=False and foreach=True") + if not 0.0 <= eps: + raise ValueError(f"Invalid epsilon value: {eps}") + if not 0.0 <= betas[0] < 1.0: + raise ValueError(f"Invalid beta parameter at index 0: {betas[0]}") + if not 0.0 <= betas[1] < 1.0: + raise ValueError(f"Invalid beta parameter at index 1: {betas[1]}") + if not 0.0 <= weight_decay: + raise ValueError(f"Invalid weight_decay value: {weight_decay}") + defaults = dict( + lr=lr, + betas=betas, + eps=eps, + weight_decay=weight_decay, + amsgrad=amsgrad, + foreach=foreach, + maximize=maximize, + capturable=capturable, + differentiable=differentiable, + fused=fused, + ) + super().__init__(params, defaults) + + if fused: + if differentiable: + raise RuntimeError("`fused` does not support `differentiable`") + self._step_supports_amp_scaling = True + # TODO(crcrpar): [low prec params & their higher prec copy] + # Suppor AMP with FP16/BF16 model params which would need + # higher prec copy of params to do update math in higher prec to + # alleviate the loss of information. 
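+            # Descriptive note: the checks below run eagerly at construction
+            # time; fused AdamW requires every param to be a floating-point
+            # tensor on a device with fused kernel support, and it cannot be
+            # combined with foreach.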
+ fused_supported_devices = _get_fused_kernels_supported_devices() + if not all( + p.device.type in fused_supported_devices and + torch.is_floating_point(p) + for pg in self.param_groups for p in pg['params'] + ): + raise RuntimeError("`fused=True` requires all the params to be floating point Tensors of " + f"supported devices: {fused_supported_devices}.") + if foreach: + raise RuntimeError("`fused` and `foreach` cannot be `True` together.") + + def __setstate__(self, state): + super().__setstate__(state) + for group in self.param_groups: + group.setdefault("amsgrad", False) + group.setdefault("maximize", False) + group.setdefault("foreach", None) + group.setdefault("capturable", False) + group.setdefault("differentiable", False) + group.setdefault("fused", None) + state_values = list(self.state.values()) + step_is_tensor = (len(state_values) != 0) and torch.is_tensor( + state_values[0]["step"] + ) + if not step_is_tensor: + for s in state_values: + s["step"] = torch.tensor(float(s["step"]), dtype=torch.float32) + + def _init_group( + self, + group, + params_with_grad, + grads, + amsgrad, + exp_avgs, + exp_avg_sqs, + max_exp_avg_sqs, + state_steps, + ): + has_complex = False + for p in group["params"]: + if p.grad is None: + continue + has_complex |= torch.is_complex(p) + params_with_grad.append(p) + if p.grad.is_sparse: + raise RuntimeError("AdamW does not support sparse gradients") + grads.append(p.grad) + + state = self.state[p] + + # State initialization + if len(state) == 0: + # note(crcrpar): Deliberately host `step` on CPU if both capturable and fused are off. + # This is because kernel launches are costly on CUDA and XLA. + state["step"] = ( + torch.zeros((), dtype=torch.float32, device=p.device) + if group["capturable"] or group["fused"] + else torch.tensor(0.0, dtype=torch.float32) + ) + # Exponential moving average of gradient values + state["exp_avg"] = torch.zeros_like( + p, memory_format=torch.preserve_format + ) + # Exponential moving average of squared gradient values + state["exp_avg_sq"] = torch.zeros_like( + p, memory_format=torch.preserve_format + ) + if amsgrad: + # Maintains max of all exp. moving avg. of sq. grad. values + state["max_exp_avg_sq"] = torch.zeros_like( + p, memory_format=torch.preserve_format + ) + + exp_avgs.append(state["exp_avg"]) + exp_avg_sqs.append(state["exp_avg_sq"]) + + if group['amsgrad']: + max_exp_avg_sqs.append(state["max_exp_avg_sq"]) + if group['differentiable'] and state['step'].requires_grad: + raise RuntimeError('`requires_grad` is not supported for `step` in differentiable mode') + + # Foreach without capturable does not support a tensor lr + if group['foreach'] and isinstance(group['lr'], Tensor) and not group['capturable']: + raise RuntimeError('lr as a Tensor is not supported for capturable=False and foreach=True') + + state_steps.append(state["step"]) + return has_complex + + @_use_grad_for_differentiable + def step(self, closure=None): + """Perform a single optimization step. + + Args: + closure (Callable, optional): A closure that reevaluates the model + and returns the loss. 
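+
+        Example (illustrative sketch; ``optimizer``, ``model``, ``loss_fn``,
+        ``inputs`` and ``targets`` are assumed to be defined by the caller)::
+
+            def closure():
+                optimizer.zero_grad()
+                loss = loss_fn(model(inputs), targets)
+                loss.backward()
+                return loss
+
+            optimizer.step(closure)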
+ """ + self._cuda_graph_capture_health_check() + + loss = None + if closure is not None: + with torch.enable_grad(): + loss = closure() + + for group in self.param_groups: + params_with_grad = [] + grads = [] + exp_avgs = [] + exp_avg_sqs = [] + max_exp_avg_sqs = [] + state_steps = [] + amsgrad = group["amsgrad"] + beta1, beta2 = group["betas"] + + has_complex = self._init_group( + group, + params_with_grad, + grads, + amsgrad, + exp_avgs, + exp_avg_sqs, + max_exp_avg_sqs, + state_steps, + ) + + adamw( + params_with_grad, + grads, + exp_avgs, + exp_avg_sqs, + max_exp_avg_sqs, + state_steps, + amsgrad=amsgrad, + beta1=beta1, + beta2=beta2, + lr=group["lr"], + weight_decay=group["weight_decay"], + eps=group["eps"], + maximize=group["maximize"], + foreach=group["foreach"], + capturable=group["capturable"], + differentiable=group["differentiable"], + fused=group["fused"], + grad_scale=getattr(self, "grad_scale", None), + found_inf=getattr(self, "found_inf", None), + has_complex=has_complex, + ) + + return loss + + +AdamW.__doc__ = r"""Implements AdamW algorithm. + + .. math:: + \begin{aligned} + &\rule{110mm}{0.4pt} \\ + &\textbf{input} : \gamma \text{(lr)}, \: \beta_1, \beta_2 + \text{(betas)}, \: \theta_0 \text{(params)}, \: f(\theta) \text{(objective)}, + \: \epsilon \text{ (epsilon)} \\ + &\hspace{13mm} \lambda \text{(weight decay)}, \: \textit{amsgrad}, + \: \textit{maximize} \\ + &\textbf{initialize} : m_0 \leftarrow 0 \text{ (first moment)}, v_0 \leftarrow 0 + \text{ ( second moment)}, \: \widehat{v_0}^{max}\leftarrow 0 \\[-1.ex] + &\rule{110mm}{0.4pt} \\ + &\textbf{for} \: t=1 \: \textbf{to} \: \ldots \: \textbf{do} \\ + + &\hspace{5mm}\textbf{if} \: \textit{maximize}: \\ + &\hspace{10mm}g_t \leftarrow -\nabla_{\theta} f_t (\theta_{t-1}) \\ + &\hspace{5mm}\textbf{else} \\ + &\hspace{10mm}g_t \leftarrow \nabla_{\theta} f_t (\theta_{t-1}) \\ + &\hspace{5mm} \theta_t \leftarrow \theta_{t-1} - \gamma \lambda \theta_{t-1} \\ + &\hspace{5mm}m_t \leftarrow \beta_1 m_{t-1} + (1 - \beta_1) g_t \\ + &\hspace{5mm}v_t \leftarrow \beta_2 v_{t-1} + (1-\beta_2) g^2_t \\ + &\hspace{5mm}\widehat{m_t} \leftarrow m_t/\big(1-\beta_1^t \big) \\ + &\hspace{5mm}\widehat{v_t} \leftarrow v_t/\big(1-\beta_2^t \big) \\ + &\hspace{5mm}\textbf{if} \: amsgrad \\ + &\hspace{10mm}\widehat{v_t}^{max} \leftarrow \mathrm{max}(\widehat{v_t}^{max}, + \widehat{v_t}) \\ + &\hspace{10mm}\theta_t \leftarrow \theta_t - \gamma \widehat{m_t}/ + \big(\sqrt{\widehat{v_t}^{max}} + \epsilon \big) \\ + &\hspace{5mm}\textbf{else} \\ + &\hspace{10mm}\theta_t \leftarrow \theta_t - \gamma \widehat{m_t}/ + \big(\sqrt{\widehat{v_t}} + \epsilon \big) \\ + &\rule{110mm}{0.4pt} \\[-1.ex] + &\bf{return} \: \theta_t \\[-1.ex] + &\rule{110mm}{0.4pt} \\[-1.ex] + \end{aligned} + + For further details regarding the algorithm we refer to `Decoupled Weight Decay Regularization`_. + """ + fr""" + Args: + params (iterable): iterable of parameters to optimize or dicts defining + parameter groups + lr (float, Tensor, optional): learning rate (default: 1e-3). A tensor LR + is not yet supported for all our implementations. Please use a float + LR if you are not also specifying fused=True or capturable=True. 
+ betas (Tuple[float, float], optional): coefficients used for computing + running averages of gradient and its square (default: (0.9, 0.999)) + eps (float, optional): term added to the denominator to improve + numerical stability (default: 1e-8) + weight_decay (float, optional): weight decay coefficient (default: 1e-2) + amsgrad (bool, optional): whether to use the AMSGrad variant of this + algorithm from the paper `On the Convergence of Adam and Beyond`_ + (default: False) + {_maximize_doc} + {_foreach_doc} + {_capturable_doc} + {_differentiable_doc} + {_fused_doc} + .. _Decoupled Weight Decay Regularization: + https://arxiv.org/abs/1711.05101 + .. _On the Convergence of Adam and Beyond: + https://openreview.net/forum?id=ryQu7f-RZ + + """ + + +def adamw( + params: List[Tensor], + grads: List[Tensor], + exp_avgs: List[Tensor], + exp_avg_sqs: List[Tensor], + max_exp_avg_sqs: List[Tensor], + state_steps: List[Tensor], + # kwonly args with defaults are not supported by functions compiled with torchscript issue #70627 + # setting this as kwarg for now as functional API is compiled by torch/distributed/optim + foreach: Optional[bool] = None, + capturable: bool = False, + differentiable: bool = False, + fused: Optional[bool] = None, + grad_scale: Optional[Tensor] = None, + found_inf: Optional[Tensor] = None, + has_complex: bool = False, + *, + amsgrad: bool, + beta1: float, + beta2: float, + lr: Union[float, Tensor], + weight_decay: float, + eps: float, + maximize: bool, +): + r"""Functional API that performs AdamW algorithm computation. + + See :class:`~torch.optim.AdamW` for details. + """ + if not torch._utils.is_compiling() and not all(isinstance(t, torch.Tensor) for t in state_steps): + raise RuntimeError( + "API has changed, `state_steps` argument must contain a list of singleton tensors" + ) + + # Respect when the user inputs False/True for foreach or fused. We only want to change + # the default when neither have been user-specified. Note that we default to foreach + # and pass False to use_fused. This is not a mistake--we want to give the fused impl + # bake-in time before making it the default, even if it is typically faster. + if fused is None and foreach is None: + _, foreach = _default_to_fused_or_foreach(params, differentiable, use_fused=False) + # Do not flip on foreach for the unsupported case where lr is a Tensor and capturable=False. 
+ if foreach and isinstance(lr, Tensor) and not capturable: + foreach = False + if fused is None: + fused = False + if foreach is None: + foreach = False + + if foreach and torch.jit.is_scripting(): + raise RuntimeError("torch.jit.script not supported with foreach optimizers") + if fused and torch.jit.is_scripting(): + raise RuntimeError("torch.jit.script not supported with fused optimizers") + + if fused and not torch.jit.is_scripting(): + func = _fused_adamw + elif foreach and not torch.jit.is_scripting(): + func = _multi_tensor_adamw + else: + func = _single_tensor_adamw + + func( + params, + grads, + exp_avgs, + exp_avg_sqs, + max_exp_avg_sqs, + state_steps, + amsgrad=amsgrad, + beta1=beta1, + beta2=beta2, + lr=lr, + weight_decay=weight_decay, + eps=eps, + maximize=maximize, + capturable=capturable, + differentiable=differentiable, + grad_scale=grad_scale, + found_inf=found_inf, + has_complex=has_complex, + ) + + +def _single_tensor_adamw( + params: List[Tensor], + grads: List[Tensor], + exp_avgs: List[Tensor], + exp_avg_sqs: List[Tensor], + max_exp_avg_sqs: List[Tensor], + state_steps: List[Tensor], + grad_scale: Optional[Tensor], + found_inf: Optional[Tensor], + *, + amsgrad: bool, + beta1: float, + beta2: float, + lr: Union[Tensor, float], + weight_decay: float, + eps: float, + maximize: bool, + capturable: bool, + differentiable: bool, + has_complex: bool, +): + + assert grad_scale is None and found_inf is None + + if torch.jit.is_scripting(): + # this assert is due to JIT being dumb and not realizing that the ops below + # have overloads to handle both float and Tensor lrs, so we just assert it's + # a float since most people using JIT are using floats + assert isinstance(lr, float) + + for i, param in enumerate(params): + grad = grads[i] if not maximize else -grads[i] + exp_avg = exp_avgs[i] + exp_avg_sq = exp_avg_sqs[i] + step_t = state_steps[i] + + # If compiling, the compiler will handle cudagraph checks, see note [torch.compile x capturable] + if not torch._utils.is_compiling() and capturable: + assert ( + (param.is_cuda and step_t.is_cuda) or (param.is_xla and step_t.is_xla) + ), "If capturable=True, params and state_steps must be CUDA or XLA tensors." + + if torch.is_complex(param): + grad = torch.view_as_real(grad) + exp_avg = torch.view_as_real(exp_avg) + exp_avg_sq = torch.view_as_real(exp_avg_sq) + if amsgrad: + max_exp_avg_sqs[i] = torch.view_as_real(max_exp_avg_sqs[i]) + param = torch.view_as_real(param) + + # update step + step_t += 1 + + # Perform stepweight decay + param.mul_(1 - lr * weight_decay) + + # Decay the first and second moment running average coefficient + exp_avg.lerp_(grad, 1 - beta1) + exp_avg_sq.mul_(beta2).addcmul_(grad, grad, value=1 - beta2) + + if capturable or differentiable: + step = step_t + + bias_correction1 = 1 - beta1 ** step + bias_correction2 = 1 - beta2 ** step + + step_size = lr / bias_correction1 + step_size_neg = step_size.neg() + + bias_correction2_sqrt = bias_correction2.sqrt() + + if amsgrad: + # Maintains the maximum of all 2nd moment running avg. till now + if differentiable: + max_exp_avg_sq = max_exp_avg_sqs[i].clone() + else: + max_exp_avg_sq = max_exp_avg_sqs[i] + + max_exp_avg_sqs[i].copy_(torch.maximum(max_exp_avg_sq, exp_avg_sq)) + + # Uses the max. for normalizing running avg. 
of gradient + # Folds in (admittedly ugly) 1-elem step_size math here to avoid extra param-set-sized read+write + # (can't fold it into addcdiv_ below because addcdiv_ requires value is a Number, not a Tensor) + denom = ( + max_exp_avg_sqs[i].sqrt() / (bias_correction2_sqrt * step_size_neg) + ).add_(eps / step_size_neg) + else: + denom = ( + exp_avg_sq.sqrt() / (bias_correction2_sqrt * step_size_neg) + ).add_(eps / step_size_neg) + + param.addcdiv_(exp_avg, denom) + else: + step = _get_value(step_t) + + bias_correction1 = 1 - beta1 ** step + bias_correction2 = 1 - beta2 ** step + + step_size = lr / bias_correction1 + + bias_correction2_sqrt = _dispatch_sqrt(bias_correction2) + + if amsgrad: + # Maintains the maximum of all 2nd moment running avg. till now + torch.maximum(max_exp_avg_sqs[i], exp_avg_sq, out=max_exp_avg_sqs[i]) + + # Use the max. for normalizing running avg. of gradient + denom = (max_exp_avg_sqs[i].sqrt() / bias_correction2_sqrt).add_(eps) + else: + denom = (exp_avg_sq.sqrt() / bias_correction2_sqrt).add_(eps) + + param.addcdiv_(exp_avg, denom, value=-step_size) + + # Lastly, switch back to complex view + if amsgrad and torch.is_complex(params[i]): + max_exp_avg_sqs[i] = torch.view_as_complex(max_exp_avg_sqs[i]) + + +def _multi_tensor_adamw( + params: List[Tensor], + grads: List[Tensor], + exp_avgs: List[Tensor], + exp_avg_sqs: List[Tensor], + max_exp_avg_sqs: List[Tensor], + state_steps: List[Tensor], + grad_scale: Optional[Tensor], + found_inf: Optional[Tensor], + *, + amsgrad: bool, + beta1: float, + beta2: float, + lr: Union[Tensor, float], + weight_decay: float, + eps: float, + maximize: bool, + capturable: bool, + differentiable: bool, + has_complex: bool, +): + if len(params) == 0: + return + + if isinstance(lr, Tensor) and not capturable: + raise RuntimeError("lr as a Tensor is not supported for capturable=False and foreach=True") + + # If compiling, the compiler will handle cudagraph checks, see note [torch.compile x capturable] + if not torch._utils.is_compiling() and capturable: + assert all( + p.is_cuda and step.is_cuda for p, step in zip(params, state_steps) + ), "If capturable=True, params and state_steps must be CUDA tensors." + + assert not differentiable, "_foreach ops don't support autograd" + + assert grad_scale is None and found_inf is None + + grouped_tensors = Optimizer._group_tensors_by_device_and_dtype([ + params, grads, exp_avgs, exp_avg_sqs, max_exp_avg_sqs, state_steps]) + for (( + device_params, + device_grads, + device_exp_avgs, + device_exp_avg_sqs, + device_max_exp_avg_sqs, + device_state_steps, + ), _) in grouped_tensors.values(): + if maximize: + device_grads = torch._foreach_neg(device_grads) + + if has_complex: + if amsgrad: + _view_as_real(device_params, device_grads, device_exp_avgs, device_exp_avg_sqs, device_max_exp_avg_sqs) + else: + _view_as_real(device_params, device_grads, device_exp_avgs, device_exp_avg_sqs) + + # Update steps + # If steps are on CPU, foreach will fall back to the slow path, which is a for-loop calling t.add(1) over + # and over. 1 will then be wrapped into a Tensor over and over again, which is slower than if we just + # wrapped it once now. The alpha is required to assure we go to the right overload. 
+ if device_state_steps[0].is_cpu: + torch._foreach_add_(device_state_steps, torch.tensor(1.0, device='cpu'), alpha=1.0) + else: + torch._foreach_add_(device_state_steps, 1) + + # Perform stepweight decay + if weight_decay != 0: + torch._foreach_mul_(device_params, 1 - lr * weight_decay) + + # Decay the first and second moment running average coefficient + torch._foreach_lerp_(device_exp_avgs, device_grads, 1 - beta1) + + torch._foreach_mul_(device_exp_avg_sqs, beta2) + torch._foreach_addcmul_(device_exp_avg_sqs, device_grads, device_grads, 1 - beta2) + + # Delete the local intermediate since it won't be used anymore to save on peak memory + del device_grads + + if capturable: + bias_correction1 = torch._foreach_pow(beta1, device_state_steps) + bias_correction2 = torch._foreach_pow(beta2, device_state_steps) + # foreach_sub doesn't allow a scalar as the first arg + torch._foreach_sub_(bias_correction1, 1) + torch._foreach_sub_(bias_correction2, 1) + # we do not negate bias_correction1 as it'll need to be negated later anyway + torch._foreach_neg_(bias_correction2) + + # foreach_div doesn't allow a scalar as the first arg + torch._foreach_div_(bias_correction1, lr) + torch._foreach_reciprocal_(bias_correction1) + + torch._foreach_sqrt_(bias_correction2) + + # Re-assign for clarity as we maintain minimal intermediates: we'll have + # step_size = - lr / (1 - beta1 ^ t) where t = num_steps + # bias_correction2_sqrt = sqrt(1 - beta2 ^ t) + step_size = bias_correction1 + bias_correction2_sqrt = bias_correction2 + + if amsgrad: + # Maintains the maximum of all 2nd moment running avg. till now + torch._foreach_maximum_(device_max_exp_avg_sqs, device_exp_avg_sqs) + + # Use the max. for normalizing running avg. of gradient + exp_avg_sq_sqrt = torch._foreach_sqrt(device_max_exp_avg_sqs) + else: + exp_avg_sq_sqrt = torch._foreach_sqrt(device_exp_avg_sqs) + + torch._foreach_div_(exp_avg_sq_sqrt, bias_correction2_sqrt) + torch._foreach_add_(exp_avg_sq_sqrt, eps) + torch._foreach_div_(exp_avg_sq_sqrt, step_size) + + # at this point, exp_avg_sq_sqrt = - (1 - beta^t) * [sqrt(exp_avg_sq / (1 - beta2^t)) + eps] / lr + torch._foreach_addcdiv_(device_params, device_exp_avgs, exp_avg_sq_sqrt) + else: + bias_correction1 = [1 - beta1 ** _get_value(step) for step in device_state_steps] + bias_correction2 = [1 - beta2 ** _get_value(step) for step in device_state_steps] + + step_size = _stack_if_compiling([(lr / bc) * -1 for bc in bias_correction1]) + + bias_correction2_sqrt = [_dispatch_sqrt(bc) for bc in bias_correction2] + + if amsgrad: + # Maintains the maximum of all 2nd moment running avg. till now + torch._foreach_maximum_(device_max_exp_avg_sqs, device_exp_avg_sqs) + + # Use the max. for normalizing running avg. of gradient + exp_avg_sq_sqrt = torch._foreach_sqrt(device_max_exp_avg_sqs) + else: + exp_avg_sq_sqrt = torch._foreach_sqrt(device_exp_avg_sqs) + + torch._foreach_div_(exp_avg_sq_sqrt, bias_correction2_sqrt) + torch._foreach_add_(exp_avg_sq_sqrt, eps) + torch._foreach_addcdiv_(device_params, device_exp_avgs, exp_avg_sq_sqrt, step_size) + + +def _fused_adamw( + params: List[Tensor], + grads: List[Tensor], + exp_avgs: List[Tensor], + exp_avg_sqs: List[Tensor], + max_exp_avg_sqs: List[Tensor], + state_steps: List[Tensor], + grad_scale: Optional[Tensor], + found_inf: Optional[Tensor], + *, + amsgrad: bool, + beta1: float, + beta2: float, + lr: Union[float, Tensor], + weight_decay: float, + eps: float, + maximize: bool, + capturable: bool, # Needed for consistency. 
+ differentiable: bool, + has_complex: bool, +) -> None: + if not params: + return + if differentiable: + raise RuntimeError("AdamW with fused=True does not support differentiable=True") + + grad_scale_dict = {grad_scale.device: grad_scale} if grad_scale is not None else None + found_inf_dict = {found_inf.device: found_inf} if found_inf is not None else None + + # We only shuffle around the lr when it is a Tensor and on CUDA, otherwise, we prefer + # treating it as a scalar. + lr_dict = {lr.device: lr} if isinstance(lr, Tensor) and str(lr.device) != "cpu" else None + + grouped_tensors = Optimizer._group_tensors_by_device_and_dtype( + [params, grads, exp_avgs, exp_avg_sqs, max_exp_avg_sqs, state_steps]) + for (device, _), ((device_params, + device_grads, + device_exp_avgs, + device_exp_avg_sqs, + device_max_exp_avg_sqs, + device_state_steps,), _) in grouped_tensors.items(): + device_grad_scale, device_found_inf = None, None + if grad_scale is not None: + if device not in grad_scale_dict: + grad_scale_dict[device] = grad_scale.to(device, non_blocking=True) + device_grad_scale = grad_scale_dict[device] + if found_inf is not None: + if device not in found_inf_dict: + found_inf_dict[device] = found_inf.to(device, non_blocking=True) + device_found_inf = found_inf_dict[device] + if lr_dict is not None and device not in lr_dict: + lr_dict[device] = lr.to(device=device, non_blocking=True) + lr = lr_dict[device] + torch._foreach_add_(device_state_steps, 1) + torch._fused_adamw_( + device_params, + device_grads, + device_exp_avgs, + device_exp_avg_sqs, + device_max_exp_avg_sqs, + device_state_steps, + amsgrad=amsgrad, + lr=lr, + beta1=beta1, + beta2=beta2, + weight_decay=weight_decay, + eps=eps, + maximize=maximize, + grad_scale=device_grad_scale, + found_inf=device_found_inf, + ) + if device_found_inf is not None: + torch._foreach_sub_(device_state_steps, [device_found_inf] * len(device_state_steps)) diff --git a/env-llmeval/lib/python3.10/site-packages/torch/optim/asgd.py b/env-llmeval/lib/python3.10/site-packages/torch/optim/asgd.py new file mode 100644 index 0000000000000000000000000000000000000000..104550361527feaae9aa3b4a7214dfb38d1432ba --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torch/optim/asgd.py @@ -0,0 +1,392 @@ +import torch +from torch import Tensor + +from .optimizer import (Optimizer, _use_grad_for_differentiable, _get_value, _default_to_fused_or_foreach, + _differentiable_doc, _foreach_doc, _maximize_doc, _capturable_doc, _view_as_real) +from torch._utils import is_compiling +from typing import List, Optional + +__all__ = ["ASGD", "asgd"] + +def _to_tensor(x, device=None): + if not isinstance(x, torch.Tensor): + return torch.tensor(x, device=device) + + return x + +class ASGD(Optimizer): + def __init__( + self, + params, + lr=1e-2, + lambd=1e-4, + alpha=0.75, + t0=1e6, + weight_decay=0, + foreach: Optional[bool] = None, + maximize: bool = False, + differentiable: bool = False, + capturable: bool = False, + ): + if not 0.0 <= lr: + raise ValueError(f"Invalid learning rate: {lr}") + if not 0.0 <= weight_decay: + raise ValueError(f"Invalid weight_decay value: {weight_decay}") + + if foreach is False and capturable: + raise ValueError("Capturable not supported with single tensor ASGD") + + defaults = dict( + lr=lr, + lambd=lambd, + alpha=alpha, + t0=t0, + weight_decay=weight_decay, + foreach=foreach, + maximize=maximize, + differentiable=differentiable, + capturable=capturable, + ) + super().__init__(params, defaults) + + def __setstate__(self, state): +
super().__setstate__(state) + for group in self.param_groups: + group.setdefault("foreach", None) + group.setdefault("maximize", False) + group.setdefault("differentiable", False) + group.setdefault("capturable", False) + state_values = list(self.state.values()) + step_is_tensor = (len(state_values) != 0) and torch.is_tensor( + state_values[0]["step"] + ) + if not step_is_tensor: + for s in state_values: + s["step"] = torch.tensor(float(s["step"]), dtype=torch.float32) + eta_is_tensor = (len(state_values) != 0) and torch.is_tensor( + state_values[0]["eta"] + ) + if not eta_is_tensor: + for s in state_values: + s["eta"] = torch.tensor(s["eta"], dtype=torch.float32) + mu_is_tensor = (len(state_values) != 0) and torch.is_tensor( + state_values[0]["mu"] + ) + if not mu_is_tensor: + for s in state_values: + s["mu"] = torch.tensor(float(s["mu"]), dtype=torch.float32) + + def _init_group(self, group, params_with_grad, grads, mus, axs, etas, state_steps): + has_complex = False + for p in group["params"]: + if p.grad is not None: + has_complex |= torch.is_complex(p) + params_with_grad.append(p) + if p.grad.is_sparse: + raise RuntimeError("ASGD does not support sparse gradients") + grads.append(p.grad) + + state = self.state[p] + # State initialization + if len(state) == 0: + state["step"] = torch.zeros((), device=p.device, dtype=torch.float32) + state["eta"] = torch.tensor(group["lr"], device=p.device, dtype=torch.float32) + state["mu"] = torch.ones((), device=p.device, dtype=torch.float32) + state["ax"] = torch.zeros_like( + p, memory_format=torch.preserve_format + ) + + mus.append(state["mu"]) + axs.append(state["ax"]) + etas.append(state["eta"]) + state_steps.append(state["step"]) + return has_complex + + @_use_grad_for_differentiable + def step(self, closure=None): + """Perform a single optimization step. + + Args: + closure (Callable, optional): A closure that reevaluates the model + and returns the loss. + """ + loss = None + if closure is not None: + with torch.enable_grad(): + loss = closure() + + for group in self.param_groups: + params_with_grad = [] + grads = [] + mus = [] + axs = [] + etas = [] + state_steps = [] + + has_complex = self._init_group(group, params_with_grad, grads, mus, axs, etas, state_steps) + + asgd( + params_with_grad, + grads, + axs, + mus, + etas, + state_steps, + lambd=group["lambd"], + lr=group["lr"], + t0=group["t0"], + alpha=group["alpha"], + weight_decay=group["weight_decay"], + foreach=group["foreach"], + maximize=group["maximize"], + differentiable=group["differentiable"], + capturable=group["capturable"], + has_complex=has_complex, + ) + + return loss + + +ASGD.__doc__ = fr"""Implements Averaged Stochastic Gradient Descent. + + It has been proposed in `Acceleration of stochastic approximation by + averaging`_. + + Args: + params (iterable): iterable of parameters to optimize or dicts defining + parameter groups + lr (float, optional): learning rate (default: 1e-2) + lambd (float, optional): decay term (default: 1e-4) + alpha (float, optional): power for eta update (default: 0.75) + t0 (float, optional): point at which to start averaging (default: 1e6) + weight_decay (float, optional): weight decay (L2 penalty) (default: 0) + {_foreach_doc} + {_maximize_doc} + {_differentiable_doc} + {_capturable_doc} For ASGD, capturable is only supported when foreach is True. + + .. 
_Acceleration of stochastic approximation by averaging: + https://dl.acm.org/citation.cfm?id=131098 + + """ + + +def asgd( + params: List[Tensor], + grads: List[Tensor], + axs: List[Tensor], + mus: List[Tensor], + etas: List[Tensor], + state_steps: List[Tensor], + # kwonly args with defaults are not supported by functions compiled with torchscript issue #70627 + # setting this as kwarg for now as functional API is compiled by torch/distributed/optim + foreach: Optional[bool] = None, + maximize: bool = False, + differentiable: bool = False, + capturable: bool = False, + has_complex: bool = False, + *, + lambd: float, + lr: float, + t0: float, + alpha: float, + weight_decay: float, +): + r"""Functional API that performs asgd algorithm computation. + + See :class:`~torch.optim.ASGD` for details. + """ + if foreach is None: + _, foreach = _default_to_fused_or_foreach(params, differentiable, use_fused=False) + + if foreach and torch.jit.is_scripting(): + raise RuntimeError("torch.jit.script not supported with foreach optimizers") + + if foreach and not torch.jit.is_scripting(): + func = _multi_tensor_asgd + else: + if capturable and not is_compiling(): + raise RuntimeError("Capturable not supported with single tensor ASGD") + func = _single_tensor_asgd + + func( + params, + grads, + axs, + mus, + etas, + state_steps, + lambd=lambd, + lr=lr, + t0=t0, + alpha=alpha, + weight_decay=weight_decay, + maximize=maximize, + differentiable=differentiable, + capturable=capturable, + has_complex=has_complex, + ) + + +def _single_tensor_asgd( + params: List[Tensor], + grads: List[Tensor], + axs: List[Tensor], + mus: List[Tensor], + etas: List[Tensor], + state_steps: List[Tensor], + *, + lambd: float, + lr: float, + t0: float, + alpha: float, + weight_decay: float, + maximize: bool, + differentiable: bool, + capturable: bool, + has_complex: bool, +): + for i, param in enumerate(params): + grad = grads[i] + grad = grad if not maximize else -grad + mu = mus[i] + ax = axs[i] + eta = etas[i] + step_t = state_steps[i] + + if torch.is_complex(param): + grad = torch.view_as_real(grad) + param = torch.view_as_real(param) + ax = torch.view_as_real(ax) + + # update step + step_t += 1 + step = _get_value(step_t) + + if weight_decay != 0: + grad = grad.add(param, alpha=weight_decay) + + eta_value = _get_value(eta) + # decay term + param.mul_(1 - lambd * eta_value) + + # update parameter + param.add_(grad, alpha=-eta_value) + + # averaging + if is_compiling() or mu.item() != 1: + ax.add_(param.sub(ax).mul(mu)) + else: + ax.copy_(param) + + new_eta = _to_tensor(lr / ((1 + lambd * lr * step) ** alpha)) + eta.copy_(new_eta) + new_mu = _to_tensor(1 / max(1, step - t0)) + mu.copy_(new_mu) + + +def _multi_tensor_asgd( + params: List[Tensor], + grads: List[Tensor], + axs: List[Tensor], + mus: List[Tensor], + etas: List[Tensor], + state_steps: List[Tensor], + *, + lambd: float, + lr: float, + t0: float, + alpha: float, + weight_decay: float, + maximize: bool, + differentiable: bool, + capturable: bool, + has_complex: bool, +): + + if len(params) == 0: + return + + assert not differentiable, "_foreach ops don't support autograd" + + grouped_tensors = Optimizer._group_tensors_by_device_and_dtype([params, grads, axs, mus, etas, state_steps]) + for ((device, _), ((grouped_params, grouped_grads, grouped_axs, grouped_mus, + grouped_etas, grouped_state_steps), _)) in grouped_tensors.items(): + if maximize: + grouped_grads = torch._foreach_neg(grouped_grads) + + grouped_grads = list(grouped_grads) + if has_complex: + 
_view_as_real(grouped_params, grouped_grads, grouped_axs) + + # Update steps + # If steps are on CPU, foreach will fall back to the slow path, which is a for-loop calling t.add(1) over + # and over. 1 will then be wrapped into a Tensor over and over again, which is slower than if we just + # wrapped it once now. The alpha is required to assure we go to the right overload. + if grouped_state_steps[0].is_cpu: + torch._foreach_add_(grouped_state_steps, torch.tensor(1.0, device='cpu'), alpha=1.0) + else: + torch._foreach_add_(grouped_state_steps, 1) + + # intermediate = grad + param * lambd + if weight_decay != 0: + if maximize: + torch._foreach_add_(grouped_grads, grouped_params, alpha=weight_decay) + intermediate = grouped_grads + else: + intermediate = torch._foreach_add(grouped_grads, grouped_params, alpha=weight_decay) + + torch._foreach_add_(intermediate, grouped_params, alpha=lambd) + else: + intermediate = torch._foreach_add(grouped_grads, grouped_params, alpha=lambd) + + # update param + # param * (1 - lambd * eta) - eta * grad + # => param - param * lambd * eta - eta * grad + # => param - eta * intermediate + torch._foreach_addcmul_(grouped_params, intermediate, grouped_etas, value=-1) + del intermediate + + # update grouped_axs + # averaging: ax = ax + mu * (param - ax) + # Note (mlazos): We can't use lerp here since it requires weight to be float64 + # and our grouping code requires dtypes to match for all tensors in a group (and it should, since + # we use the mus in other places) + # all dtypes need to match, so we could introduce a cast in a loop + # but since this only adds one additional kernel launch, this looks like the cleaner + # and faster solution + intermediate = torch._foreach_sub(grouped_params, grouped_axs) + torch._foreach_addcmul_(grouped_axs, intermediate, grouped_mus) + del intermediate + + if capturable: + # update grouped_mus + new_mus = torch._foreach_sub(grouped_state_steps, t0) + torch._foreach_maximum_(new_mus, 1.0) + torch._foreach_reciprocal_(new_mus) + torch._foreach_copy_(grouped_mus, new_mus) + del new_mus + + # update eta = lr / (1 + lambd * lr * step^alpha) + new_etas = torch._foreach_pow(grouped_state_steps, alpha) + torch._foreach_mul_(new_etas, lambd) + torch._foreach_mul_(new_etas, lr) + torch._foreach_add_(new_etas, 1) + torch._foreach_reciprocal_(new_etas) + torch._foreach_mul_(new_etas, lr) + torch._foreach_copy_(grouped_etas, new_etas) + else: + step = grouped_state_steps[0].item() + new_etas = [] + new_mus = [] + + for i in range(len(grouped_mus)): + new_eta = _to_tensor( + lr / (1 + lambd * lr * step ** alpha), device=device + ) + new_etas.append(new_eta) + new_mu = _to_tensor(1 / max(1, step - t0), device=device) + new_mus.append(new_mu) + + torch._foreach_copy_(grouped_etas, new_etas) + torch._foreach_copy_(grouped_mus, new_mus) diff --git a/env-llmeval/lib/python3.10/site-packages/torch/optim/asgd.pyi b/env-llmeval/lib/python3.10/site-packages/torch/optim/asgd.pyi new file mode 100644 index 0000000000000000000000000000000000000000..634b0d162cebdbbb5ae8065a5318188da26f3246 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torch/optim/asgd.pyi @@ -0,0 +1,12 @@ +from .optimizer import Optimizer, ParamsT + +class ASGD(Optimizer): + def __init__( + self, + params: ParamsT, + lr: float = ..., + lambd: float = ..., + alpha: float = ..., + t0: float = ..., + weight_decay: float = ..., + ) -> None: ... 
diff --git a/env-llmeval/lib/python3.10/site-packages/torch/optim/lr_scheduler.py b/env-llmeval/lib/python3.10/site-packages/torch/optim/lr_scheduler.py new file mode 100644 index 0000000000000000000000000000000000000000..df659b61a998b86085d31cc34013bea129ccefba --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torch/optim/lr_scheduler.py @@ -0,0 +1,1790 @@ +import types +import math +from torch import inf +from functools import wraps, partial +import warnings +import weakref +from collections import Counter +from bisect import bisect_right + +from .optimizer import Optimizer + +__all__ = ['LambdaLR', 'MultiplicativeLR', 'StepLR', 'MultiStepLR', 'ConstantLR', 'LinearLR', + 'ExponentialLR', 'SequentialLR', 'CosineAnnealingLR', 'ChainedScheduler', 'ReduceLROnPlateau', + 'CyclicLR', 'CosineAnnealingWarmRestarts', 'OneCycleLR', 'PolynomialLR', 'LRScheduler'] + +EPOCH_DEPRECATION_WARNING = ( + "The epoch parameter in `scheduler.step()` was not necessary and is being " + "deprecated where possible. Please use `scheduler.step()` to step the " + "scheduler. During the deprecation, if epoch is different from None, the " + "closed form is used instead of the new chainable form, where available. " + "Please open an issue if you are unable to replicate your use case: " + "https://github.com/pytorch/pytorch/issues/new/choose." +) + +def _check_verbose_deprecated_warning(verbose): + """Raises a warning when verbose is not the default value.""" + if verbose != "deprecated": + warnings.warn("The verbose parameter is deprecated. Please use get_last_lr() " + "to access the learning rate.", UserWarning) + return verbose + return False + +class LRScheduler: + + def __init__(self, optimizer, last_epoch=-1, verbose="deprecated"): + + # Attach optimizer + if not isinstance(optimizer, Optimizer): + raise TypeError(f'{type(optimizer).__name__} is not an Optimizer') + self.optimizer = optimizer + + # Initialize epoch and base learning rates + if last_epoch == -1: + for group in optimizer.param_groups: + group.setdefault('initial_lr', group['lr']) + else: + for i, group in enumerate(optimizer.param_groups): + if 'initial_lr' not in group: + raise KeyError("param 'initial_lr' is not specified " + f"in param_groups[{i}] when resuming an optimizer") + self.base_lrs = [group['initial_lr'] for group in optimizer.param_groups] + self.last_epoch = last_epoch + + # Following https://github.com/pytorch/pytorch/issues/20124 + # We would like to ensure that `lr_scheduler.step()` is called after + # `optimizer.step()` + def with_counter(method): + if getattr(method, '_with_counter', False): + # `optimizer.step()` has already been replaced, return. + return method + + # Keep a weak reference to the optimizer instance to prevent + # cyclic references. + instance_ref = weakref.ref(method.__self__) + # Get the unbound method for the same purpose. + func = method.__func__ + cls = instance_ref().__class__ + del method + + @wraps(func) + def wrapper(*args, **kwargs): + instance = instance_ref() + instance._step_count += 1 + wrapped = func.__get__(instance, cls) + return wrapped(*args, **kwargs) + + # Note that the returned function here is no longer a bound method, + # so attributes like `__func__` and `__self__` no longer exist. 
+ wrapper._with_counter = True + return wrapper + + self.optimizer.step = with_counter(self.optimizer.step) + self.verbose = _check_verbose_deprecated_warning(verbose) + + self._initial_step() + + def _initial_step(self): + """Initialize step counts and performs a step""" + self.optimizer._step_count = 0 + self._step_count = 0 + self.step() + + def state_dict(self): + """Returns the state of the scheduler as a :class:`dict`. + + It contains an entry for every variable in self.__dict__ which + is not the optimizer. + """ + return {key: value for key, value in self.__dict__.items() if key != 'optimizer'} + + def load_state_dict(self, state_dict): + """Loads the schedulers state. + + Args: + state_dict (dict): scheduler state. Should be an object returned + from a call to :meth:`state_dict`. + """ + self.__dict__.update(state_dict) + + def get_last_lr(self): + """ Return last computed learning rate by current scheduler. + """ + return self._last_lr + + def get_lr(self): + # Compute learning rate using chainable form of the scheduler + raise NotImplementedError + + def print_lr(self, is_verbose, group, lr, epoch=None): + """Display the current learning rate. + """ + if is_verbose: + if epoch is None: + print(f'Adjusting learning rate of group {group} to {lr:.4e}.') + else: + epoch_str = ("%.2f" if isinstance(epoch, float) else + "%.5d") % epoch + print(f'Epoch {epoch_str}: adjusting learning rate of group {group} to {lr:.4e}.') + + + def step(self, epoch=None): + # Raise a warning if old pattern is detected + # https://github.com/pytorch/pytorch/issues/20124 + if self._step_count == 1: + if not hasattr(self.optimizer.step, "_with_counter"): + warnings.warn("Seems like `optimizer.step()` has been overridden after learning rate scheduler " + "initialization. Please, make sure to call `optimizer.step()` before " + "`lr_scheduler.step()`. See more details at " + "https://pytorch.org/docs/stable/optim.html#how-to-adjust-learning-rate", UserWarning) + + # Just check if there were two first lr_scheduler.step() calls before optimizer.step() + elif self.optimizer._step_count < 1: + warnings.warn("Detected call of `lr_scheduler.step()` before `optimizer.step()`. " + "In PyTorch 1.1.0 and later, you should call them in the opposite order: " + "`optimizer.step()` before `lr_scheduler.step()`. Failure to do this " + "will result in PyTorch skipping the first value of the learning rate schedule. " + "See more details at " + "https://pytorch.org/docs/stable/optim.html#how-to-adjust-learning-rate", UserWarning) + self._step_count += 1 + + with _enable_get_lr_call(self): + if epoch is None: + self.last_epoch += 1 + values = self.get_lr() + else: + warnings.warn(EPOCH_DEPRECATION_WARNING, UserWarning) + self.last_epoch = epoch + if hasattr(self, "_get_closed_form_lr"): + values = self._get_closed_form_lr() + else: + values = self.get_lr() + + for i, data in enumerate(zip(self.optimizer.param_groups, values)): + param_group, lr = data + param_group['lr'] = lr + + self._last_lr = [group['lr'] for group in self.optimizer.param_groups] + + +# Including _LRScheduler for backwards compatibility +# Subclass instead of assign because we want __name__ of _LRScheduler to be _LRScheduler (assigning would make it LRScheduler). 
+class _LRScheduler(LRScheduler): + pass + + +class _enable_get_lr_call: + + def __init__(self, o): + self.o = o + + def __enter__(self): + self.o._get_lr_called_within_step = True + return self + + def __exit__(self, type, value, traceback): + self.o._get_lr_called_within_step = False + + +class LambdaLR(LRScheduler): + """Sets the learning rate of each parameter group to the initial lr + times a given function. When last_epoch=-1, sets initial lr as lr. + + Args: + optimizer (Optimizer): Wrapped optimizer. + lr_lambda (function or list): A function which computes a multiplicative + factor given an integer parameter epoch, or a list of such + functions, one for each group in optimizer.param_groups. + last_epoch (int): The index of last epoch. Default: -1. + verbose (bool): If ``True``, prints a message to stdout for + each update. Default: ``False``. + + .. deprecated:: 2.2 + ``verbose`` is deprecated. Please use ``get_last_lr()`` to access the + learning rate. + + Example: + >>> # xdoctest: +SKIP + >>> # Assuming optimizer has two groups. + >>> lambda1 = lambda epoch: epoch // 30 + >>> lambda2 = lambda epoch: 0.95 ** epoch + >>> scheduler = LambdaLR(optimizer, lr_lambda=[lambda1, lambda2]) + >>> for epoch in range(100): + >>> train(...) + >>> validate(...) + >>> scheduler.step() + """ + + def __init__(self, optimizer, lr_lambda, last_epoch=-1, verbose="deprecated"): + self.optimizer = optimizer + + if not isinstance(lr_lambda, list) and not isinstance(lr_lambda, tuple): + self.lr_lambdas = [lr_lambda] * len(optimizer.param_groups) + else: + if len(lr_lambda) != len(optimizer.param_groups): + raise ValueError(f"Expected {len(optimizer.param_groups)} lr_lambdas, but got {len(lr_lambda)}") + self.lr_lambdas = list(lr_lambda) + super().__init__(optimizer, last_epoch, verbose) + + def state_dict(self): + """Returns the state of the scheduler as a :class:`dict`. + + It contains an entry for every variable in self.__dict__ which + is not the optimizer. + The learning rate lambda functions will only be saved if they are callable objects + and not if they are functions or lambdas. + + When saving or loading the scheduler, please make sure to also save or load the state of the optimizer. + """ + + state_dict = {key: value for key, value in self.__dict__.items() if key not in ('optimizer', 'lr_lambdas')} + state_dict['lr_lambdas'] = [None] * len(self.lr_lambdas) + + for idx, fn in enumerate(self.lr_lambdas): + if not isinstance(fn, types.FunctionType): + state_dict['lr_lambdas'][idx] = fn.__dict__.copy() + + return state_dict + + def load_state_dict(self, state_dict): + """Loads the schedulers state. + + When saving or loading the scheduler, please make sure to also save or load the state of the optimizer. + + Args: + state_dict (dict): scheduler state. Should be an object returned + from a call to :meth:`state_dict`. 
+ """ + + lr_lambdas = state_dict.pop('lr_lambdas') + self.__dict__.update(state_dict) + # Restore state_dict keys in order to prevent side effects + # https://github.com/pytorch/pytorch/issues/32756 + state_dict['lr_lambdas'] = lr_lambdas + + for idx, fn in enumerate(lr_lambdas): + if fn is not None: + self.lr_lambdas[idx].__dict__.update(fn) + + def get_lr(self): + if not self._get_lr_called_within_step: + warnings.warn("To get the last learning rate computed by the scheduler, " + "please use `get_last_lr()`.") + + return [base_lr * lmbda(self.last_epoch) + for lmbda, base_lr in zip(self.lr_lambdas, self.base_lrs)] + + +class MultiplicativeLR(LRScheduler): + """Multiply the learning rate of each parameter group by the factor given + in the specified function. When last_epoch=-1, sets initial lr as lr. + + Args: + optimizer (Optimizer): Wrapped optimizer. + lr_lambda (function or list): A function which computes a multiplicative + factor given an integer parameter epoch, or a list of such + functions, one for each group in optimizer.param_groups. + last_epoch (int): The index of last epoch. Default: -1. + verbose (bool): If ``True``, prints a message to stdout for + each update. Default: ``False``. + + .. deprecated:: 2.2 + ``verbose`` is deprecated. Please use ``get_last_lr()`` to access the + learning rate. + + Example: + >>> # xdoctest: +SKIP + >>> lmbda = lambda epoch: 0.95 + >>> scheduler = MultiplicativeLR(optimizer, lr_lambda=lmbda) + >>> for epoch in range(100): + >>> train(...) + >>> validate(...) + >>> scheduler.step() + """ + + def __init__(self, optimizer, lr_lambda, last_epoch=-1, verbose="deprecated"): + self.optimizer = optimizer + + if not isinstance(lr_lambda, list) and not isinstance(lr_lambda, tuple): + self.lr_lambdas = [lr_lambda] * len(optimizer.param_groups) + else: + if len(lr_lambda) != len(optimizer.param_groups): + raise ValueError(f"Expected {len(optimizer.param_groups)} lr_lambdas, but got {len(lr_lambda)}") + self.lr_lambdas = list(lr_lambda) + super().__init__(optimizer, last_epoch, verbose) + + def state_dict(self): + """Returns the state of the scheduler as a :class:`dict`. + + It contains an entry for every variable in self.__dict__ which + is not the optimizer. + The learning rate lambda functions will only be saved if they are callable objects + and not if they are functions or lambdas. + """ + state_dict = {key: value for key, value in self.__dict__.items() if key not in ('optimizer', 'lr_lambdas')} + state_dict['lr_lambdas'] = [None] * len(self.lr_lambdas) + + for idx, fn in enumerate(self.lr_lambdas): + if not isinstance(fn, types.FunctionType): + state_dict['lr_lambdas'][idx] = fn.__dict__.copy() + + return state_dict + + def load_state_dict(self, state_dict): + """Loads the schedulers state. + + Args: + state_dict (dict): scheduler state. Should be an object returned + from a call to :meth:`state_dict`. 
+ """ + lr_lambdas = state_dict.pop('lr_lambdas') + self.__dict__.update(state_dict) + # Restore state_dict keys in order to prevent side effects + # https://github.com/pytorch/pytorch/issues/32756 + state_dict['lr_lambdas'] = lr_lambdas + + for idx, fn in enumerate(lr_lambdas): + if fn is not None: + self.lr_lambdas[idx].__dict__.update(fn) + + def get_lr(self): + if not self._get_lr_called_within_step: + warnings.warn("To get the last learning rate computed by the scheduler, " + "please use `get_last_lr()`.", UserWarning) + + if self.last_epoch > 0: + return [group['lr'] * lmbda(self.last_epoch) + for lmbda, group in zip(self.lr_lambdas, self.optimizer.param_groups)] + else: + return [group['lr'] for group in self.optimizer.param_groups] + + +class StepLR(LRScheduler): + """Decays the learning rate of each parameter group by gamma every + step_size epochs. Notice that such decay can happen simultaneously with + other changes to the learning rate from outside this scheduler. When + last_epoch=-1, sets initial lr as lr. + + Args: + optimizer (Optimizer): Wrapped optimizer. + step_size (int): Period of learning rate decay. + gamma (float): Multiplicative factor of learning rate decay. + Default: 0.1. + last_epoch (int): The index of last epoch. Default: -1. + verbose (bool): If ``True``, prints a message to stdout for + each update. Default: ``False``. + + .. deprecated:: 2.2 + ``verbose`` is deprecated. Please use ``get_last_lr()`` to access the + learning rate. + + Example: + >>> # xdoctest: +SKIP + >>> # Assuming optimizer uses lr = 0.05 for all groups + >>> # lr = 0.05 if epoch < 30 + >>> # lr = 0.005 if 30 <= epoch < 60 + >>> # lr = 0.0005 if 60 <= epoch < 90 + >>> # ... + >>> scheduler = StepLR(optimizer, step_size=30, gamma=0.1) + >>> for epoch in range(100): + >>> train(...) + >>> validate(...) + >>> scheduler.step() + """ + + def __init__(self, optimizer, step_size, gamma=0.1, last_epoch=-1, verbose="deprecated"): + self.step_size = step_size + self.gamma = gamma + super().__init__(optimizer, last_epoch, verbose) + + def get_lr(self): + if not self._get_lr_called_within_step: + warnings.warn("To get the last learning rate computed by the scheduler, " + "please use `get_last_lr()`.", UserWarning) + + if (self.last_epoch == 0) or (self.last_epoch % self.step_size != 0): + return [group['lr'] for group in self.optimizer.param_groups] + return [group['lr'] * self.gamma + for group in self.optimizer.param_groups] + + def _get_closed_form_lr(self): + return [base_lr * self.gamma ** (self.last_epoch // self.step_size) + for base_lr in self.base_lrs] + + +class MultiStepLR(LRScheduler): + """Decays the learning rate of each parameter group by gamma once the + number of epoch reaches one of the milestones. Notice that such decay can + happen simultaneously with other changes to the learning rate from outside + this scheduler. When last_epoch=-1, sets initial lr as lr. + + Args: + optimizer (Optimizer): Wrapped optimizer. + milestones (list): List of epoch indices. Must be increasing. + gamma (float): Multiplicative factor of learning rate decay. + Default: 0.1. + last_epoch (int): The index of last epoch. Default: -1. + verbose (bool): If ``True``, prints a message to stdout for + each update. Default: ``False``. + + .. deprecated:: 2.2 + ``verbose`` is deprecated. Please use ``get_last_lr()`` to access the + learning rate. 
+ + Example: + >>> # xdoctest: +SKIP + >>> # Assuming optimizer uses lr = 0.05 for all groups + >>> # lr = 0.05 if epoch < 30 + >>> # lr = 0.005 if 30 <= epoch < 80 + >>> # lr = 0.0005 if epoch >= 80 + >>> scheduler = MultiStepLR(optimizer, milestones=[30,80], gamma=0.1) + >>> for epoch in range(100): + >>> train(...) + >>> validate(...) + >>> scheduler.step() + """ + + def __init__(self, optimizer, milestones, gamma=0.1, last_epoch=-1, verbose="deprecated"): + self.milestones = Counter(milestones) + self.gamma = gamma + super().__init__(optimizer, last_epoch, verbose) + + def get_lr(self): + if not self._get_lr_called_within_step: + warnings.warn("To get the last learning rate computed by the scheduler, " + "please use `get_last_lr()`.", UserWarning) + + if self.last_epoch not in self.milestones: + return [group['lr'] for group in self.optimizer.param_groups] + return [group['lr'] * self.gamma ** self.milestones[self.last_epoch] + for group in self.optimizer.param_groups] + + def _get_closed_form_lr(self): + milestones = sorted(self.milestones.elements()) + return [base_lr * self.gamma ** bisect_right(milestones, self.last_epoch) + for base_lr in self.base_lrs] + + +class ConstantLR(LRScheduler): + """Decays the learning rate of each parameter group by a small constant factor until the + number of epoch reaches a pre-defined milestone: total_iters. Notice that such decay can + happen simultaneously with other changes to the learning rate from outside this scheduler. + When last_epoch=-1, sets initial lr as lr. + + Args: + optimizer (Optimizer): Wrapped optimizer. + factor (float): The number we multiply learning rate until the milestone. Default: 1./3. + total_iters (int): The number of steps that the scheduler decays the learning rate. + Default: 5. + last_epoch (int): The index of the last epoch. Default: -1. + verbose (bool): If ``True``, prints a message to stdout for + each update. Default: ``False``. + + .. deprecated:: 2.2 + ``verbose`` is deprecated. Please use ``get_last_lr()`` to access the + learning rate. + + Example: + >>> # xdoctest: +SKIP + >>> # Assuming optimizer uses lr = 0.05 for all groups + >>> # lr = 0.025 if epoch == 0 + >>> # lr = 0.025 if epoch == 1 + >>> # lr = 0.025 if epoch == 2 + >>> # lr = 0.025 if epoch == 3 + >>> # lr = 0.05 if epoch >= 4 + >>> scheduler = ConstantLR(self.opt, factor=0.5, total_iters=4) + >>> for epoch in range(100): + >>> train(...) + >>> validate(...) 
+ >>> scheduler.step() + """ + + def __init__(self, optimizer, factor=1.0 / 3, total_iters=5, last_epoch=-1, verbose="deprecated"): + if factor > 1.0 or factor < 0: + raise ValueError('Constant multiplicative factor expected to be between 0 and 1.') + + self.factor = factor + self.total_iters = total_iters + super().__init__(optimizer, last_epoch, verbose) + + def get_lr(self): + if not self._get_lr_called_within_step: + warnings.warn("To get the last learning rate computed by the scheduler, " + "please use `get_last_lr()`.", UserWarning) + + if self.last_epoch == 0: + return [group['lr'] * self.factor for group in self.optimizer.param_groups] + + if self.last_epoch != self.total_iters: + return [group['lr'] for group in self.optimizer.param_groups] + + return [group['lr'] * (1.0 / self.factor) for group in self.optimizer.param_groups] + + def _get_closed_form_lr(self): + return [base_lr * (self.factor + (self.last_epoch >= self.total_iters) * (1 - self.factor)) + for base_lr in self.base_lrs] + + +class LinearLR(LRScheduler): + """Decays the learning rate of each parameter group by linearly changing small + multiplicative factor until the number of epoch reaches a pre-defined milestone: total_iters. + Notice that such decay can happen simultaneously with other changes to the learning rate + from outside this scheduler. When last_epoch=-1, sets initial lr as lr. + + Args: + optimizer (Optimizer): Wrapped optimizer. + start_factor (float): The number we multiply learning rate in the first epoch. + The multiplication factor changes towards end_factor in the following epochs. + Default: 1./3. + end_factor (float): The number we multiply learning rate at the end of linear changing + process. Default: 1.0. + total_iters (int): The number of iterations that multiplicative factor reaches to 1. + Default: 5. + last_epoch (int): The index of the last epoch. Default: -1. + verbose (bool): If ``True``, prints a message to stdout for + each update. Default: ``False``. + + .. deprecated:: 2.2 + ``verbose`` is deprecated. Please use ``get_last_lr()`` to access the + learning rate. + + Example: + >>> # xdoctest: +SKIP + >>> # Assuming optimizer uses lr = 0.05 for all groups + >>> # lr = 0.025 if epoch == 0 + >>> # lr = 0.03125 if epoch == 1 + >>> # lr = 0.0375 if epoch == 2 + >>> # lr = 0.04375 if epoch == 3 + >>> # lr = 0.05 if epoch >= 4 + >>> scheduler = LinearLR(self.opt, start_factor=0.5, total_iters=4) + >>> for epoch in range(100): + >>> train(...) + >>> validate(...) + >>> scheduler.step() + """ + + def __init__(self, optimizer, start_factor=1.0 / 3, end_factor=1.0, total_iters=5, last_epoch=-1, + verbose="deprecated"): + if start_factor > 1.0 or start_factor <= 0: + raise ValueError('Starting multiplicative factor expected to be greater than 0 and less or equal to 1.') + + if end_factor > 1.0 or end_factor < 0: + raise ValueError('Ending multiplicative factor expected to be between 0 and 1.') + + self.start_factor = start_factor + self.end_factor = end_factor + self.total_iters = total_iters + super().__init__(optimizer, last_epoch, verbose) + + def get_lr(self): + if not self._get_lr_called_within_step: + warnings.warn("To get the last learning rate computed by the scheduler, " + "please use `get_last_lr()`.", UserWarning) + + if self.last_epoch == 0: + return [group['lr'] * self.start_factor for group in self.optimizer.param_groups] + + if self.last_epoch > self.total_iters: + return [group['lr'] for group in self.optimizer.param_groups] + + return [group['lr'] * (1. 
+ (self.end_factor - self.start_factor) / + (self.total_iters * self.start_factor + (self.last_epoch - 1) * (self.end_factor - self.start_factor))) + for group in self.optimizer.param_groups] + + def _get_closed_form_lr(self): + return [base_lr * (self.start_factor + + (self.end_factor - self.start_factor) * min(self.total_iters, self.last_epoch) / self.total_iters) + for base_lr in self.base_lrs] + + +class ExponentialLR(LRScheduler): + """Decays the learning rate of each parameter group by gamma every epoch. + When last_epoch=-1, sets initial lr as lr. + + Args: + optimizer (Optimizer): Wrapped optimizer. + gamma (float): Multiplicative factor of learning rate decay. + last_epoch (int): The index of last epoch. Default: -1. + verbose (bool): If ``True``, prints a message to stdout for + each update. Default: ``False``. + + .. deprecated:: 2.2 + ``verbose`` is deprecated. Please use ``get_last_lr()`` to access the + learning rate. + """ + + def __init__(self, optimizer, gamma, last_epoch=-1, verbose="deprecated"): + self.gamma = gamma + super().__init__(optimizer, last_epoch, verbose) + + def get_lr(self): + if not self._get_lr_called_within_step: + warnings.warn("To get the last learning rate computed by the scheduler, " + "please use `get_last_lr()`.", UserWarning) + + if self.last_epoch == 0: + return [group['lr'] for group in self.optimizer.param_groups] + return [group['lr'] * self.gamma + for group in self.optimizer.param_groups] + + def _get_closed_form_lr(self): + return [base_lr * self.gamma ** self.last_epoch + for base_lr in self.base_lrs] + + +class SequentialLR(LRScheduler): + """Receives the list of schedulers that is expected to be called sequentially during + optimization process and milestone points that provides exact intervals to reflect + which scheduler is supposed to be called at a given epoch. + + Args: + optimizer (Optimizer): Wrapped optimizer. + schedulers (list): List of chained schedulers. + milestones (list): List of integers that reflects milestone points. + last_epoch (int): The index of last epoch. Default: -1. + verbose (bool): Does nothing. + + .. deprecated:: 2.2 + ``verbose`` is deprecated. Please use ``get_last_lr()`` to access the + learning rate. + + Example: + >>> # xdoctest: +SKIP + >>> # Assuming optimizer uses lr = 1. for all groups + >>> # lr = 0.1 if epoch == 0 + >>> # lr = 0.1 if epoch == 1 + >>> # lr = 0.9 if epoch == 2 + >>> # lr = 0.81 if epoch == 3 + >>> # lr = 0.729 if epoch == 4 + >>> scheduler1 = ConstantLR(self.opt, factor=0.1, total_iters=2) + >>> scheduler2 = ExponentialLR(self.opt, gamma=0.9) + >>> scheduler = SequentialLR(self.opt, schedulers=[scheduler1, scheduler2], milestones=[2]) + >>> for epoch in range(100): + >>> train(...) + >>> validate(...) + >>> scheduler.step() + """ + + def __init__(self, optimizer, schedulers, milestones, last_epoch=-1, verbose="deprecated"): + for scheduler_idx in range(len(schedulers)): + if schedulers[scheduler_idx].optimizer != optimizer: + raise ValueError( + "Sequential Schedulers expects all schedulers to belong to the same optimizer, but " + f"got schedulers at index {scheduler_idx} to be different than the optimizer passed in." + ) + + if (schedulers[scheduler_idx].optimizer != schedulers[0].optimizer): + raise ValueError( + "Sequential Schedulers expects all schedulers to belong to the same optimizer, but " + f"got schedulers at index {0} and {scheduler_idx} to be different." 
+ ) + if (len(milestones) != len(schedulers) - 1): + raise ValueError( + "Sequential Schedulers expects number of schedulers provided to be one more " + f"than the number of milestone points, but got number of schedulers {len(schedulers)} and the " + f"number of milestones to be equal to {len(milestones)}" + ) + _check_verbose_deprecated_warning(verbose) + self._schedulers = schedulers + self._milestones = milestones + self.last_epoch = last_epoch + 1 + self.optimizer = optimizer + + # Reset learning rates back to initial values + for group in self.optimizer.param_groups: + group["lr"] = group["initial_lr"] + + # "Undo" the step performed by other schedulers + for scheduler in self._schedulers: + scheduler.last_epoch -= 1 + + # Perform the initial step for only the first scheduler + self._schedulers[0]._initial_step() + + self._last_lr = schedulers[0].get_last_lr() + + def step(self): + self.last_epoch += 1 + idx = bisect_right(self._milestones, self.last_epoch) + scheduler = self._schedulers[idx] + if idx > 0 and self._milestones[idx - 1] == self.last_epoch: + scheduler.step(0) + else: + scheduler.step() + + self._last_lr = scheduler.get_last_lr() + + def state_dict(self): + """Returns the state of the scheduler as a :class:`dict`. + + It contains an entry for every variable in self.__dict__ which + is not the optimizer. + The wrapped scheduler states will also be saved. + """ + state_dict = {key: value for key, value in self.__dict__.items() if key not in ('optimizer', '_schedulers')} + state_dict['_schedulers'] = [None] * len(self._schedulers) + + for idx, s in enumerate(self._schedulers): + state_dict['_schedulers'][idx] = s.state_dict() + + return state_dict + + def load_state_dict(self, state_dict): + """Loads the schedulers state. + + Args: + state_dict (dict): scheduler state. Should be an object returned + from a call to :meth:`state_dict`. + """ + _schedulers = state_dict.pop('_schedulers') + self.__dict__.update(state_dict) + # Restore state_dict keys in order to prevent side effects + # https://github.com/pytorch/pytorch/issues/32756 + state_dict['_schedulers'] = _schedulers + + for idx, s in enumerate(_schedulers): + self._schedulers[idx].load_state_dict(s) + + +class PolynomialLR(LRScheduler): + """Decays the learning rate of each parameter group using a polynomial function + in the given total_iters. When last_epoch=-1, sets initial lr as lr. + + Args: + optimizer (Optimizer): Wrapped optimizer. + total_iters (int): The number of steps that the scheduler decays the learning rate. Default: 5. + power (float): The power of the polynomial. Default: 1.0. + verbose (bool): If ``True``, prints a message to stdout for + each update. Default: ``False``. + + .. deprecated:: 2.2 + ``verbose`` is deprecated. Please use ``get_last_lr()`` to access the + learning rate. + + Example: + >>> # xdoctest: +SKIP("undefined vars") + >>> # Assuming optimizer uses lr = 0.001 for all groups + >>> # lr = 0.001 if epoch == 0 + >>> # lr = 0.00075 if epoch == 1 + >>> # lr = 0.00050 if epoch == 2 + >>> # lr = 0.00025 if epoch == 3 + >>> # lr = 0.0 if epoch >= 4 + >>> scheduler = PolynomialLR(self.opt, total_iters=4, power=1.0) + >>> for epoch in range(100): + >>> train(...) + >>> validate(...) 
+ >>> scheduler.step() + """ + def __init__(self, optimizer, total_iters=5, power=1.0, last_epoch=-1, verbose="deprecated"): + self.total_iters = total_iters + self.power = power + super().__init__(optimizer, last_epoch, verbose) + + def get_lr(self): + if not self._get_lr_called_within_step: + warnings.warn("To get the last learning rate computed by the scheduler, " + "please use `get_last_lr()`.", UserWarning) + + if self.last_epoch == 0 or self.last_epoch > self.total_iters: + return [group["lr"] for group in self.optimizer.param_groups] + + decay_factor = ((1.0 - self.last_epoch / self.total_iters) / (1.0 - (self.last_epoch - 1) / self.total_iters)) ** self.power + return [group["lr"] * decay_factor for group in self.optimizer.param_groups] + + def _get_closed_form_lr(self): + return [ + ( + base_lr * (1.0 - min(self.total_iters, self.last_epoch) / self.total_iters) ** self.power + ) + for base_lr in self.base_lrs + ] + + +class CosineAnnealingLR(LRScheduler): + r"""Set the learning rate of each parameter group using a cosine annealing + schedule, where :math:`\eta_{max}` is set to the initial lr and + :math:`T_{cur}` is the number of epochs since the last restart in SGDR: + + .. math:: + \begin{aligned} + \eta_t & = \eta_{min} + \frac{1}{2}(\eta_{max} - \eta_{min})\left(1 + + \cos\left(\frac{T_{cur}}{T_{max}}\pi\right)\right), + & T_{cur} \neq (2k+1)T_{max}; \\ + \eta_{t+1} & = \eta_{t} + \frac{1}{2}(\eta_{max} - \eta_{min}) + \left(1 - \cos\left(\frac{1}{T_{max}}\pi\right)\right), + & T_{cur} = (2k+1)T_{max}. + \end{aligned} + + When last_epoch=-1, sets initial lr as lr. Notice that because the schedule + is defined recursively, the learning rate can be simultaneously modified + outside this scheduler by other operators. If the learning rate is set + solely by this scheduler, the learning rate at each step becomes: + + .. math:: + \eta_t = \eta_{min} + \frac{1}{2}(\eta_{max} - \eta_{min})\left(1 + + \cos\left(\frac{T_{cur}}{T_{max}}\pi\right)\right) + + It has been proposed in + `SGDR: Stochastic Gradient Descent with Warm Restarts`_. Note that this only + implements the cosine annealing part of SGDR, and not the restarts. + + Args: + optimizer (Optimizer): Wrapped optimizer. + T_max (int): Maximum number of iterations. + eta_min (float): Minimum learning rate. Default: 0. + last_epoch (int): The index of last epoch. Default: -1. + verbose (bool): If ``True``, prints a message to stdout for + each update. Default: ``False``. + + .. deprecated:: 2.2 + ``verbose`` is deprecated. Please use ``get_last_lr()`` to access the + learning rate. + + .. 
_SGDR\: Stochastic Gradient Descent with Warm Restarts: + https://arxiv.org/abs/1608.03983 + """ + + def __init__(self, optimizer, T_max, eta_min=0, last_epoch=-1, verbose="deprecated"): + self.T_max = T_max + self.eta_min = eta_min + super().__init__(optimizer, last_epoch, verbose) + + def get_lr(self): + if not self._get_lr_called_within_step: + warnings.warn("To get the last learning rate computed by the scheduler, " + "please use `get_last_lr()`.", UserWarning) + + if self.last_epoch == 0: + return [group['lr'] for group in self.optimizer.param_groups] + elif self._step_count == 1 and self.last_epoch > 0: + return [self.eta_min + (base_lr - self.eta_min) * + (1 + math.cos((self.last_epoch) * math.pi / self.T_max)) / 2 + for base_lr, group in + zip(self.base_lrs, self.optimizer.param_groups)] + elif (self.last_epoch - 1 - self.T_max) % (2 * self.T_max) == 0: + return [group['lr'] + (base_lr - self.eta_min) * + (1 - math.cos(math.pi / self.T_max)) / 2 + for base_lr, group in + zip(self.base_lrs, self.optimizer.param_groups)] + return [(1 + math.cos(math.pi * self.last_epoch / self.T_max)) / + (1 + math.cos(math.pi * (self.last_epoch - 1) / self.T_max)) * + (group['lr'] - self.eta_min) + self.eta_min + for group in self.optimizer.param_groups] + + def _get_closed_form_lr(self): + return [self.eta_min + (base_lr - self.eta_min) * + (1 + math.cos(math.pi * self.last_epoch / self.T_max)) / 2 + for base_lr in self.base_lrs] + + +class ChainedScheduler(LRScheduler): + """Chains list of learning rate schedulers. It takes a list of chainable learning + rate schedulers and performs consecutive step() functions belonging to them by just + one call. + + Args: + schedulers (list): List of chained schedulers. + + Example: + >>> # xdoctest: +SKIP + >>> # Assuming optimizer uses lr = 1. for all groups + >>> # lr = 0.09 if epoch == 0 + >>> # lr = 0.081 if epoch == 1 + >>> # lr = 0.729 if epoch == 2 + >>> # lr = 0.6561 if epoch == 3 + >>> # lr = 0.59049 if epoch >= 4 + >>> scheduler1 = ConstantLR(self.opt, factor=0.1, total_iters=2) + >>> scheduler2 = ExponentialLR(self.opt, gamma=0.9) + >>> scheduler = ChainedScheduler([scheduler1, scheduler2]) + >>> for epoch in range(100): + >>> train(...) + >>> validate(...) + >>> scheduler.step() + """ + + def __init__(self, schedulers): + for scheduler_idx in range(1, len(schedulers)): + if (schedulers[scheduler_idx].optimizer != schedulers[0].optimizer): + raise ValueError( + "ChainedScheduler expects all schedulers to belong to the same optimizer, but " + f"got schedulers at index {0} and {scheduler_idx} to be different" + ) + self._schedulers = list(schedulers) + self.optimizer = schedulers[0].optimizer + self._last_lr = [group['lr'] for group in self._schedulers[-1].optimizer.param_groups] + + def step(self): + for scheduler in self._schedulers: + scheduler.step() + self._last_lr = [group['lr'] for group in self._schedulers[-1].optimizer.param_groups] + + def state_dict(self): + """Returns the state of the scheduler as a :class:`dict`. + + It contains an entry for every variable in self.__dict__ which + is not the optimizer. + The wrapped scheduler states will also be saved. + """ + state_dict = {key: value for key, value in self.__dict__.items() if key not in ('optimizer', '_schedulers')} + state_dict['_schedulers'] = [None] * len(self._schedulers) + + for idx, s in enumerate(self._schedulers): + state_dict['_schedulers'][idx] = s.state_dict() + + return state_dict + + def load_state_dict(self, state_dict): + """Loads the schedulers state. 
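# Illustrative sketch, not part of the vendored torch source above: it checks the
# claim in the CosineAnnealingLR docstring that, when no other scheduler touches
# the learning rate, the recursive update in get_lr() matches the closed form
# eta_min + (eta_max - eta_min) * (1 + cos(pi * t / T_max)) / 2.
# The toy parameter and hyperparameters below are hypothetical.
import math

import torch

param = torch.nn.Parameter(torch.zeros(1))
opt = torch.optim.SGD([param], lr=0.1)
sched = torch.optim.lr_scheduler.CosineAnnealingLR(opt, T_max=10, eta_min=0.001)

for t in range(1, 11):
    opt.step()
    sched.step()
    closed_form = 0.001 + (0.1 - 0.001) * (1 + math.cos(math.pi * t / 10)) / 2
    assert abs(sched.get_last_lr()[0] - closed_form) < 1e-9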
+ + Args: + state_dict (dict): scheduler state. Should be an object returned + from a call to :meth:`state_dict`. + """ + _schedulers = state_dict.pop('_schedulers') + self.__dict__.update(state_dict) + # Restore state_dict keys in order to prevent side effects + # https://github.com/pytorch/pytorch/issues/32756 + state_dict['_schedulers'] = _schedulers + + for idx, s in enumerate(_schedulers): + self._schedulers[idx].load_state_dict(s) + + +class ReduceLROnPlateau(LRScheduler): + """Reduce learning rate when a metric has stopped improving. + Models often benefit from reducing the learning rate by a factor + of 2-10 once learning stagnates. This scheduler reads a metrics + quantity and if no improvement is seen for a 'patience' number + of epochs, the learning rate is reduced. + + Args: + optimizer (Optimizer): Wrapped optimizer. + mode (str): One of `min`, `max`. In `min` mode, lr will + be reduced when the quantity monitored has stopped + decreasing; in `max` mode it will be reduced when the + quantity monitored has stopped increasing. Default: 'min'. + factor (float): Factor by which the learning rate will be + reduced. new_lr = lr * factor. Default: 0.1. + patience (int): Number of epochs with no improvement after + which learning rate will be reduced. For example, if + `patience = 2`, then we will ignore the first 2 epochs + with no improvement, and will only decrease the LR after the + 3rd epoch if the loss still hasn't improved then. + Default: 10. + threshold (float): Threshold for measuring the new optimum, + to only focus on significant changes. Default: 1e-4. + threshold_mode (str): One of `rel`, `abs`. In `rel` mode, + dynamic_threshold = best * ( 1 + threshold ) in 'max' + mode or best * ( 1 - threshold ) in `min` mode. + In `abs` mode, dynamic_threshold = best + threshold in + `max` mode or best - threshold in `min` mode. Default: 'rel'. + cooldown (int): Number of epochs to wait before resuming + normal operation after lr has been reduced. Default: 0. + min_lr (float or list): A scalar or a list of scalars. A + lower bound on the learning rate of all param groups + or each group respectively. Default: 0. + eps (float): Minimal decay applied to lr. If the difference + between new and old lr is smaller than eps, the update is + ignored. Default: 1e-8. + verbose (bool): If ``True``, prints a message to stdout for + each update. Default: ``False``. + + .. deprecated:: 2.2 + ``verbose`` is deprecated. Please use ``get_last_lr()`` to access the + learning rate. + + Example: + >>> # xdoctest: +SKIP + >>> optimizer = torch.optim.SGD(model.parameters(), lr=0.1, momentum=0.9) + >>> scheduler = ReduceLROnPlateau(optimizer, 'min') + >>> for epoch in range(10): + >>> train(...) + >>> val_loss = validate(...) 
+ >>> # Note that step should be called after validate() + >>> scheduler.step(val_loss) + """ + + def __init__(self, optimizer, mode='min', factor=0.1, patience=10, + threshold=1e-4, threshold_mode='rel', cooldown=0, + min_lr=0, eps=1e-8, verbose="deprecated"): + + if factor >= 1.0: + raise ValueError('Factor should be < 1.0.') + self.factor = factor + + # Attach optimizer + if not isinstance(optimizer, Optimizer): + raise TypeError(f'{type(optimizer).__name__} is not an Optimizer') + self.optimizer = optimizer + + if isinstance(min_lr, (list, tuple)): + if len(min_lr) != len(optimizer.param_groups): + raise ValueError(f"expected {len(optimizer.param_groups)} min_lrs, got {len(min_lr)}") + self.min_lrs = list(min_lr) + else: + self.min_lrs = [min_lr] * len(optimizer.param_groups) + + self.patience = patience + + self.verbose = _check_verbose_deprecated_warning(verbose) + self.cooldown = cooldown + self.cooldown_counter = 0 + self.mode = mode + self.threshold = threshold + self.threshold_mode = threshold_mode + self.best = None + self.num_bad_epochs = None + self.mode_worse = None # the worse value for the chosen mode + self.eps = eps + self.last_epoch = 0 + self._init_is_better(mode=mode, threshold=threshold, + threshold_mode=threshold_mode) + self._reset() + + def _reset(self): + """Resets num_bad_epochs counter and cooldown counter.""" + self.best = self.mode_worse + self.cooldown_counter = 0 + self.num_bad_epochs = 0 + + def step(self, metrics, epoch=None): + # convert `metrics` to float, in case it's a zero-dim Tensor + current = float(metrics) + if epoch is None: + epoch = self.last_epoch + 1 + else: + warnings.warn(EPOCH_DEPRECATION_WARNING, UserWarning) + self.last_epoch = epoch + + if self.is_better(current, self.best): + self.best = current + self.num_bad_epochs = 0 + else: + self.num_bad_epochs += 1 + + if self.in_cooldown: + self.cooldown_counter -= 1 + self.num_bad_epochs = 0 # ignore any bad epochs in cooldown + + if self.num_bad_epochs > self.patience: + self._reduce_lr(epoch) + self.cooldown_counter = self.cooldown + self.num_bad_epochs = 0 + + self._last_lr = [group['lr'] for group in self.optimizer.param_groups] + + def _reduce_lr(self, epoch): + for i, param_group in enumerate(self.optimizer.param_groups): + old_lr = float(param_group['lr']) + new_lr = max(old_lr * self.factor, self.min_lrs[i]) + if old_lr - new_lr > self.eps: + param_group['lr'] = new_lr + + @property + def in_cooldown(self): + return self.cooldown_counter > 0 + + def is_better(self, a, best): + if self.mode == 'min' and self.threshold_mode == 'rel': + rel_epsilon = 1. - self.threshold + return a < best * rel_epsilon + + elif self.mode == 'min' and self.threshold_mode == 'abs': + return a < best - self.threshold + + elif self.mode == 'max' and self.threshold_mode == 'rel': + rel_epsilon = self.threshold + 1. 
+ return a > best * rel_epsilon + + else: # mode == 'max' and epsilon_mode == 'abs': + return a > best + self.threshold + + def _init_is_better(self, mode, threshold, threshold_mode): + if mode not in {'min', 'max'}: + raise ValueError('mode ' + mode + ' is unknown!') + if threshold_mode not in {'rel', 'abs'}: + raise ValueError('threshold mode ' + threshold_mode + ' is unknown!') + + if mode == 'min': + self.mode_worse = inf + else: # mode == 'max': + self.mode_worse = -inf + + self.mode = mode + self.threshold = threshold + self.threshold_mode = threshold_mode + + def state_dict(self): + return {key: value for key, value in self.__dict__.items() if key != 'optimizer'} + + def load_state_dict(self, state_dict): + self.__dict__.update(state_dict) + self._init_is_better(mode=self.mode, threshold=self.threshold, threshold_mode=self.threshold_mode) + + +class CyclicLR(LRScheduler): + r"""Sets the learning rate of each parameter group according to + cyclical learning rate policy (CLR). The policy cycles the learning + rate between two boundaries with a constant frequency, as detailed in + the paper `Cyclical Learning Rates for Training Neural Networks`_. + The distance between the two boundaries can be scaled on a per-iteration + or per-cycle basis. + + Cyclical learning rate policy changes the learning rate after every batch. + `step` should be called after a batch has been used for training. + + This class has three built-in policies, as put forth in the paper: + + * "triangular": A basic triangular cycle without amplitude scaling. + * "triangular2": A basic triangular cycle that scales initial amplitude by half each cycle. + * "exp_range": A cycle that scales initial amplitude by :math:`\text{gamma}^{\text{cycle iterations}}` + at each cycle iteration. + + This implementation was adapted from the github repo: `bckenstler/CLR`_ + + Args: + optimizer (Optimizer): Wrapped optimizer. + base_lr (float or list): Initial learning rate which is the + lower boundary in the cycle for each parameter group. + max_lr (float or list): Upper learning rate boundaries in the cycle + for each parameter group. Functionally, + it defines the cycle amplitude (max_lr - base_lr). + The lr at any cycle is the sum of base_lr + and some scaling of the amplitude; therefore + max_lr may not actually be reached depending on + scaling function. + step_size_up (int): Number of training iterations in the + increasing half of a cycle. Default: 2000 + step_size_down (int): Number of training iterations in the + decreasing half of a cycle. If step_size_down is None, + it is set to step_size_up. Default: None + mode (str): One of {triangular, triangular2, exp_range}. + Values correspond to policies detailed above. + If scale_fn is not None, this argument is ignored. + Default: 'triangular' + gamma (float): Constant in 'exp_range' scaling function: + gamma**(cycle iterations) + Default: 1.0 + scale_fn (function): Custom scaling policy defined by a single + argument lambda function, where + 0 <= scale_fn(x) <= 1 for all x >= 0. + If specified, then 'mode' is ignored. + Default: None + scale_mode (str): {'cycle', 'iterations'}. + Defines whether scale_fn is evaluated on + cycle number or cycle iterations (training + iterations since start of cycle). + Default: 'cycle' + cycle_momentum (bool): If ``True``, momentum is cycled inversely + to learning rate between 'base_momentum' and 'max_momentum'. + Default: True + base_momentum (float or list): Lower momentum boundaries in the cycle + for each parameter group. 
Note that momentum is cycled inversely + to learning rate; at the peak of a cycle, momentum is + 'base_momentum' and learning rate is 'max_lr'. + Default: 0.8 + max_momentum (float or list): Upper momentum boundaries in the cycle + for each parameter group. Functionally, + it defines the cycle amplitude (max_momentum - base_momentum). + The momentum at any cycle is the difference of max_momentum + and some scaling of the amplitude; therefore + base_momentum may not actually be reached depending on + scaling function. Note that momentum is cycled inversely + to learning rate; at the start of a cycle, momentum is 'max_momentum' + and learning rate is 'base_lr' + Default: 0.9 + last_epoch (int): The index of the last batch. This parameter is used when + resuming a training job. Since `step()` should be invoked after each + batch instead of after each epoch, this number represents the total + number of *batches* computed, not the total number of epochs computed. + When last_epoch=-1, the schedule is started from the beginning. + Default: -1 + verbose (bool): If ``True``, prints a message to stdout for + each update. Default: ``False``. + + .. deprecated:: 2.2 + ``verbose`` is deprecated. Please use ``get_last_lr()`` to access the + learning rate. + + Example: + >>> # xdoctest: +SKIP + >>> optimizer = torch.optim.SGD(model.parameters(), lr=0.1, momentum=0.9) + >>> scheduler = torch.optim.lr_scheduler.CyclicLR(optimizer, base_lr=0.01, max_lr=0.1) + >>> data_loader = torch.utils.data.DataLoader(...) + >>> for epoch in range(10): + >>> for batch in data_loader: + >>> train_batch(...) + >>> scheduler.step() + + + .. _Cyclical Learning Rates for Training Neural Networks: https://arxiv.org/abs/1506.01186 + .. _bckenstler/CLR: https://github.com/bckenstler/CLR + """ + + def __init__(self, + optimizer, + base_lr, + max_lr, + step_size_up=2000, + step_size_down=None, + mode='triangular', + gamma=1., + scale_fn=None, + scale_mode='cycle', + cycle_momentum=True, + base_momentum=0.8, + max_momentum=0.9, + last_epoch=-1, + verbose="deprecated"): + + # Attach optimizer + if not isinstance(optimizer, Optimizer): + raise TypeError(f'{type(optimizer).__name__} is not an Optimizer') + self.optimizer = optimizer + + base_lrs = self._format_param('base_lr', optimizer, base_lr) + if last_epoch == -1: + for lr, group in zip(base_lrs, optimizer.param_groups): + group['lr'] = lr + + self.max_lrs = self._format_param('max_lr', optimizer, max_lr) + + step_size_up = float(step_size_up) + step_size_down = float(step_size_down) if step_size_down is not None else step_size_up + self.total_size = step_size_up + step_size_down + self.step_ratio = step_size_up / self.total_size + + if mode not in ['triangular', 'triangular2', 'exp_range'] \ + and scale_fn is None: + raise ValueError('mode is invalid and scale_fn is None') + + self.mode = mode + self.gamma = gamma + + self._scale_fn_ref = None + self._scale_fn_custom = scale_fn + self.scale_mode = scale_mode + self._init_scale_fn() + + self.cycle_momentum = cycle_momentum + if cycle_momentum: + if 'momentum' not in optimizer.defaults: + raise ValueError('optimizer must support momentum with `cycle_momentum` option enabled') + + base_momentums = self._format_param('base_momentum', optimizer, base_momentum) + if last_epoch == -1: + for momentum, group in zip(base_momentums, optimizer.param_groups): + group['momentum'] = momentum + self.base_momentums = [group['momentum'] for group in optimizer.param_groups] + self.max_momentums = self._format_param('max_momentum', optimizer, 
max_momentum) + + super().__init__(optimizer, last_epoch, verbose) + self.base_lrs = base_lrs + + def _init_scale_fn(self): + if self._scale_fn_custom is not None: + return + if self.mode == 'triangular': + self._scale_fn_ref = self._triangular_scale_fn + self.scale_mode = 'cycle' + elif self.mode == 'triangular2': + self._scale_fn_ref = self._triangular2_scale_fn + self.scale_mode = 'cycle' + elif self.mode == 'exp_range': + self._scale_fn_ref = partial(self._exp_range_scale_fn, self.gamma) + self.scale_mode = 'iterations' + + def _format_param(self, name, optimizer, param): + """Return correctly formatted lr/momentum for each param group.""" + if isinstance(param, (list, tuple)): + if len(param) != len(optimizer.param_groups): + raise ValueError(f"expected {len(optimizer.param_groups)} values for {name}, got {len(param)}") + return param + else: + return [param] * len(optimizer.param_groups) + + def scale_fn(self, x): + if self._scale_fn_custom is not None: + return self._scale_fn_custom(x) + else: + return self._scale_fn_ref(x) # static method + + @staticmethod + def _triangular_scale_fn(x): + return 1. + + @staticmethod + def _triangular2_scale_fn(x): + return 1 / (2. ** (x - 1)) + + @staticmethod + def _exp_range_scale_fn(gamma, x): + return gamma ** x + + def get_lr(self): + """Calculates the learning rate at batch index. This function treats + `self.last_epoch` as the last batch index. + + If `self.cycle_momentum` is ``True``, this function has a side effect of + updating the optimizer's momentum. + """ + + if not self._get_lr_called_within_step: + warnings.warn("To get the last learning rate computed by the scheduler, " + "please use `get_last_lr()`.", UserWarning) + + cycle = math.floor(1 + self.last_epoch / self.total_size) + x = 1. + self.last_epoch / self.total_size - cycle + if x <= self.step_ratio: + scale_factor = x / self.step_ratio + else: + scale_factor = (x - 1) / (self.step_ratio - 1) + + lrs = [] + for base_lr, max_lr in zip(self.base_lrs, self.max_lrs): + base_height = (max_lr - base_lr) * scale_factor + if self.scale_mode == 'cycle': + lr = base_lr + base_height * self.scale_fn(cycle) + else: + lr = base_lr + base_height * self.scale_fn(self.last_epoch) + lrs.append(lr) + + if self.cycle_momentum: + momentums = [] + for base_momentum, max_momentum in zip(self.base_momentums, self.max_momentums): + base_height = (max_momentum - base_momentum) * scale_factor + if self.scale_mode == 'cycle': + momentum = max_momentum - base_height * self.scale_fn(cycle) + else: + momentum = max_momentum - base_height * self.scale_fn(self.last_epoch) + momentums.append(momentum) + for param_group, momentum in zip(self.optimizer.param_groups, momentums): + param_group['momentum'] = momentum + + return lrs + + def state_dict(self): + state = super().state_dict() + # We are dropping the `_scale_fn_ref` attribute because it is a + # `weakref.WeakMethod` and can't be pickled. + state.pop('_scale_fn_ref') + fn = state.pop('_scale_fn_custom') + state['_scale_fn_custom'] = None + if fn is not None and not isinstance(fn, types.FunctionType): + # The _scale_fn_custom will only be saved if it is a callable object + # and not if it is a function or lambda. 
+ state['_scale_fn_custom'] = fn.__dict__.copy() + + return state + + def load_state_dict(self, state_dict): + fn = state_dict.pop('_scale_fn_custom') + super().load_state_dict(state_dict) + if fn is not None: + self._scale_fn_custom.__dict__.update(fn) + self._init_scale_fn() + + +class CosineAnnealingWarmRestarts(LRScheduler): + r"""Set the learning rate of each parameter group using a cosine annealing + schedule, where :math:`\eta_{max}` is set to the initial lr, :math:`T_{cur}` + is the number of epochs since the last restart and :math:`T_{i}` is the number + of epochs between two warm restarts in SGDR: + + .. math:: + \eta_t = \eta_{min} + \frac{1}{2}(\eta_{max} - \eta_{min})\left(1 + + \cos\left(\frac{T_{cur}}{T_{i}}\pi\right)\right) + + When :math:`T_{cur}=T_{i}`, set :math:`\eta_t = \eta_{min}`. + When :math:`T_{cur}=0` after restart, set :math:`\eta_t=\eta_{max}`. + + It has been proposed in + `SGDR: Stochastic Gradient Descent with Warm Restarts`_. + + Args: + optimizer (Optimizer): Wrapped optimizer. + T_0 (int): Number of iterations for the first restart. + T_mult (int, optional): A factor increases :math:`T_{i}` after a restart. Default: 1. + eta_min (float, optional): Minimum learning rate. Default: 0. + last_epoch (int, optional): The index of last epoch. Default: -1. + verbose (bool): If ``True``, prints a message to stdout for + each update. Default: ``False``. + + .. deprecated:: 2.2 + ``verbose`` is deprecated. Please use ``get_last_lr()`` to access the + learning rate. + + .. _SGDR\: Stochastic Gradient Descent with Warm Restarts: + https://arxiv.org/abs/1608.03983 + """ + + def __init__(self, optimizer, T_0, T_mult=1, eta_min=0, last_epoch=-1, verbose="deprecated"): + if T_0 <= 0 or not isinstance(T_0, int): + raise ValueError(f"Expected positive integer T_0, but got {T_0}") + if T_mult < 1 or not isinstance(T_mult, int): + raise ValueError(f"Expected integer T_mult >= 1, but got {T_mult}") + if not isinstance(eta_min, (float, int)): + raise ValueError(f"Expected float or int eta_min, but got {eta_min} of type {type(eta_min)}") + self.T_0 = T_0 + self.T_i = T_0 + self.T_mult = T_mult + self.eta_min = eta_min + self.T_cur = last_epoch + super().__init__(optimizer, last_epoch, verbose) + + def get_lr(self): + if not self._get_lr_called_within_step: + warnings.warn("To get the last learning rate computed by the scheduler, " + "please use `get_last_lr()`.", UserWarning) + + return [self.eta_min + (base_lr - self.eta_min) * (1 + math.cos(math.pi * self.T_cur / self.T_i)) / 2 + for base_lr in self.base_lrs] + + def step(self, epoch=None): + """Step could be called after every batch update + + Example: + >>> # xdoctest: +SKIP("Undefined vars") + >>> scheduler = CosineAnnealingWarmRestarts(optimizer, T_0, T_mult) + >>> iters = len(dataloader) + >>> for epoch in range(20): + >>> for i, sample in enumerate(dataloader): + >>> inputs, labels = sample['inputs'], sample['labels'] + >>> optimizer.zero_grad() + >>> outputs = net(inputs) + >>> loss = criterion(outputs, labels) + >>> loss.backward() + >>> optimizer.step() + >>> scheduler.step(epoch + i / iters) + + This function can be called in an interleaved way. 
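# Illustrative sketch, not part of the vendored torch source above: with the
# implicit per-epoch form of step() shown in the first example, T_0=2 and
# T_mult=2 make CosineAnnealingWarmRestarts return to the initial lr at epochs
# 0, 2, 6, 14. The toy parameter and learning rate are hypothetical.
import torch

param = torch.nn.Parameter(torch.zeros(1))
opt = torch.optim.SGD([param], lr=0.1)
sched = torch.optim.lr_scheduler.CosineAnnealingWarmRestarts(opt, T_0=2, T_mult=2)

restarts = []
for epoch in range(20):
    if abs(sched.get_last_lr()[0] - 0.1) < 1e-12:
        restarts.append(epoch)
    opt.step()
    sched.step()

assert restarts == [0, 2, 6, 14]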
+ + Example: + >>> # xdoctest: +SKIP("Undefined vars") + >>> scheduler = CosineAnnealingWarmRestarts(optimizer, T_0, T_mult) + >>> for epoch in range(20): + >>> scheduler.step() + >>> scheduler.step(26) + >>> scheduler.step() # scheduler.step(27), instead of scheduler(20) + """ + + if epoch is None and self.last_epoch < 0: + epoch = 0 + + if epoch is None: + epoch = self.last_epoch + 1 + self.T_cur = self.T_cur + 1 + if self.T_cur >= self.T_i: + self.T_cur = self.T_cur - self.T_i + self.T_i = self.T_i * self.T_mult + else: + if epoch < 0: + raise ValueError(f"Expected non-negative epoch, but got {epoch}") + if epoch >= self.T_0: + if self.T_mult == 1: + self.T_cur = epoch % self.T_0 + else: + n = int(math.log((epoch / self.T_0 * (self.T_mult - 1) + 1), self.T_mult)) + self.T_cur = epoch - self.T_0 * (self.T_mult ** n - 1) / (self.T_mult - 1) + self.T_i = self.T_0 * self.T_mult ** (n) + else: + self.T_i = self.T_0 + self.T_cur = epoch + self.last_epoch = math.floor(epoch) + + class _enable_get_lr_call: + + def __init__(self, o): + self.o = o + + def __enter__(self): + self.o._get_lr_called_within_step = True + return self + + def __exit__(self, type, value, traceback): + self.o._get_lr_called_within_step = False + return self + + with _enable_get_lr_call(self): + for i, data in enumerate(zip(self.optimizer.param_groups, self.get_lr())): + param_group, lr = data + param_group['lr'] = lr + + self._last_lr = [group['lr'] for group in self.optimizer.param_groups] + + +class OneCycleLR(LRScheduler): + r"""Sets the learning rate of each parameter group according to the + 1cycle learning rate policy. The 1cycle policy anneals the learning + rate from an initial learning rate to some maximum learning rate and then + from that maximum learning rate to some minimum learning rate much lower + than the initial learning rate. + This policy was initially described in the paper `Super-Convergence: + Very Fast Training of Neural Networks Using Large Learning Rates`_. + + The 1cycle learning rate policy changes the learning rate after every batch. + `step` should be called after a batch has been used for training. + + This scheduler is not chainable. + + Note also that the total number of steps in the cycle can be determined in one + of two ways (listed in order of precedence): + + #. A value for total_steps is explicitly provided. + #. A number of epochs (epochs) and a number of steps per epoch + (steps_per_epoch) are provided. + In this case, the number of total steps is inferred by + total_steps = epochs * steps_per_epoch + + You must either provide a value for total_steps or provide a value for both + epochs and steps_per_epoch. + + The default behaviour of this scheduler follows the fastai implementation of 1cycle, which + claims that "unpublished work has shown even better results by using only two phases". To + mimic the behaviour of the original paper instead, set ``three_phase=True``. + + Args: + optimizer (Optimizer): Wrapped optimizer. + max_lr (float or list): Upper learning rate boundaries in the cycle + for each parameter group. + total_steps (int): The total number of steps in the cycle. Note that + if a value is not provided here, then it must be inferred by providing + a value for epochs and steps_per_epoch. + Default: None + epochs (int): The number of epochs to train for. This is used along + with steps_per_epoch in order to infer the total number of steps in the cycle + if a value for total_steps is not provided. 
+ Default: None + steps_per_epoch (int): The number of steps per epoch to train for. This is + used along with epochs in order to infer the total number of steps in the + cycle if a value for total_steps is not provided. + Default: None + pct_start (float): The percentage of the cycle (in number of steps) spent + increasing the learning rate. + Default: 0.3 + anneal_strategy (str): {'cos', 'linear'} + Specifies the annealing strategy: "cos" for cosine annealing, "linear" for + linear annealing. + Default: 'cos' + cycle_momentum (bool): If ``True``, momentum is cycled inversely + to learning rate between 'base_momentum' and 'max_momentum'. + Default: True + base_momentum (float or list): Lower momentum boundaries in the cycle + for each parameter group. Note that momentum is cycled inversely + to learning rate; at the peak of a cycle, momentum is + 'base_momentum' and learning rate is 'max_lr'. + Default: 0.85 + max_momentum (float or list): Upper momentum boundaries in the cycle + for each parameter group. Functionally, + it defines the cycle amplitude (max_momentum - base_momentum). + Note that momentum is cycled inversely + to learning rate; at the start of a cycle, momentum is 'max_momentum' + and learning rate is 'base_lr' + Default: 0.95 + div_factor (float): Determines the initial learning rate via + initial_lr = max_lr/div_factor + Default: 25 + final_div_factor (float): Determines the minimum learning rate via + min_lr = initial_lr/final_div_factor + Default: 1e4 + three_phase (bool): If ``True``, use a third phase of the schedule to annihilate the + learning rate according to 'final_div_factor' instead of modifying the second + phase (the first two phases will be symmetrical about the step indicated by + 'pct_start'). + last_epoch (int): The index of the last batch. This parameter is used when + resuming a training job. Since `step()` should be invoked after each + batch instead of after each epoch, this number represents the total + number of *batches* computed, not the total number of epochs computed. + When last_epoch=-1, the schedule is started from the beginning. + Default: -1 + verbose (bool): If ``True``, prints a message to stdout for + each update. Default: ``False``. + + .. deprecated:: 2.2 + ``verbose`` is deprecated. Please use ``get_last_lr()`` to access the + learning rate. + + Example: + >>> # xdoctest: +SKIP + >>> data_loader = torch.utils.data.DataLoader(...) + >>> optimizer = torch.optim.SGD(model.parameters(), lr=0.1, momentum=0.9) + >>> scheduler = torch.optim.lr_scheduler.OneCycleLR(optimizer, max_lr=0.01, steps_per_epoch=len(data_loader), epochs=10) + >>> for epoch in range(10): + >>> for batch in data_loader: + >>> train_batch(...) + >>> optimizer.step() + >>> scheduler.step() + + + .. 
_Super-Convergence\: Very Fast Training of Neural Networks Using Large Learning Rates: + https://arxiv.org/abs/1708.07120 + """ + def __init__(self, + optimizer, + max_lr, + total_steps=None, + epochs=None, + steps_per_epoch=None, + pct_start=0.3, + anneal_strategy='cos', + cycle_momentum=True, + base_momentum=0.85, + max_momentum=0.95, + div_factor=25., + final_div_factor=1e4, + three_phase=False, + last_epoch=-1, + verbose="deprecated"): + + # Validate optimizer + if not isinstance(optimizer, Optimizer): + raise TypeError(f'{type(optimizer).__name__} is not an Optimizer') + self.optimizer = optimizer + + # Validate total_steps + if total_steps is None and epochs is None and steps_per_epoch is None: + raise ValueError("You must define either total_steps OR (epochs AND steps_per_epoch)") + elif total_steps is not None: + if total_steps <= 0 or not isinstance(total_steps, int): + raise ValueError(f"Expected positive integer total_steps, but got {total_steps}") + self.total_steps = total_steps + else: + if epochs <= 0 or not isinstance(epochs, int): + raise ValueError(f"Expected positive integer epochs, but got {epochs}") + if steps_per_epoch <= 0 or not isinstance(steps_per_epoch, int): + raise ValueError(f"Expected positive integer steps_per_epoch, but got {steps_per_epoch}") + self.total_steps = epochs * steps_per_epoch + + if three_phase: + self._schedule_phases = [ + { + 'end_step': float(pct_start * self.total_steps) - 1, + 'start_lr': 'initial_lr', + 'end_lr': 'max_lr', + 'start_momentum': 'max_momentum', + 'end_momentum': 'base_momentum', + }, + { + 'end_step': float(2 * pct_start * self.total_steps) - 2, + 'start_lr': 'max_lr', + 'end_lr': 'initial_lr', + 'start_momentum': 'base_momentum', + 'end_momentum': 'max_momentum', + }, + { + 'end_step': self.total_steps - 1, + 'start_lr': 'initial_lr', + 'end_lr': 'min_lr', + 'start_momentum': 'max_momentum', + 'end_momentum': 'max_momentum', + }, + ] + else: + self._schedule_phases = [ + { + 'end_step': float(pct_start * self.total_steps) - 1, + 'start_lr': 'initial_lr', + 'end_lr': 'max_lr', + 'start_momentum': 'max_momentum', + 'end_momentum': 'base_momentum', + }, + { + 'end_step': self.total_steps - 1, + 'start_lr': 'max_lr', + 'end_lr': 'min_lr', + 'start_momentum': 'base_momentum', + 'end_momentum': 'max_momentum', + }, + ] + + # Validate pct_start + if pct_start < 0 or pct_start > 1 or not isinstance(pct_start, float): + raise ValueError(f"Expected float between 0 and 1 pct_start, but got {pct_start}") + + # Validate anneal_strategy + if anneal_strategy not in ['cos', 'linear']: + raise ValueError(f"anneal_strategy must by one of 'cos' or 'linear', instead got {anneal_strategy}") + elif anneal_strategy == 'cos': + self.anneal_func = self._annealing_cos + elif anneal_strategy == 'linear': + self.anneal_func = self._annealing_linear + + # Initialize learning rate variables + max_lrs = self._format_param('max_lr', self.optimizer, max_lr) + if last_epoch == -1: + for idx, group in enumerate(self.optimizer.param_groups): + group['initial_lr'] = max_lrs[idx] / div_factor + group['max_lr'] = max_lrs[idx] + group['min_lr'] = group['initial_lr'] / final_div_factor + + # Initialize momentum variables + self.cycle_momentum = cycle_momentum + if self.cycle_momentum: + if 'momentum' not in self.optimizer.defaults and 'betas' not in self.optimizer.defaults: + raise ValueError('optimizer must support momentum with `cycle_momentum` option enabled') + self.use_beta1 = 'betas' in self.optimizer.defaults + max_momentums = 
self._format_param('max_momentum', optimizer, max_momentum) + base_momentums = self._format_param('base_momentum', optimizer, base_momentum) + if last_epoch == -1: + for m_momentum, b_momentum, group in zip(max_momentums, base_momentums, optimizer.param_groups): + if self.use_beta1: + group['betas'] = (m_momentum, *group['betas'][1:]) + else: + group['momentum'] = m_momentum + group['max_momentum'] = m_momentum + group['base_momentum'] = b_momentum + + super().__init__(optimizer, last_epoch, verbose) + + def _format_param(self, name, optimizer, param): + """Return correctly formatted lr/momentum for each param group.""" + if isinstance(param, (list, tuple)): + if len(param) != len(optimizer.param_groups): + raise ValueError(f"expected {len(optimizer.param_groups)} values for {name}, got {len(param)}") + return param + else: + return [param] * len(optimizer.param_groups) + + @staticmethod + def _annealing_cos(start, end, pct): + "Cosine anneal from `start` to `end` as pct goes from 0.0 to 1.0." + cos_out = math.cos(math.pi * pct) + 1 + return end + (start - end) / 2.0 * cos_out + + @staticmethod + def _annealing_linear(start, end, pct): + "Linearly anneal from `start` to `end` as pct goes from 0.0 to 1.0." + return (end - start) * pct + start + + def get_lr(self): + if not self._get_lr_called_within_step: + warnings.warn("To get the last learning rate computed by the scheduler, " + "please use `get_last_lr()`.", UserWarning) + + lrs = [] + step_num = self.last_epoch + + if step_num > self.total_steps: + raise ValueError("Tried to step {} times. The specified number of total steps is {}" + .format(step_num, self.total_steps)) + + for group in self.optimizer.param_groups: + start_step = 0 + for i, phase in enumerate(self._schedule_phases): + end_step = phase['end_step'] + if step_num <= end_step or i == len(self._schedule_phases) - 1: + pct = (step_num - start_step) / (end_step - start_step) + computed_lr = self.anneal_func(group[phase['start_lr']], group[phase['end_lr']], pct) + if self.cycle_momentum: + computed_momentum = self.anneal_func(group[phase['start_momentum']], group[phase['end_momentum']], pct) + break + start_step = phase['end_step'] + + lrs.append(computed_lr) + if self.cycle_momentum: + if self.use_beta1: + group['betas'] = (computed_momentum, *group['betas'][1:]) + else: + group['momentum'] = computed_momentum + + return lrs diff --git a/env-llmeval/lib/python3.10/site-packages/torch/optim/optimizer.py b/env-llmeval/lib/python3.10/site-packages/torch/optim/optimizer.py new file mode 100644 index 0000000000000000000000000000000000000000..f96b4bc7e08b3b4327c822d6e5ed8c32ca79368f --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torch/optim/optimizer.py @@ -0,0 +1,907 @@ +import math +import functools +import warnings +from collections import OrderedDict, defaultdict +from copy import deepcopy +from itertools import chain +from typing import ( + Any, + Callable, + DefaultDict, + Dict, + Hashable, + Iterable, + List, + Optional, + Set, + Tuple, + TypeVar, + Union, + cast, + overload, +) +from typing_extensions import ParamSpec, Self, TypeAlias + +import torch +import torch.utils.hooks as hooks +from torch.utils.hooks import RemovableHandle +from torch.utils._foreach_utils import ( + Indices, + TensorListList, + _get_foreach_kernels_supported_devices, + _get_fused_kernels_supported_devices, +) +from torch._utils import is_compiling +from torch.utils._foreach_utils import _group_tensors_by_device_and_dtype + +Args: TypeAlias = Tuple[Any, ...] 
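# Illustrative sketch, not part of the vendored torch source above: OneCycleLR
# (defined just above) is stepped once per batch; with these hypothetical
# settings the lr starts at max_lr / div_factor, peaks at max_lr around
# pct_start * total_steps, and is annealed far below the initial lr by the end.
import torch

param = torch.nn.Parameter(torch.zeros(1))
opt = torch.optim.SGD([param], lr=0.1, momentum=0.9)
sched = torch.optim.lr_scheduler.OneCycleLR(
    opt, max_lr=0.1, total_steps=100, pct_start=0.3, div_factor=25.0, final_div_factor=1e4
)

lrs = [opt.param_groups[0]["lr"]]          # lr right after construction
for _ in range(99):                        # total_steps - 1 further batches
    opt.step()
    sched.step()
    lrs.append(opt.param_groups[0]["lr"])

assert abs(lrs[0] - 0.1 / 25.0) < 1e-9     # starts at initial_lr = max_lr / div_factor
assert abs(max(lrs) - 0.1) < 1e-9          # peaks at max_lr
assert lrs[-1] < lrs[0] / 100              # ends well below the initial lr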
+Kwargs: TypeAlias = Dict[str, Any] +StateDict: TypeAlias = Dict[str, Any] + +GlobalOptimizerPreHook: TypeAlias = Callable[["Optimizer", Args, Kwargs], Optional[Tuple[Args, Kwargs]]] +GlobalOptimizerPostHook: TypeAlias = Callable[["Optimizer", Args, Kwargs], None] + +__all__ = ['Optimizer', 'register_optimizer_step_pre_hook', 'register_optimizer_step_post_hook'] +_global_optimizer_pre_hooks: Dict[int, GlobalOptimizerPreHook] = OrderedDict() +_global_optimizer_post_hooks: Dict[int, GlobalOptimizerPostHook] = OrderedDict() +_foreach_supported_types = [torch.Tensor, torch.nn.parameter.Parameter] + +class _RequiredParameter: + """Singleton class representing a required parameter for an Optimizer.""" + def __repr__(self) -> str: + return "" + +required = _RequiredParameter() + + +def _use_grad_for_differentiable(func): + def _use_grad(self, *args, **kwargs): + import torch._dynamo + prev_grad = torch.is_grad_enabled() + try: + # Note on graph break below: + # we need to graph break to ensure that aot respects the no_grad annotation. + # This is important for perf because without this, functionalization will generate an epilogue + # which updates the mutated parameters of the optimizer which is *not* visible to inductor, as a result, + # inductor will allocate for every parameter in the model, which is horrible. + # With this, aot correctly sees that this is an inference graph, and functionalization will generate + # an epilogue which is appended to the graph, which *is* visible to inductor, as a result, inductor sees that + # step is in place and is able to avoid the extra allocation. + # In the future, we will either 1) continue to graph break on backward, so this graph break does not matter + # or 2) have a fully fused forward and backward graph, which will have no_grad by default, and we can remove this + # graph break to allow the fully fused fwd-bwd-optimizer graph to be compiled. + # see https://github.com/pytorch/pytorch/issues/104053 + torch.set_grad_enabled(self.defaults['differentiable']) + torch._dynamo.graph_break() + ret = func(self, *args, **kwargs) + finally: + torch._dynamo.graph_break() + torch.set_grad_enabled(prev_grad) + return ret + functools.update_wrapper(_use_grad, func) + return _use_grad + +def _get_value(x): + # item is significantly faster than a cpu tensor in eager mode + if not torch.jit.is_scripting() and is_compiling(): + return x + else: + return x.item() + +def _stack_if_compiling(x): + if not torch.jit.is_scripting() and is_compiling(): + return torch.stack(x) + else: + return x + +def _dispatch_sqrt(x: float): # float annotation is needed because of torchscript type inference + if not torch.jit.is_scripting() and isinstance(x, torch.Tensor): + return x.sqrt() + else: + return math.sqrt(x) + +# For any optimizer with a faster implementation, we attempt to default to the +# fastest + stablest whenever possible. For foreach, the requirements are to have +# native params all on CUDA. For fused, there's currently the additional requirement +# that the tensors' dtypes must be floating point. Neither alternative supports +# torch.jit.script nor differentiable, so we fall back to the single tensor +# implementation in those cases. 
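# Illustrative sketch, not part of the vendored torch source above: the helper
# defined right below chooses between the fused, foreach, and plain for-loop
# implementations when the user leaves both flags unset. A user can also force
# a choice explicitly, e.g. torch.optim.Adam accepts `foreach` and `fused`
# keyword arguments. The toy modules below are hypothetical.
import torch

model = torch.nn.Linear(8, 4)

# Force the multi-tensor (foreach) path explicitly.
opt_foreach = torch.optim.Adam(model.parameters(), lr=1e-3, foreach=True)

# Per the notes above, the fused path expects floating-point parameters on
# CUDA, so only request it when a GPU is available.
if torch.cuda.is_available():
    model_cuda = torch.nn.Linear(8, 4).cuda()
    opt_fused = torch.optim.Adam(model_cuda.parameters(), lr=1e-3, fused=True)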
+def _default_to_fused_or_foreach(params: List[torch.Tensor], + differentiable: bool, + use_fused: bool = False) -> Tuple[bool, bool]: + if torch.jit.is_scripting() or differentiable: + return False, False + + fused_supported_devices = _get_fused_kernels_supported_devices() + foreach_supported_devices = _get_foreach_kernels_supported_devices() + fused = use_fused and all( + p is None or (type(p) in _foreach_supported_types and + p.device.type in fused_supported_devices and + torch.is_floating_point(p)) for p in params + ) + foreach = not fused and all( + p is None or (type(p) in _foreach_supported_types and + p.device.type in foreach_supported_devices) for p in params + ) + return fused, foreach + +def _view_as_real(params, *state_and_grads): + for i, p in enumerate(params): + if torch.is_complex(p): + params[i] = torch.view_as_real(params[i]) + for s in state_and_grads: + s[i] = torch.view_as_real(s[i]) + +# Common doc strings among optimizers +_foreach_doc = r"""foreach (bool, optional): whether foreach implementation of optimizer + is used. If unspecified by the user (so foreach is None), we will try to use + foreach over the for-loop implementation on CUDA, since it is usually + significantly more performant. Note that the foreach implementation uses + ~ sizeof(params) more peak memory than the for-loop version due to the intermediates + being a tensorlist vs just one tensor. If memory is prohibitive, batch fewer + parameters through the optimizer at a time or switch this flag to False (default: None)""" + +_fused_doc = r"""fused (bool, optional): whether the fused implementation (CUDA only) is used. + Currently, `torch.float64`, `torch.float32`, `torch.float16`, and `torch.bfloat16` + are supported. (default: None) + + .. note:: The foreach and fused implementations are typically faster than the for-loop, + single-tensor implementation. Thus, if the user has not specified BOTH flags + (i.e., when foreach = fused = None), we will attempt defaulting to the foreach + implementation when the tensors are all on CUDA. For example, if the user specifies + True for fused but nothing for foreach, we will run the fused implementation. If + the user specifies False for foreach but nothing for fused (or False for fused but + nothing for foreach), we will run the for-loop implementation. If the user specifies + True for both foreach and fused, we will prioritize fused over foreach, as it is + typically faster. We attempt to use the fastest, so the hierarchy goes fused -> + foreach -> for-loop. HOWEVER, since the fused implementation is relatively new, + we want to give it sufficient bake-in time, so we default to foreach and NOT + fused when the user has not specified either flag.""" + +_capturable_doc = r"""capturable (bool, optional): whether this instance is safe to + capture in a CUDA graph. Passing True can impair ungraphed performance, + so if you don't intend to graph capture this instance, leave it False + (default: False)""" + +_differentiable_doc = r"""differentiable (bool, optional): whether autograd should + occur through the optimizer step in training. Otherwise, the step() + function runs in a torch.no_grad() context. 
Setting to True can impair + performance, so leave it False if you don't intend to run autograd + through this instance (default: False)""" + +_maximize_doc = r"""maximize (bool, optional): maximize the objective with respect to the + params, instead of minimizing (default: False)""" + + +def register_optimizer_step_pre_hook(hook: GlobalOptimizerPreHook) -> RemovableHandle: + r"""Register a pre hook common to all optimizers. The hook should have the following + signature:: + + hook(optimizer, args, kwargs) -> None or modified args and kwargs + + Args: + hook (Callable): A user defined hook which is registered on all optimizers. + + Returns: + :class:`torch.utils.hooks.RemovableHandle`: + a handle that can be used to remove the added hook by calling + ``handle.remove()`` + """ + handle = hooks.RemovableHandle(_global_optimizer_pre_hooks) + _global_optimizer_pre_hooks[handle.id] = hook + return handle + + +def register_optimizer_step_post_hook(hook: GlobalOptimizerPostHook) -> RemovableHandle: + r"""Register a post hook common to all optimizers. The hook should have the following + signature:: + + hook(optimizer, args, kwargs) -> None + + Args: + hook (Callable): A user defined hook which is registered on all optimizers. + + Returns: + :class:`torch.utils.hooks.RemovableHandle`: + a handle that can be used to remove the added hook by calling + ``handle.remove()`` + """ + handle = hooks.RemovableHandle(_global_optimizer_post_hooks) + _global_optimizer_post_hooks[handle.id] = hook + return handle + +ParamsT: TypeAlias = Union[Iterable[torch.Tensor], Iterable[Dict[str, Any]]] + +_P = ParamSpec("_P") +R = TypeVar("R") +T = TypeVar("T") + + +class Optimizer: + r"""Base class for all optimizers. + + .. warning:: + Parameters need to be specified as collections that have a deterministic + ordering that is consistent between runs. Examples of objects that don't + satisfy those properties are sets and iterators over values of dictionaries. + + Args: + params (iterable): an iterable of :class:`torch.Tensor` s or + :class:`dict` s. Specifies what Tensors should be optimized. + defaults: (dict): a dict containing default values of optimization + options (used when a parameter group doesn't specify them). 
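# Illustrative sketch, not part of the vendored torch source above: `params`
# may be an iterable of tensors or of dicts, and any option a group does not
# specify falls back to the constructor defaults, as described in the Args
# above. The two toy modules are hypothetical.
import torch

backbone = torch.nn.Linear(16, 16)
head = torch.nn.Linear(16, 2)

opt = torch.optim.SGD(
    [
        {"params": backbone.parameters()},           # inherits the default lr
        {"params": head.parameters(), "lr": 1e-2},   # overrides lr for this group
    ],
    lr=1e-3,
    momentum=0.9,
)

assert opt.param_groups[0]["lr"] == 1e-3
assert opt.param_groups[1]["lr"] == 1e-2
assert all(g["momentum"] == 0.9 for g in opt.param_groups)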
+ """ + + OptimizerPreHook: TypeAlias = Callable[[Self, Args, Kwargs], Optional[Tuple[Args, Kwargs]]] # type: ignore[misc] + OptimizerPostHook: TypeAlias = Callable[[Self, Args, Kwargs], None] # type: ignore[misc] + + _optimizer_step_pre_hooks: Dict[int, OptimizerPreHook] + _optimizer_step_post_hooks: Dict[int, OptimizerPostHook] + _optimizer_state_dict_pre_hooks: 'OrderedDict[int, Callable[["Optimizer"], None]]' + _optimizer_state_dict_post_hooks: 'OrderedDict[int, Callable[["Optimizer", StateDict], Optional[StateDict]]]' + _optimizer_load_state_dict_pre_hooks: 'OrderedDict[int, Callable[["Optimizer", StateDict], Optional[StateDict]]]' + _optimizer_load_state_dict_post_hooks: 'OrderedDict[int, Callable[["Optimizer"], None]]' + + def __init__(self, params: ParamsT, defaults: Dict[str, Any]) -> None: + torch._C._log_api_usage_once("python.optimizer") + self.defaults = defaults + self._optimizer_step_pre_hooks = OrderedDict() + self._optimizer_step_post_hooks = OrderedDict() + self._optimizer_state_dict_pre_hooks = OrderedDict() + self._optimizer_state_dict_post_hooks = OrderedDict() + self._optimizer_load_state_dict_pre_hooks = OrderedDict() + self._optimizer_load_state_dict_post_hooks = OrderedDict() + + self._patch_step_function() + + if isinstance(params, torch.Tensor): + if self.__class__.__name__ == 'SparseAdam': + warnings.warn(("Passing in a raw Tensor as ``params`` to SparseAdam " + "is deprecated. In the future, this will raise an error. " + "Please wrap your Tensor in an iterable instead."), + FutureWarning) + else: + raise TypeError("params argument given to the optimizer should be " + "an iterable of Tensors or dicts, but got " + + torch.typename(params)) + + self.state: DefaultDict[torch.Tensor, Any] = defaultdict(dict) + self.param_groups: List[Dict[str, Any]] = [] + + param_groups = list(params) + if len(param_groups) == 0: + raise ValueError("optimizer got an empty parameter list") + if not isinstance(param_groups[0], dict): + param_groups = [{'params': param_groups}] + + for param_group in param_groups: + self.add_param_group(cast(dict, param_group)) + + # Allows _cuda_graph_capture_health_check to rig a poor man's TORCH_WARN_ONCE in python, + # which I don't think exists + # https://github.com/pytorch/pytorch/issues/72948 + self._warned_capturable_if_run_uncaptured = True + + def __getstate__(self) -> Dict[str, Any]: + return { + 'defaults': self.defaults, + 'state': self.state, + 'param_groups': self.param_groups, + } + + def __setstate__(self, state: Dict[str, Any]) -> None: + self.__dict__.update(state) + if '_optimizer_step_pre_hooks' not in self.__dict__: + self._optimizer_step_pre_hooks = OrderedDict() + if '_optimizer_step_post_hooks' not in self.__dict__: + self._optimizer_step_post_hooks = OrderedDict() + if '_optimizer_state_dict_pre_hooks' not in self.__dict__: + self._optimizer_state_dict_pre_hooks = OrderedDict() + if '_optimizer_state_dict_post_hooks' not in self.__dict__: + self._optimizer_state_dict_post_hooks = OrderedDict() + if '_optimizer_load_state_dict_pre_hooks' not in self.__dict__: + self._optimizer_load_state_dict_pre_hooks = OrderedDict() + if '_optimizer_load_state_dict_post_hooks' not in self.__dict__: + self._optimizer_load_state_dict_post_hooks = OrderedDict() + self._patch_step_function() # To support multiprocessing pickle/unpickle + self.defaults.setdefault('differentiable', False) + + def __repr__(self) -> str: + format_string = self.__class__.__name__ + ' (' + for i, group in enumerate(self.param_groups): + format_string += '\n' + 
format_string += f'Parameter Group {i}\n' + for key in sorted(group.keys()): + if key != 'params': + format_string += f' {key}: {group[key]}\n' + format_string += ')' + return format_string + + # Currently needed by Adam and AdamW + def _cuda_graph_capture_health_check(self) -> None: + # Note [torch.compile x capturable] + # If we are compiling, we try to take the capturable path automatically by + # setting the flag to True during tracing. Due to this, we skip all the checks + # normally required for determining whether we can use CUDA graphs and + # shunt the responsibility to torch.inductor. This saves time during tracing + # since the checks are slow without sacrificing UX since inductor will warn + # later if CUDA graphs cannot be enabled, e.g., + # https://github.com/pytorch/pytorch/blob/d3ba8901d8640eb16f88b2bfef9df7fa383d4b47/torch/_inductor/compile_fx.py#L390. + # Thus, when compiling, inductor will determine if cudagraphs + # can be enabled based on whether there is input mutation or CPU tensors. + if not is_compiling() and torch.backends.cuda.is_built() and torch.cuda.is_available(): + capturing = torch.cuda.is_current_stream_capturing() + + if capturing and not all(group['capturable'] for group in self.param_groups): + raise RuntimeError("Attempting CUDA graph capture of step() for an instance of " + + self.__class__.__name__ + + " but param_groups' capturable is False.") + + if ( + (not getattr(self, "_warned_capturable_if_run_uncaptured", False)) + and all(group['capturable'] for group in self.param_groups) + and (not capturing) + ): + warnings.warn( + "This instance was constructed with capturable=True or some of all the param_groups came with capturable=True, " + "but step() is running without CUDA graph capture. If you never intend to graph-capture this " + "instance, capturable=True can impair performance, and you should set capturable=False." + ) + self._warned_capturable_if_run_uncaptured = True + + def _optimizer_step_code(self) -> None: + """Entry point for `torch.profile.profiler`. + + When python tracing is enabled the profiler will hook into this + function at the CPython level to inspect the optimizer's parameters and + param groups. It is called it after `step()` since many optimizers + lazily initialize state. + + This is a workaround due to lack of a proper step hook on the optimizer, + and will be removed if it exists. + """ + pass + + @staticmethod + def profile_hook_step(func: Callable[_P, R]) -> Callable[_P, R]: + + @functools.wraps(func) + def wrapper(*args: _P.args, **kwargs: _P.kwargs) -> R: + self, *_ = args + self = cast(Optimizer, self) + profile_name = f"Optimizer.step#{self.__class__.__name__}.step" + with torch.autograd.profiler.record_function(profile_name): + # call optimizer step pre hooks + for pre_hook in chain(_global_optimizer_pre_hooks.values(), self._optimizer_step_pre_hooks.values()): + result = pre_hook(self, args, kwargs) + if result is not None: + if isinstance(result, tuple) and len(result) == 2: + args, kwargs = result # type: ignore[assignment] + else: + raise RuntimeError( + f"{func} must return None or a tuple of (new_args, new_kwargs), but got {result}." 
+ ) + + out = func(*args, **kwargs) + self._optimizer_step_code() + + # call optimizer step post hooks + for post_hook in chain(self._optimizer_step_post_hooks.values(), _global_optimizer_post_hooks.values()): + post_hook(self, args, kwargs) + + return out + + return wrapper + + @staticmethod + def _group_tensors_by_device_and_dtype( + tensorlistlist: TensorListList, + with_indices: bool = False, + ) -> Union[ + Dict[Tuple[None, None], Tuple[TensorListList, Indices]], + Dict[Tuple[torch.device, torch.dtype], Tuple[TensorListList, Indices]], + ]: + """Groups a list of lists of tensors by device and dtype. + Skips this step if we are compiling since this will occur during inductor lowering.""" + if is_compiling(): + return {(None, None): (tensorlistlist, list(range(len(tensorlistlist[0]))))} + else: + return _group_tensors_by_device_and_dtype(tensorlistlist, with_indices) + + def _patch_step_function(self) -> None: + self._zero_grad_profile_name = f"Optimizer.zero_grad#{self.__class__.__name__}.zero_grad" + hooked = getattr(self.__class__.step, "hooked", None) + if not hooked: + self.__class__.step = self.profile_hook_step(self.__class__.step) # type: ignore[assignment] + self.__class__.step.hooked = True # type: ignore[attr-defined] + + def register_step_pre_hook(self, hook: OptimizerPreHook) -> RemovableHandle: + r"""Register an optimizer step pre hook which will be called before + optimizer step. It should have the following signature:: + + hook(optimizer, args, kwargs) -> None or modified args and kwargs + + The ``optimizer`` argument is the optimizer instance being used. If + args and kwargs are modified by the pre-hook, then the transformed + values are returned as a tuple containing the new_args and new_kwargs. + + Args: + hook (Callable): The user defined hook to be registered. + + Returns: + :class:`torch.utils.hooks.RemovableHandle`: + a handle that can be used to remove the added hook by calling + ``handle.remove()`` + """ + handle = hooks.RemovableHandle(self._optimizer_step_pre_hooks) + self._optimizer_step_pre_hooks[handle.id] = hook + return handle + + def register_step_post_hook(self, hook: OptimizerPostHook) -> RemovableHandle: + r"""Register an optimizer step post hook which will be called after optimizer step. + It should have the following signature:: + + hook(optimizer, args, kwargs) -> None + + The ``optimizer`` argument is the optimizer instance being used. + + Args: + hook (Callable): The user defined hook to be registered. + + Returns: + :class:`torch.utils.hooks.RemovableHandle`: + a handle that can be used to remove the added hook by calling + ``handle.remove()`` + """ + handle = hooks.RemovableHandle(self._optimizer_step_post_hooks) + self._optimizer_step_post_hooks[handle.id] = hook + return handle + + + def register_state_dict_pre_hook( + self, hook: Callable[["Optimizer"], None], prepend: bool = False + ) -> RemovableHandle: + r"""Register a state dict pre-hook which will be called before + :meth:`~torch.optim.Optimizer.state_dict` is called. It should have the + following signature:: + + hook(optimizer) -> None + + The ``optimizer`` argument is the optimizer instance being used. + The hook will be called with argument ``self`` before calling ``state_dict`` on ``self``. + The registered hook can be used to perform pre-processing before the ``state_dict`` + call is made. + + Args: + hook (Callable): The user defined hook to be registered. 
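# Illustrative sketch, not part of the vendored torch source above: a
# per-instance post-hook registered with register_step_post_hook (documented
# just above), using the hook(optimizer, args, kwargs) -> None signature.
# The toy parameter is hypothetical.
import torch

param = torch.nn.Parameter(torch.zeros(2))
opt = torch.optim.SGD([param], lr=0.1)

seen_lrs = []

def log_step(optimizer, args, kwargs):
    # Runs after every opt.step(); records the current lr of each group.
    seen_lrs.append([g["lr"] for g in optimizer.param_groups])

handle = opt.register_step_post_hook(log_step)

param.grad = torch.ones_like(param)
opt.step()

assert seen_lrs == [[0.1]]
handle.remove()                              # detach the hook when done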
+ prepend (bool): If True, the provided pre ``hook`` will be fired before + all the already registered pre-hooks on ``state_dict``. Otherwise, + the provided ``hook`` will be fired after all the already registered + pre-hooks. (default: False) + + Returns: + :class:`torch.utils.hooks.RemoveableHandle`: + a handle that can be used to remove the added hook by calling + ``handle.remove()`` + """ + handle = hooks.RemovableHandle(self._optimizer_state_dict_pre_hooks) + self._optimizer_state_dict_pre_hooks[handle.id] = hook + if prepend: + self._optimizer_state_dict_pre_hooks.move_to_end(handle.id, last=False) + return handle + + + def register_state_dict_post_hook( + self, + hook: Callable[["Optimizer", StateDict], Optional[StateDict]], + prepend: bool = False, + ) -> RemovableHandle: + r"""Register a state dict post-hook which will be called after + :meth:`~torch.optim.Optimizer.state_dict` is called. It should have the + following signature:: + + hook(optimizer, state_dict) -> state_dict or None + + The hook will be called with arguments ``self`` and ``state_dict`` after generating + a ``state_dict`` on ``self``. The hook may modify the state_dict inplace or optionally + return a new one. The registered hook can be used to perform post-processing + on the ``state_dict`` before it is returned. + + Args: + hook (Callable): The user defined hook to be registered. + prepend (bool): If True, the provided post ``hook`` will be fired before + all the already registered post-hooks on ``state_dict``. Otherwise, + the provided ``hook`` will be fired after all the already registered + post-hooks. (default: False) + + Returns: + :class:`torch.utils.hooks.RemoveableHandle`: + a handle that can be used to remove the added hook by calling + ``handle.remove()`` + """ + handle = hooks.RemovableHandle(self._optimizer_state_dict_post_hooks) + self._optimizer_state_dict_post_hooks[handle.id] = hook + if prepend: + self._optimizer_state_dict_post_hooks.move_to_end(handle.id, last=False) + return handle + + + @torch._disable_dynamo + def state_dict(self) -> StateDict: + r"""Returns the state of the optimizer as a :class:`dict`. + + It contains two entries: + + * ``state``: a Dict holding current optimization state. Its content + differs between optimizer classes, but some common characteristics + hold. For example, state is saved per parameter, and the parameter + itself is NOT saved. ``state`` is a Dictionary mapping parameter ids + to a Dict with state corresponding to each parameter. + * ``param_groups``: a List containing all parameter groups where each + parameter group is a Dict. Each parameter group contains metadata + specific to the optimizer, such as learning rate and weight decay, + as well as a List of parameter IDs of the parameters in the group. + + NOTE: The parameter IDs may look like indices but they are just IDs + associating state with param_group. When loading from a state_dict, + the optimizer will zip the param_group ``params`` (int IDs) and the + optimizer ``param_groups`` (actual ``nn.Parameter`` s) in order to + match state WITHOUT additional verification. + + A returned state dict might look something like: + + .. code-block:: text + + { + 'state': { + 0: {'momentum_buffer': tensor(...), ...}, + 1: {'momentum_buffer': tensor(...), ...}, + 2: {'momentum_buffer': tensor(...), ...}, + 3: {'momentum_buffer': tensor(...), ...} + }, + 'param_groups': [ + { + 'lr': 0.01, + 'weight_decay': 0, + ... + 'params': [0] + }, + { + 'lr': 0.001, + 'weight_decay': 0.5, + ... 
+ 'params': [1, 2, 3] + } + ] + } + + """ + + for pre_hook in self._optimizer_state_dict_pre_hooks.values(): + pre_hook(self) + + # Save order indices instead of Tensors + param_mappings: Dict[int, int] = {} + start_index = 0 + + def pack_group(group: Dict[str, Any]) -> Dict[str, Any]: + nonlocal start_index + packed = {k: v for k, v in group.items() if k != 'params'} + param_mappings.update({id(p): i for i, p in enumerate(group['params'], start_index) + if id(p) not in param_mappings}) + packed['params'] = [param_mappings[id(p)] for p in group['params']] + start_index += len(packed['params']) + return packed + param_groups = [pack_group(g) for g in self.param_groups] + # Remap state to use order indices as keys + packed_state = {(param_mappings[id(k)] if isinstance(k, torch.Tensor) else k): v + for k, v in self.state.items()} + + state_dict = { + 'state': packed_state, + 'param_groups': param_groups, + } + + for post_hook in self._optimizer_state_dict_post_hooks.values(): + hook_result = post_hook(self, state_dict) + if hook_result is not None: + state_dict = hook_result + return state_dict + + @staticmethod + def _process_value_according_to_param_policy( + param: torch.Tensor, + value: torch.Tensor, + param_id: int, + param_groups: List[Dict[Any, Any]], + key: Hashable = None, + ) -> torch.Tensor: + # Floating-point types are a bit special here. They are the only ones + # that are assumed to always match the type of params. + # Make sure state['step'] is not casted https://github.com/pytorch/pytorch/issues/74424 + # UNLESS fused or capturable, see note [special device hosting for step] + fused = False + capturable = False + assert param_groups is not None + for pg in param_groups: + if param_id in pg["params"]: + fused = pg["fused"] if "fused" in pg else False + capturable = pg["capturable"] if "capturable" in pg else False + break + + if key == 'step': + if capturable or fused: + return value.to(dtype=torch.float32, device=param.device) + else: + return value + else: + if param.is_floating_point(): + return value.to(dtype=param.dtype, device=param.device) + else: + return value.to(device=param.device) + + + def register_load_state_dict_pre_hook( + self, + hook: Callable[["Optimizer", StateDict], Optional[StateDict]], + prepend: bool = False, + ) -> RemovableHandle: + r"""Register a load_state_dict pre-hook which will be called before + :meth:`~torch.optim.Optimizer.load_state_dict` is called. It should have the + following signature:: + + hook(optimizer, state_dict) -> state_dict or None + + The ``optimizer`` argument is the optimizer instance being used and the + ``state_dict`` argument is a shallow copy of the ``state_dict`` the user + passed in to ``load_state_dict``. The hook may modify the state_dict inplace + or optionally return a new one. If a state_dict is returned, it will be used + to be loaded into the optimizer. + + The hook will be called with argument ``self`` and ``state_dict`` before + calling ``load_state_dict`` on ``self``. The registered hook can be used to + perform pre-processing before the ``load_state_dict`` call is made. + + Args: + hook (Callable): The user defined hook to be registered. + prepend (bool): If True, the provided pre ``hook`` will be fired before + all the already registered pre-hooks on ``load_state_dict``. Otherwise, + the provided ``hook`` will be fired after all the already registered + pre-hooks. 
(default: False)
+
+ Returns:
+ :class:`torch.utils.hooks.RemovableHandle`:
+ a handle that can be used to remove the added hook by calling
+ ``handle.remove()``
+ """
+ handle = hooks.RemovableHandle(self._optimizer_load_state_dict_pre_hooks)
+ self._optimizer_load_state_dict_pre_hooks[handle.id] = hook
+ if prepend:
+ self._optimizer_load_state_dict_pre_hooks.move_to_end(handle.id, last=False)
+ return handle
+
+
+ def register_load_state_dict_post_hook(
+ self, hook: Callable[["Optimizer"], None], prepend: bool = False
+ ) -> RemovableHandle:
+ r"""Register a load_state_dict post-hook which will be called after
+ :meth:`~torch.optim.Optimizer.load_state_dict` is called. It should have the
+ following signature::
+
+ hook(optimizer) -> None
+
+ The ``optimizer`` argument is the optimizer instance being used.
+
+ The hook will be called with argument ``self`` after calling
+ ``load_state_dict`` on ``self``. The registered hook can be used to
+ perform post-processing after ``load_state_dict`` has loaded the
+ ``state_dict``.
+
+ Args:
+ hook (Callable): The user defined hook to be registered.
+ prepend (bool): If True, the provided post ``hook`` will be fired before
+ all the already registered post-hooks on ``load_state_dict``. Otherwise,
+ the provided ``hook`` will be fired after all the already registered
+ post-hooks. (default: False)
+
+ Returns:
+ :class:`torch.utils.hooks.RemovableHandle`:
+ a handle that can be used to remove the added hook by calling
+ ``handle.remove()``
+ """
+ handle = hooks.RemovableHandle(self._optimizer_load_state_dict_post_hooks)
+ self._optimizer_load_state_dict_post_hooks[handle.id] = hook
+ if prepend:
+ self._optimizer_load_state_dict_post_hooks.move_to_end(handle.id, last=False) # type: ignore[attr-defined]
+ return handle
+
+
+ @torch._disable_dynamo
+ def load_state_dict(self, state_dict: StateDict) -> None:
+ r"""Loads the optimizer state.
+
+ Args:
+ state_dict (dict): optimizer state. Should be an object returned
+ from a call to :meth:`state_dict`.
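As a concrete illustration of the hook-registration methods above, a minimal sketch registers a step pre-hook and a load_state_dict pre-hook on a throwaway optimizer; the ``nn.Linear`` module and the hook bodies are invented for this example and are not part of the file::

    import torch

    model = torch.nn.Linear(4, 2)                      # throwaway module for illustration
    opt = torch.optim.SGD(model.parameters(), lr=0.1)

    def log_step(optimizer, args, kwargs):
        # Step pre-hooks may return None (keep args/kwargs) or a (new_args, new_kwargs) tuple.
        print(f"stepping over {len(optimizer.param_groups)} param group(s)")

    def drop_note(optimizer, state_dict):
        # load_state_dict pre-hooks may edit the incoming dict in place or return a new one.
        state_dict.pop("note", None)   # hypothetical extra key, used only for this sketch
        return state_dict

    step_handle = opt.register_step_pre_hook(log_step)
    load_handle = opt.register_load_state_dict_pre_hook(drop_note)

    model(torch.randn(8, 4)).sum().backward()
    opt.step()                               # log_step fires before the actual step
    opt.load_state_dict(opt.state_dict())    # drop_note fires before the state is loaded

    step_handle.remove()                     # both hooks are removable via their handles
    load_handle.remove()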
+ """ + # shallow copy, to be consistent with module API + state_dict = state_dict.copy() + + for pre_hook in self._optimizer_load_state_dict_pre_hooks.values(): + hook_result = pre_hook(self, state_dict) + if hook_result is not None: + state_dict = hook_result + + # Validate the state_dict + groups = self.param_groups + + # Deepcopy as we write into saved_groups later to update state + saved_groups = deepcopy(state_dict['param_groups']) + + if len(groups) != len(saved_groups): + raise ValueError("loaded state dict has a different number of " + "parameter groups") + param_lens = (len(g['params']) for g in groups) + saved_lens = (len(g['params']) for g in saved_groups) + if any(p_len != s_len for p_len, s_len in zip(param_lens, saved_lens)): + raise ValueError("loaded state dict contains a parameter group " + "that doesn't match the size of optimizer's group") + + # Update the state + id_map = dict(zip(chain.from_iterable(g['params'] for g in saved_groups), + chain.from_iterable(g['params'] for g in groups))) + + def _cast(param, value, param_id=None, param_groups=None, key=None): + r"""Make a deep copy of value, casting all tensors to device of param.""" + if isinstance(value, torch.Tensor): + return Optimizer._process_value_according_to_param_policy(param, value, param_id, param_groups, key) + elif isinstance(value, dict): + return {k: _cast(param, v, param_id=param_id, param_groups=param_groups, key=k) for k, v in value.items()} + elif isinstance(value, Iterable): + return type(value)(_cast(param, v, param_id=param_id, param_groups=param_groups) for v in value) # type: ignore[call-arg] + else: + return value + + # Copy state assigned to params (and cast tensors to appropriate types). + # State that is not assigned to params is copied as is (needed for + # backward compatibility). + state: DefaultDict[torch.Tensor, Dict[Any, Any]] = defaultdict(dict) + for k, v in state_dict['state'].items(): + if k in id_map: + param = id_map[k] + state[param] = _cast(param, v, param_id=k, param_groups=state_dict['param_groups']) + else: + state[k] = v + + # Update parameter groups, setting their 'params' value + def update_group(group: Dict[str, Any], new_group: Dict[str, Any]) -> Dict[str, Any]: + new_group['params'] = group['params'] + return new_group + param_groups = [ + update_group(g, ng) for g, ng in zip(groups, saved_groups)] + self.__setstate__({'state': state, 'param_groups': param_groups}) + + for post_hook in self._optimizer_load_state_dict_post_hooks.values(): + post_hook(self) + + + @torch._disable_dynamo + def zero_grad(self, set_to_none: bool = True) -> None: + r"""Resets the gradients of all optimized :class:`torch.Tensor` s. + + Args: + set_to_none (bool): instead of setting to zero, set the grads to None. + This will in general have lower memory footprint, and can modestly improve performance. + However, it changes certain behaviors. For example: + 1. When the user tries to access a gradient and perform manual ops on it, + a None attribute or a Tensor full of 0s will behave differently. + 2. If the user requests ``zero_grad(set_to_none=True)`` followed by a backward pass, ``.grad``\ s + are guaranteed to be None for params that did not receive a gradient. + 3. ``torch.optim`` optimizers have a different behavior if the gradient is 0 or None + (in one case it does the step with a gradient of 0 and in the other it skips + the step altogether). 
+ """ + foreach = self.defaults.get('foreach', False) or self.defaults.get('fused', False) + + if not hasattr(self, "_zero_grad_profile_name"): + self._patch_step_function() + + per_device_and_dtype_grads: Optional[DefaultDict[torch.device, DefaultDict[torch.dtype, List[torch.Tensor]]]] + if foreach: + per_device_and_dtype_grads = defaultdict(lambda: defaultdict(list)) + else: + per_device_and_dtype_grads = None + + with torch.autograd.profiler.record_function(self._zero_grad_profile_name): + for group in self.param_groups: + for p in group['params']: + if p.grad is not None: + if set_to_none: + p.grad = None + else: + if p.grad.grad_fn is not None: + p.grad.detach_() + else: + p.grad.requires_grad_(False) + if (not foreach or p.grad.is_sparse): + p.grad.zero_() + else: + assert per_device_and_dtype_grads is not None + per_device_and_dtype_grads[p.grad.device][p.grad.dtype].append(p.grad) + if foreach: + assert per_device_and_dtype_grads is not None + for per_dtype_grads in per_device_and_dtype_grads.values(): + for grads in per_dtype_grads.values(): + torch._foreach_zero_(grads) + + @overload + def step(self, closure: None = ...) -> None: + ... + + @overload + def step(self, closure: Callable[[], float]) -> float: + ... + + def step(self, closure: Optional[Callable[[], float]] = None) -> Optional[float]: + r"""Performs a single optimization step (parameter update). + + Args: + closure (Callable): A closure that reevaluates the model and + returns the loss. Optional for most optimizers. + + .. note:: + Unless otherwise specified, this function should not modify the + ``.grad`` field of the parameters. + """ + raise NotImplementedError + + @torch._disable_dynamo + def add_param_group(self, param_group: Dict[str, Any]) -> None: + r"""Add a param group to the :class:`Optimizer` s `param_groups`. + + This can be useful when fine tuning a pre-trained network as frozen layers can be made + trainable and added to the :class:`Optimizer` as training progresses. + + Args: + param_group (dict): Specifies what Tensors should be optimized along with group + specific optimization options. + """ + if not isinstance(param_group, dict): + raise TypeError(f"param_group must be a dict, but got {type(param_group)}") + + params = param_group['params'] + if isinstance(params, torch.Tensor): + param_group['params'] = [params] + elif isinstance(params, set): + raise TypeError('optimizer parameters need to be organized in ordered collections, but ' + 'the ordering of tensors in sets will change between runs. 
Please use a list instead.') + else: + param_group['params'] = list(params) + + for param in param_group['params']: + if not isinstance(param, torch.Tensor): + raise TypeError("optimizer can only optimize Tensors, " + "but one of the params is " + torch.typename(param)) + if not self.defaults.get('differentiable', None) and not (param.is_leaf or param.retains_grad): + raise ValueError("can't optimize a non-leaf Tensor") + + for name, default in self.defaults.items(): + if default is required and name not in param_group: + raise ValueError(f"parameter group didn't specify a value of required optimization parameter {name}") + else: + param_group.setdefault(name, default) + + params = param_group['params'] + if len(params) != len(set(params)): + warnings.warn("optimizer contains a parameter group with duplicate parameters; " + "in future, this will cause an error; " + "see github.com/pytorch/pytorch/issues/40967 for more information", stacklevel=3) + + param_set: Set[torch.Tensor] = set() + for group in self.param_groups: + param_set.update(set(group['params'])) + + if not param_set.isdisjoint(set(param_group['params'])): + raise ValueError("some parameters appear in more than one parameter group") + + self.param_groups.append(param_group) diff --git a/env-llmeval/lib/python3.10/site-packages/torch/optim/radam.py b/env-llmeval/lib/python3.10/site-packages/torch/optim/radam.py new file mode 100644 index 0000000000000000000000000000000000000000..60ae225ab495eb97475d05e14c5b2e7914fd03ac --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torch/optim/radam.py @@ -0,0 +1,434 @@ +import math +from typing import List, Optional + +import torch +from torch import Tensor + +from .optimizer import ( + Optimizer, + _default_to_fused_or_foreach, + _differentiable_doc, + _dispatch_sqrt, + _foreach_doc, + _get_value, + _stack_if_compiling, + _use_grad_for_differentiable, + _view_as_real, +) + +__all__ = ["RAdam", "radam"] + + +class RAdam(Optimizer): + def __init__( + self, + params, + lr=1e-3, + betas=(0.9, 0.999), + eps=1e-8, + weight_decay=0, + decoupled_weight_decay: bool = False, + *, + foreach: Optional[bool] = None, + differentiable: bool = False, + ): + if not 0.0 <= lr: + raise ValueError(f"Invalid learning rate: {lr}") + if not 0.0 <= eps: + raise ValueError(f"Invalid epsilon value: {eps}") + if not 0.0 <= betas[0] < 1.0: + raise ValueError(f"Invalid beta parameter at index 0: {betas[0]}") + if not 0.0 <= betas[1] < 1.0: + raise ValueError(f"Invalid beta parameter at index 1: {betas[1]}") + if not 0.0 <= weight_decay: + raise ValueError(f"Invalid weight_decay value: {weight_decay}") + defaults = dict( + lr=lr, + betas=betas, + eps=eps, + weight_decay=weight_decay, + foreach=foreach, + decoupled_weight_decay=decoupled_weight_decay, + differentiable=differentiable, + ) + super().__init__(params, defaults) + + def __setstate__(self, state): + super().__setstate__(state) + for group in self.param_groups: + group.setdefault("foreach", None) + group.setdefault("differentiable", False) + group.setdefault("decoupled_weight_decay", False) + state_values = list(self.state.values()) + step_is_tensor = (len(state_values) != 0) and torch.is_tensor( + state_values[0]["step"] + ) + if not step_is_tensor: + for s in state_values: + s["step"] = torch.tensor(float(s["step"]), dtype=torch.float32) + + def _init_group(self, group, params_with_grad, grads, exp_avgs, exp_avg_sqs, state_steps): + has_complex = False + for p in group["params"]: + if p.grad is not None: + has_complex |= torch.is_complex(p) + 
params_with_grad.append(p) + if p.grad.is_sparse: + raise RuntimeError("RAdam does not support sparse gradients") + grads.append(p.grad) + + state = self.state[p] + # Lazy state initialization + if len(state) == 0: + state["step"] = torch.tensor(0.0, dtype=torch.float32) + # Exponential moving average of gradient values + state["exp_avg"] = torch.zeros_like( + p, memory_format=torch.preserve_format + ) + # Exponential moving average of squared gradient values + state["exp_avg_sq"] = torch.zeros_like( + p, memory_format=torch.preserve_format + ) + + exp_avgs.append(state["exp_avg"]) + exp_avg_sqs.append(state["exp_avg_sq"]) + state_steps.append(state["step"]) + + return has_complex + + @_use_grad_for_differentiable + def step(self, closure=None): + """Performs a single optimization step. + + Args: + closure (Callable, optional): A closure that reevaluates the model + and returns the loss. + """ + loss = None + if closure is not None: + with torch.enable_grad(): + loss = closure() + + for group in self.param_groups: + params_with_grad = [] + grads = [] + exp_avgs = [] + exp_avg_sqs = [] + state_steps = [] + beta1, beta2 = group["betas"] + + has_complex = self._init_group(group, params_with_grad, grads, exp_avgs, exp_avg_sqs, state_steps) + + radam( + params_with_grad, + grads, + exp_avgs, + exp_avg_sqs, + state_steps, + beta1=beta1, + beta2=beta2, + lr=group["lr"], + weight_decay=group["weight_decay"], + eps=group["eps"], + foreach=group["foreach"], + differentiable=group["differentiable"], + decoupled_weight_decay=group["decoupled_weight_decay"], + has_complex=has_complex, + ) + + return loss + + +RAdam.__doc__ = r"""Implements RAdam algorithm. + + .. math:: + \begin{aligned} + &\rule{110mm}{0.4pt} \\ + &\textbf{input} : \gamma \text{ (lr)}, \: \beta_1, \beta_2 + \text{ (betas)}, \: \theta_0 \text{ (params)}, \:f(\theta) \text{ (objective)}, \: + \lambda \text{ (weightdecay)}, \\ + &\hspace{13mm} \epsilon \text{ (epsilon)}, \textit{decoupled\_weight\_decay} \\ + &\textbf{initialize} : m_0 \leftarrow 0 \text{ ( first moment)}, + v_0 \leftarrow 0 \text{ ( second moment)}, \\ + &\hspace{18mm} \rho_{\infty} \leftarrow 2/(1-\beta_2) -1 \\[-1.ex] + &\rule{110mm}{0.4pt} \\ + &\textbf{for} \: t=1 \: \textbf{to} \: \ldots \: \textbf{do} \\ + &\hspace{6mm} g_t \leftarrow \nabla_{\theta} f_t (\theta_{t-1}) \\ + &\hspace{6mm} \theta_t \leftarrow \theta_{t-1} \\ + &\hspace{6mm} \textbf{if} \: \lambda \neq 0 \\ + &\hspace{12mm}\textbf{if} \: \textit{decoupled\_weight\_decay} \\ + &\hspace{18mm} \theta_t \leftarrow \theta_{t} - \gamma \lambda \theta_{t} \\ + &\hspace{12mm}\textbf{else} \\ + &\hspace{18mm} g_t \leftarrow g_t + \lambda \theta_{t} \\ + &\hspace{6mm}m_t \leftarrow \beta_1 m_{t-1} + (1 - \beta_1) g_t \\ + &\hspace{6mm}v_t \leftarrow \beta_2 v_{t-1} + (1-\beta_2) g^2_t \\ + &\hspace{6mm}\widehat{m_t} \leftarrow m_t/\big(1-\beta_1^t \big) \\ + &\hspace{6mm}\rho_t \leftarrow \rho_{\infty} - + 2 t \beta^t_2 /\big(1-\beta_2^t \big) \\[0.1.ex] + &\hspace{6mm}\textbf{if} \: \rho_t > 5 \\ + &\hspace{12mm} l_t \leftarrow \frac{\sqrt{ (1-\beta^t_2) }}{ \sqrt{v_t} +\epsilon } \\ + &\hspace{12mm} r_t \leftarrow + \sqrt{\frac{(\rho_t-4)(\rho_t-2)\rho_{\infty}}{(\rho_{\infty}-4)(\rho_{\infty}-2) \rho_t}} \\ + &\hspace{12mm}\theta_t \leftarrow \theta_t - \gamma \widehat{m_t} r_t l_t \\ + &\hspace{6mm}\textbf{else} \\ + &\hspace{12mm}\theta_t \leftarrow \theta_t - \gamma \widehat{m_t} \\ + &\rule{110mm}{0.4pt} \\[-1.ex] + &\bf{return} \: \theta_t \\[-1.ex] + &\rule{110mm}{0.4pt} \\[-1.ex] + \end{aligned} + + 
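Read as plain Python for a single scalar parameter, the update rule above amounts to the following sketch; the function and variable names are invented, and this is an illustration of the pseudocode rather than the library implementation::

    import math

    def radam_scalar_step(theta, grad, m, v, t, lr=1e-3, beta1=0.9, beta2=0.999,
                          eps=1e-8, weight_decay=0.0, decoupled_weight_decay=False):
        # One RAdam update for a single scalar parameter, following the pseudocode above.
        if weight_decay != 0.0:
            if decoupled_weight_decay:
                theta -= lr * weight_decay * theta       # AdamW-style decay on the weight
            else:
                grad += weight_decay * theta             # Adam-style decay folded into the gradient
        m = beta1 * m + (1 - beta1) * grad               # first moment
        v = beta2 * v + (1 - beta2) * grad * grad        # second moment
        m_hat = m / (1 - beta1 ** t)                     # bias-corrected first moment
        rho_inf = 2 / (1 - beta2) - 1
        rho_t = rho_inf - 2 * t * beta2 ** t / (1 - beta2 ** t)
        if rho_t > 5:                                    # variance of the adaptive lr is tractable
            l_t = math.sqrt(1 - beta2 ** t) / (math.sqrt(v) + eps)
            r_t = math.sqrt((rho_t - 4) * (rho_t - 2) * rho_inf
                            / ((rho_inf - 4) * (rho_inf - 2) * rho_t))
            theta -= lr * m_hat * r_t * l_t
        else:                                            # fall back to an unadapted momentum step
            theta -= lr * m_hat
        return theta, m, v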
For further details regarding the algorithm we refer to `On the variance of the adaptive learning rate and beyond`_. + + This implementation provides an option to use either the original weight_decay implementation as in Adam + (where the weight_decay is applied to the gradient) or the one from AdamW (where weight_decay is applied + to the weight) through the decoupled_weight_decay option. When decoupled_weight_decay is set to False + (default), it uses the original Adam style weight decay, otherwise, it uses the AdamW style which + corresponds more closely to the `author's implementation`_ in the RAdam paper. Further information + about decoupled weight decay can be found in `Decoupled Weight Decay Regularization`_. + + """ + fr""" + Args: + params (iterable): iterable of parameters to optimize or dicts defining + parameter groups + lr (float, optional): learning rate (default: 1e-3) + betas (Tuple[float, float], optional): coefficients used for computing + running averages of gradient and its square (default: (0.9, 0.999)) + eps (float, optional): term added to the denominator to improve + numerical stability (default: 1e-8) + weight_decay (float, optional): weight decay (L2 penalty) (default: 0) + decoupled_weight_decay (bool, optional): whether to use decoupled weight + decay as in AdamW to obtain RAdamW (default: False) + {_foreach_doc} + {_differentiable_doc} + + .. _On the variance of the adaptive learning rate and beyond: + https://arxiv.org/abs/1908.03265 + .. _author's implementation: + https://github.com/LiyuanLucasLiu/RAdam + .. _Decoupled Weight Decay Regularization: + https://arxiv.org/abs/1711.05101 + + """ + + +def radam( + params: List[Tensor], + grads: List[Tensor], + exp_avgs: List[Tensor], + exp_avg_sqs: List[Tensor], + state_steps: List[Tensor], + # kwonly args with defaults are not supported by functions compiled with torchscript issue #70627 + # setting this as kwarg for now as functional API is compiled by torch/distributed/optim + decoupled_weight_decay: bool = False, + foreach: Optional[bool] = None, + differentiable: bool = False, + has_complex: bool = False, + *, + beta1: float, + beta2: float, + lr: float, + weight_decay: float, + eps: float, +): + r"""Functional API that performs RAdam algorithm computation. + + See :class:`~torch.optim.RAdam` for details. 
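A minimal call to this functional entry point might look like the sketch below, with throwaway tensors standing in for one parameter and its state; the required keyword-only hyperparameters mirror the signature above::

    import torch
    from torch.optim.radam import radam    # functional entry point defined in this file

    param = torch.randn(4)                 # throwaway parameter and state tensors
    grad = torch.randn(4)
    exp_avg = torch.zeros(4)
    exp_avg_sq = torch.zeros(4)
    step = torch.tensor(0.0)               # singleton step tensor, incremented in place

    # param, exp_avg, exp_avg_sq, and step are all updated in place.
    radam([param], [grad], [exp_avg], [exp_avg_sq], [step],
          beta1=0.9, beta2=0.999, lr=1e-3, weight_decay=0.0, eps=1e-8)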
+ """ + + if not all(isinstance(t, torch.Tensor) for t in state_steps): + raise RuntimeError( + "API has changed, `state_steps` argument must contain a list of singleton tensors" + ) + + if foreach is None: + _, foreach = _default_to_fused_or_foreach(params, differentiable, use_fused=False) + + if foreach and torch.jit.is_scripting(): + raise RuntimeError("torch.jit.script not supported with foreach optimizers") + + if foreach and not torch.jit.is_scripting(): + func = _multi_tensor_radam + else: + func = _single_tensor_radam + + func( + params, + grads, + exp_avgs, + exp_avg_sqs, + state_steps, + beta1=beta1, + beta2=beta2, + lr=lr, + weight_decay=weight_decay, + eps=eps, + decoupled_weight_decay=decoupled_weight_decay, + differentiable=differentiable, + has_complex=has_complex, + ) + + +def _single_tensor_radam( + params: List[Tensor], + grads: List[Tensor], + exp_avgs: List[Tensor], + exp_avg_sqs: List[Tensor], + state_steps: List[Tensor], + *, + beta1: float, + beta2: float, + lr: float, + weight_decay: float, + eps: float, + differentiable: bool, + decoupled_weight_decay: bool, + has_complex: bool, +): + + for i, param in enumerate(params): + grad = grads[i] + exp_avg = exp_avgs[i] + exp_avg_sq = exp_avg_sqs[i] + step_t = state_steps[i] + + if torch.is_complex(param): + param = torch.view_as_real(param) + grad = torch.view_as_real(grad) + exp_avg = torch.view_as_real(exp_avg) + exp_avg_sq = torch.view_as_real(exp_avg_sq) + + # update step + step_t += 1 + step = _get_value(step_t) + + bias_correction1 = 1 - beta1 ** step + bias_correction2 = 1 - beta2 ** step + + if weight_decay != 0: + if decoupled_weight_decay: + param.mul_(1 - lr * weight_decay) + else: + grad = grad.add(param, alpha=weight_decay) + + # Decay the first and second moment running average coefficient + exp_avg.lerp_(grad, 1 - beta1) + exp_avg_sq.mul_(beta2).addcmul_(grad, grad, value=1 - beta2) + + # correcting bias for the first moving moment + bias_corrected_exp_avg = exp_avg / bias_correction1 + + # maximum length of the approximated SMA + rho_inf = 2 / (1 - beta2) - 1 + # compute the length of the approximated SMA + rho_t = rho_inf - 2 * step * (beta2 ** step) / bias_correction2 + + if rho_t > 5.0: + # Compute the variance rectification term and update parameters accordingly + rect = math.sqrt( + (rho_t - 4) + * (rho_t - 2) + * rho_inf + / ((rho_inf - 4) * (rho_inf - 2) * rho_t) + ) + exp_avg_sq_sqrt = exp_avg_sq.sqrt() + if differentiable: + exp_avg_sq_sqrt = exp_avg_sq_sqrt.add(eps) + else: + exp_avg_sq_sqrt = exp_avg_sq_sqrt.add_(eps) + adaptive_lr = math.sqrt(bias_correction2) / exp_avg_sq_sqrt + param.add_(bias_corrected_exp_avg * lr * adaptive_lr * rect, alpha=-1.0) + else: + param.add_(bias_corrected_exp_avg * lr, alpha=-1.0) + + +def _multi_tensor_radam( + params: List[Tensor], + grads: List[Tensor], + exp_avgs: List[Tensor], + exp_avg_sqs: List[Tensor], + state_steps: List[Tensor], + *, + beta1: float, + beta2: float, + lr: float, + weight_decay: float, + eps: float, + decoupled_weight_decay: bool, + differentiable: bool, + has_complex: bool, +): + + if len(params) == 0: + return + + assert not differentiable, "_foreach ops don't support autograd" + + grouped_tensors = Optimizer._group_tensors_by_device_and_dtype([params, grads, exp_avgs, exp_avg_sqs, state_steps]) + for (( + grouped_params, + grouped_grads, + grouped_exp_avgs, + grouped_exp_avg_sqs, + grouped_state_steps, + ), _) in grouped_tensors.values(): + # Update steps + # If steps are on CPU, foreach will fall back to the slow path, which is a 
for-loop calling t.add(1) over + # and over. 1 will then be wrapped into a Tensor over and over again, which is slower than if we just + # wrapped it once now. The alpha is required to assure we go to the right overload. + if grouped_state_steps[0].is_cpu: + torch._foreach_add_(grouped_state_steps, torch.tensor(1.0, device='cpu'), alpha=1.0) + else: + torch._foreach_add_(grouped_state_steps, 1) + + if has_complex: + _view_as_real(grouped_params, grouped_grads, grouped_exp_avgs, grouped_exp_avg_sqs) + + # maximum length of the approximated SMA + rho_inf = 2 / (1 - beta2) - 1 + # compute the length of the approximated SMA + rho_t_list = [rho_inf - 2 * _get_value(step) * (beta2 ** _get_value(step)) / + (1 - beta2 ** _get_value(step)) for step in grouped_state_steps] + + if weight_decay != 0: + if decoupled_weight_decay: + torch._foreach_mul_(grouped_params, 1 - lr * weight_decay) + else: + grouped_grads = torch._foreach_add(grouped_grads, grouped_params, alpha=weight_decay) + + # Decay the first and second moment running average coefficient + torch._foreach_lerp_(grouped_exp_avgs, grouped_grads, 1 - beta1) + + torch._foreach_mul_(grouped_exp_avg_sqs, beta2) + torch._foreach_addcmul_(grouped_exp_avg_sqs, grouped_grads, grouped_grads, 1 - beta2) + + # Delete the local intermediate since it won't be used anymore to save on peak memory + del grouped_grads + + rect = [ + _dispatch_sqrt( + (rho_t - 4) + * (rho_t - 2) + * rho_inf + / ((rho_inf - 4) * (rho_inf - 2) * rho_t) + ) + if rho_t > 5 + else 0 + for rho_t in rho_t_list + ] + unrectified = [0 if rect > 0 else 1.0 for rect in rect] + + bias_correction1 = [1 - beta1 ** _get_value(step) for step in grouped_state_steps] + unrect_step_size = _stack_if_compiling([(lr * rect / bc) * -1 for rect, bc in zip(unrectified, bias_correction1)]) + bias_correction2_sqrt_times_rect_step_size = [ + _dispatch_sqrt(1 - beta2 ** _get_value(step)) * (lr * rect / bc) * -1 + for step, rect, bc in zip(grouped_state_steps, rect, bias_correction1) + ] + + buffer = torch._foreach_sqrt(grouped_exp_avg_sqs) + torch._foreach_add_(buffer, eps) + torch._foreach_div_(buffer, bias_correction2_sqrt_times_rect_step_size) + torch._foreach_reciprocal_(buffer) + torch._foreach_add_(buffer, unrect_step_size) + + # Here, buffer = sqrt(1 - beta2^t) * rect_step_size / (sqrt(v) + eps) + unrect_step_size + torch._foreach_addcmul_(grouped_params, grouped_exp_avgs, buffer) diff --git a/env-llmeval/lib/python3.10/site-packages/torch/optim/sgd.pyi b/env-llmeval/lib/python3.10/site-packages/torch/optim/sgd.pyi new file mode 100644 index 0000000000000000000000000000000000000000..ba1bcd60a1b89c4e02ce8851766d2f21e4b7050e --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torch/optim/sgd.pyi @@ -0,0 +1,12 @@ +from .optimizer import Optimizer, ParamsT + +class SGD(Optimizer): + def __init__( + self, + params: ParamsT, + lr: float = ..., + momentum: float = ..., + dampening: float = ..., + weight_decay: float = ..., + nesterov: bool = ..., + ) -> None: ... 
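The buffer computation in ``_multi_tensor_radam`` above folds the rectified and unrectified branches into a single ``addcmul``, relying on division by zero producing ``inf`` and ``inf.reciprocal()`` producing ``0`` for the unrectified case. One way to convince yourself that this algebra matches ``_single_tensor_radam`` is a throwaway per-element check like the following sketch; the values are arbitrary and the names are invented::

    import math
    import torch

    beta1, beta2, lr, eps = 0.9, 0.999, 1e-3, 1e-8
    exp_avg, exp_avg_sq = torch.tensor(0.3), torch.tensor(0.02)
    rho_inf = 2 / (1 - beta2) - 1

    for t in (2, 10):       # t=2 hits the unrectified branch, t=10 the rectified one
        bc1, bc2 = 1 - beta1 ** t, 1 - beta2 ** t
        rho_t = rho_inf - 2 * t * beta2 ** t / bc2
        rect = (math.sqrt((rho_t - 4) * (rho_t - 2) * rho_inf
                          / ((rho_inf - 4) * (rho_inf - 2) * rho_t))
                if rho_t > 5 else 0.0)

        # Parameter delta as _single_tensor_radam computes it for this element.
        if rect > 0:
            branchy = -(exp_avg / bc1) * lr * rect * math.sqrt(bc2) / (exp_avg_sq.sqrt() + eps)
        else:
            branchy = -(exp_avg / bc1) * lr

        # Parameter delta as the foreach buffer/addcmul trick computes it.
        unrect_step_size = 0.0 if rect > 0 else -lr / bc1
        rect_step_size = math.sqrt(bc2) * (-lr * rect / bc1)    # 0.0 when unrectified
        buffer = (exp_avg_sq.sqrt() + eps) / rect_step_size     # division by 0.0 -> inf when unrectified
        buffer = buffer.reciprocal() + unrect_step_size         # reciprocal of inf is 0
        fused = exp_avg * buffer

        assert torch.allclose(branchy, fused)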
diff --git a/env-llmeval/lib/python3.10/site-packages/torch/optim/sparse_adam.pyi b/env-llmeval/lib/python3.10/site-packages/torch/optim/sparse_adam.pyi new file mode 100644 index 0000000000000000000000000000000000000000..a84001d590b8c0187242e43cdf4890cb3ee81729 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torch/optim/sparse_adam.pyi @@ -0,0 +1,12 @@ +from typing import Tuple + +from .optimizer import Optimizer, ParamsT + +class SparseAdam(Optimizer): + def __init__( + self, + params: ParamsT, + lr: float = ..., + betas: Tuple[float, float] = ..., + eps: float = ..., + ) -> None: ...
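Tying the pieces of this diff together, a short sketch of the constructor signatures declared by the two stubs above, combined with the ``state_dict`` hook machinery from ``optimizer.py`` earlier in the diff; the parameters and the extra ``'note'`` key are invented for illustration::

    import torch

    # Throwaway parameters purely to exercise the stub signatures.
    dense_param = torch.nn.Parameter(torch.randn(5))
    embedding = torch.nn.Embedding(10, 3, sparse=True)   # dense weight, sparse gradients

    sgd = torch.optim.SGD([dense_param], lr=0.1, momentum=0.9,
                          weight_decay=1e-4, nesterov=True)
    sparse_adam = torch.optim.SparseAdam(list(embedding.parameters()),
                                         lr=1e-3, betas=(0.9, 0.999), eps=1e-8)

    def tag_state(optimizer, state_dict):
        # A state_dict post-hook that stamps the serialized state; the key is hypothetical.
        state_dict["param_groups"][0]["note"] = "saved-by-sketch"
        return state_dict

    handle = sgd.register_state_dict_post_hook(tag_state)
    checkpoint = sgd.state_dict()       # {'state': {...}, 'param_groups': [...]}
    sgd.load_state_dict(checkpoint)     # int IDs in 'params' are re-zipped against live parameters
    handle.remove()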