diff --git a/ckpts/universal/global_step120/zero/16.mlp.dense_h_to_4h.weight/fp32.pt b/ckpts/universal/global_step120/zero/16.mlp.dense_h_to_4h.weight/fp32.pt
new file mode 100644
index 0000000000000000000000000000000000000000..5b58dadbdd35aad18141f35fece5f5bfd3d50ae0
--- /dev/null
+++ b/ckpts/universal/global_step120/zero/16.mlp.dense_h_to_4h.weight/fp32.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:fb85a7743d2d09da9ec7f26db1767aaaeef955d4077692bd576c4ecd86c262da
+size 33555533
diff --git a/ckpts/universal/global_step120/zero/16.mlp.dense_h_to_4h_swiglu.weight/fp32.pt b/ckpts/universal/global_step120/zero/16.mlp.dense_h_to_4h_swiglu.weight/fp32.pt
new file mode 100644
index 0000000000000000000000000000000000000000..a8e8a789a992c8e46cd73cd8a61307fadbb364db
--- /dev/null
+++ b/ckpts/universal/global_step120/zero/16.mlp.dense_h_to_4h_swiglu.weight/fp32.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:d3fc099776710bd3754d0c00898989896ebfccd8bfc8166db8f399c0b25a1582
+size 33555533
diff --git a/ckpts/universal/global_step120/zero/4.attention.dense.weight/fp32.pt b/ckpts/universal/global_step120/zero/4.attention.dense.weight/fp32.pt
new file mode 100644
index 0000000000000000000000000000000000000000..27eae7d6f6f7a4523272963e9406dac3b09c0b03
--- /dev/null
+++ b/ckpts/universal/global_step120/zero/4.attention.dense.weight/fp32.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:68d950b662024ae4be69b37e2feeb330719bc7a596c4452c5a3eeab689f31ba4
+size 16778317
diff --git a/venv/lib/python3.10/site-packages/torch/_logging/__init__.py b/venv/lib/python3.10/site-packages/torch/_logging/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..4cfe8ba099371ee9faebe1b503fe3a8b05ddcf9e
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/torch/_logging/__init__.py
@@ -0,0 +1,16 @@
+# Top level logging module for torch logging
+# Design doc: https://docs.google.com/document/d/1ZRfTWKa8eaPq1AxaiHrq4ASTPouzzlPiuquSBEJYwS8/edit#
+# Simple setup for onboarding (see above doc for more detail):
+# 1. register any top-level log qualified name for your module in torch._logging._registrations (see there for examples)
+# 2. register any artifacts (<artifact_name> below) in torch._logging._registrations
+#   a. call getArtifactLogger(__name__, <artifact_name>) at your logging site instead of the standard logger to log your artifact
+import torch._logging._registrations
+from ._internal import (
+    _init_logs,
+    DEFAULT_LOGGING,
+    getArtifactLogger,
+    LazyString,
+    set_logs,
+    trace_structured,
+    warning_once,
+)
diff --git a/venv/lib/python3.10/site-packages/torch/_logging/__pycache__/__init__.cpython-310.pyc b/venv/lib/python3.10/site-packages/torch/_logging/__pycache__/__init__.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..14caeb7c2f02aafabd568d4b5ac7e326ad76642c
Binary files /dev/null and b/venv/lib/python3.10/site-packages/torch/_logging/__pycache__/__init__.cpython-310.pyc differ
diff --git a/venv/lib/python3.10/site-packages/torch/_logging/__pycache__/_internal.cpython-310.pyc b/venv/lib/python3.10/site-packages/torch/_logging/__pycache__/_internal.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..560ba1681389d5b32d6955494102968251112a1f
Binary files /dev/null and b/venv/lib/python3.10/site-packages/torch/_logging/__pycache__/_internal.cpython-310.pyc differ
diff --git a/venv/lib/python3.10/site-packages/torch/_logging/__pycache__/_registrations.cpython-310.pyc b/venv/lib/python3.10/site-packages/torch/_logging/__pycache__/_registrations.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..c8af45cd75748065e260d2f9ab11fdaf321bd664
Binary files /dev/null and b/venv/lib/python3.10/site-packages/torch/_logging/__pycache__/_registrations.cpython-310.pyc differ
diff --git a/venv/lib/python3.10/site-packages/torch/_logging/__pycache__/structured.cpython-310.pyc b/venv/lib/python3.10/site-packages/torch/_logging/__pycache__/structured.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..7a557ff6c61c6e12b44d06237a15f8001ac72b3d
Binary files /dev/null and b/venv/lib/python3.10/site-packages/torch/_logging/__pycache__/structured.cpython-310.pyc differ
diff --git a/venv/lib/python3.10/site-packages/torch/_logging/_internal.py b/venv/lib/python3.10/site-packages/torch/_logging/_internal.py
new file mode 100644
index 0000000000000000000000000000000000000000..b2b34756a42d77809fc3ca5554c6494329edcbe2
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/torch/_logging/_internal.py
@@ -0,0 +1,1085 @@
+import functools
+import hashlib
+import itertools
+import json
+import logging
+import os
+import os.path
+import re
+import tempfile
+from dataclasses import dataclass, field
+from importlib import __import__
+from typing import Any, Callable, Dict, List, Optional, Set, Tuple, Union
+from weakref import WeakSet
+
+log = logging.getLogger(__name__)
+
+# This is a synthetic logger which doesn't correspond to an actual logger,
+# but handles all of our "tracing" logging, which is structured and doesn't go
+# to stderr but always goes to a dedicated log file. We don't put these
+# loggers in the classic module hierarchy, because we don't want a suppression
+# of logs to also cause a trace to get suppressed (traces typically are not
+# collected, unless we are in prod, in which case they always are collected.)
+#
+# TODO: Maybe we should allow for some sub-hierarchy so you can control which
+# traces you want to collect, for performance reasons.
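[Editor's note] The onboarding steps in the __init__.py comment above can be exercised directly against the helpers this file defines; a minimal sketch, assuming a hypothetical component alias "my_component" and artifact "my_artifact" (neither is part of the real registrations in torch._logging._registrations):

from torch._logging._internal import (
    getArtifactLogger,
    register_artifact,
    register_log,
)

# step 1: map a shorthand alias to a fully qualified logger name
register_log("my_component", "torch.my_component")
# step 2: register the artifact so it can be toggled via TORCH_LOGS / set_logs
register_artifact("my_artifact", "Example artifact, for illustration only")
# step 2a: fetch the artifact logger instead of a plain logging.getLogger
artifact_log = getArtifactLogger("torch.my_component", "my_artifact")
# suppressed unless the artifact is enabled, e.g. via TORCH_LOGS="my_artifact"
# or torch._logging.set_logs(modules={"my_artifact": True})
artifact_log.debug("hello from my_artifact")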
+# +# See https://docs.google.com/document/d/1CX_hJ0PNy9f3R1y8TJrfkSeLkvGjjjLU84BSXgS2AZ8/edit +trace_log = logging.getLogger("torch.__trace") + +DEFAULT_LOG_LEVEL = logging.WARNING +LOG_ENV_VAR = "TORCH_LOGS" +LOG_OUT_ENV_VAR = "TORCH_LOGS_OUT" +LOG_FORMAT_ENV_VAR = "TORCH_LOGS_FORMAT" +TRACE_ENV_VAR = "TORCH_TRACE" + + +@dataclass +class LogRegistry: + # shorthand name to log qualified name + # Note: this only contains loggers registered + # from register_log + # e.g. "dynamo" -> "torch._dynamo" + log_alias_to_log_qnames: Dict[str, List[str]] = field(default_factory=dict) + + # artifact logger qualified names, + # this is populated lazily, as calls to getArtifactLogger + # currently formatted as .__ + # e.g. "torch._dynamo.convert_frame.__guards" + artifact_log_qnames: Set[str] = field(default_factory=set) + + # child logs of registered logs if specified via open + # registration by the user (ie placing "torch._dynamo.output_graph" in the env var) + # these need to be tracked so their levels can be reset properly + # e.g. "torch._dynamo.output_graph" + child_log_qnames: Set[str] = field(default_factory=set) + + # artifact names, populated by register_artifact + # e.g. "guards" + artifact_names: Set[str] = field(default_factory=set) + + # Artifacts that should be visible by default in the error message + visible_artifacts: Set[str] = field(default_factory=set) + + # A short description of each artifact + artifact_descriptions: Dict[str, str] = field(default_factory=dict) + + # artifacts which are not displayed unless explicitly named in the + # settings. Ex. output_code is NOT displayed even if the inductor + # log level is set to DEBUG. It must be explicitly named in the settings + off_by_default_artifact_names: Set[str] = field(default_factory=set) + + # logging format string for artifacts + artifact_log_formatters: Dict[str, logging.Formatter] = field(default_factory=dict) + + def is_artifact(self, name): + return name in self.artifact_names + + def is_log(self, alias): + return alias in self.log_alias_to_log_qnames + + # register a log with an alias + def register_log(self, alias, log_qnames: Union[str, List[str]]): + if isinstance(log_qnames, str): + log_qnames = [log_qnames] + self.log_alias_to_log_qnames[alias] = log_qnames + + # register an artifact name + def register_artifact_name( + self, name, description, visible, off_by_default, log_format + ): + self.artifact_names.add(name) + if visible: + self.visible_artifacts.add(name) + self.artifact_descriptions[name] = description + + # if off by default, don't enable it + # when log_name's log_level is set to DEBUG + if off_by_default: + self.off_by_default_artifact_names.add(name) + + if log_format is not None: + self.artifact_log_formatters[name] = logging.Formatter(log_format) + + # register the qualified name of an artifact log + # this is needed to know which logs need to be reset + # whenever the log_state is changed + def register_artifact_log(self, artifact_log_qname): + self.artifact_log_qnames.add(artifact_log_qname) + + def register_child_log(self, log_qname): + self.child_log_qnames.add(log_qname) + + # flattens all the qnames together (TODO: consider memoizing?) 
+ def get_log_qnames(self) -> Set[str]: + return { + qname + for qnames in self.log_alias_to_log_qnames.values() + for qname in qnames + } + + def get_artifact_log_qnames(self): + return set(self.artifact_log_qnames) + + def get_child_log_qnames(self): + return set(self.child_log_qnames) + + def is_off_by_default(self, artifact_qname): + return artifact_qname in self.off_by_default_artifact_names + + +@dataclass +class LogState: + # qualified log names -> currently set log level + log_qname_to_level: Dict[str, str] = field(default_factory=dict) + + # the set of currently enabled artifacts + artifact_names: Set[str] = field(default_factory=set) + + def enable_artifact(self, artifact_name): + self.artifact_names.add(artifact_name) + + def is_artifact_enabled(self, name): + return name in self.artifact_names + + def enable_log(self, log_qnames, log_level): + if isinstance(log_qnames, str): + log_qnames = [log_qnames] + for log_qname in log_qnames: + self.log_qname_to_level[log_qname] = log_level + + def get_log_level_pairs(self): + """Returns all qualified module names for which the user requested + explicit logging settings. + + .. warning: + + This function used to return all loggers, regardless of whether + or not the user specified them or not; it now only returns logs + which were explicitly mentioned by the user (and torch, which + always is implicitly requested when we initialize our logging + subsystem.) + """ + return self.log_qname_to_level.items() + + def clear(self): + self.log_qname_to_level.clear() + self.artifact_names.clear() + + +log_registry = LogRegistry() +log_state = LogState() + +# sample usage: torch._logging.set_logs(**torch._logging.DEFAULT_LOGGING) +DEFAULT_LOGGING = { + "dynamo": logging.DEBUG, + "aot": logging.DEBUG, + "inductor": logging.DEBUG, + "ddp_graphs": True, + "graph_breaks": True, + "guards": True, + "recompiles": True, + "dynamic": logging.INFO, +} + + +def set_logs( + *, + all: Optional[int] = None, + dynamo: Optional[int] = None, + aot: Optional[int] = None, + autograd: Optional[int] = None, + dynamic: Optional[int] = None, + inductor: Optional[int] = None, + distributed: Optional[int] = None, + dist_c10d: Optional[int] = None, + dist_ddp: Optional[int] = None, + dist_fsdp: Optional[int] = None, + onnx: Optional[int] = None, + bytecode: bool = False, + aot_graphs: bool = False, + aot_joint_graph: bool = False, + ddp_graphs: bool = False, + graph: bool = False, + graph_code: bool = False, + graph_breaks: bool = False, + graph_sizes: bool = False, + guards: bool = False, + recompiles: bool = False, + recompiles_verbose: bool = False, + trace_source: bool = False, + trace_call: bool = False, + output_code: bool = False, + schedule: bool = False, + perf_hints: bool = False, + post_grad_graphs: bool = False, + onnx_diagnostics: bool = False, + fusion: bool = False, + overlap: bool = False, + export: Optional[int] = None, + modules: Optional[Dict[str, Union[int, bool]]] = None, + cudagraphs: bool = False, + sym_node: bool = False, +): + """ + Sets the log level for individual components and toggles individual log + artifact types. + + .. warning:: This feature is a prototype and may have compatibility + breaking changes in the future. + + .. note:: The ``TORCH_LOGS`` environment variable has complete precedence + over this function, so if it was set, this function does nothing. + + A component is a set of related features in PyTorch. All of the log + messages emitted from a given component have their own log levels. 
If the + log level of a particular message has priority greater than or equal to its + component's log level setting, it is emitted. Otherwise, it is suppressed. + This allows you to, for instance, silence large groups of log messages that + are not relevant to you and increase verbosity of logs for components that + are relevant. The expected log level values, ordered from highest to lowest + priority, are: + + * ``logging.CRITICAL`` + * ``logging.ERROR`` + * ``logging.WARNING`` + * ``logging.INFO`` + * ``logging.DEBUG`` + * ``logging.NOTSET`` + + See documentation for the Python ``logging`` module for more information on + log levels: ``_ + + An artifact is a particular type of log message. Each artifact is assigned + to a parent component. A component can emit many different kinds of + artifacts. In general, an artifact is emitted if either its corresponding + setting in the argument list below is turned on or if its parent component + is set to a log level less than or equal to the log level of the artifact. + + Keyword args: + all (:class:`Optional[int]`): + The default log level for all components. Default: ``logging.WARN`` + + dynamo (:class:`Optional[int]`): + The log level for the TorchDynamo component. Default: ``logging.WARN`` + + aot (:class:`Optional[int]`): + The log level for the AOTAutograd component. Default: ``logging.WARN`` + + autograd (:class:`Optional[int]`): + The log level for autograd. Default: ``logging.WARN`` + + inductor (:class:`Optional[int]`): + The log level for the TorchInductor component. Default: ``logging.WARN`` + + dynamic (:class:`Optional[int]`): + The log level for dynamic shapes. Default: ``logging.WARN`` + + distributed (:class:`Optional[int]`): + Whether to log c10d communication operations and other debug info from PyTorch Distributed components. + Default: ``logging.WARN`` + + dist_c10d (:class:`Optional[int]`): + Whether to log c10d communication operations related debug info in PyTorch Distributed components. + Default: ``logging.WARN`` + + dist_ddp (:class:`Optional[int]`): + Whether to log debug info related to ``DistributedDataParallel``(DDP) from PyTorch Distributed components. + Default: ``logging.WARN`` + + dist_fsdp (:class:`Optional[int]`): + Whether to log debug info related to ``FullyShardedDataParallel``(FSDP) in PyTorch Distributed components. + Default: ``logging.WARN`` + + onnx (:class:`Optional[int]`): + The log level for the ONNX exporter component. Default: ``logging.WARN`` + + bytecode (:class:`bool`): + Whether to emit the original and generated bytecode from TorchDynamo. + Default: ``False`` + + aot_graphs (:class:`bool`): + Whether to emit the graphs generated by AOTAutograd. Default: ``False`` + + aot_joint_graph (:class:`bool`): + Whether to emit the joint forward-backward graph generated by AOTAutograd. Default: ``False`` + + inductor (:class:`Optional[int]`): + Whether to log information from inductor cudagraphs. Default: ``logging.WARN`` + + ddp_graphs (:class:`bool`): + Whether to emit graphs generated by DDPOptimizer. Default: ``False`` + + graph (:class:`bool`): + Whether to emit the graph captured by TorchDynamo in tabular format. + Default: ``False`` + + graph_code (:class:`bool`): + Whether to emit the python source of the graph captured by TorchDynamo. + Default: ``False`` + + graph_breaks (:class:`bool`): + Whether to emit the graph breaks encountered by TorchDynamo. + Default: ``False`` + + graph_sizes (:class:`bool`): + Whether to emit tensor sizes of the graph captured by TorchDynamo. 
+ Default: ``False`` + + guards (:class:`bool`): + Whether to emit the guards generated by TorchDynamo for each compiled + function. Default: ``False`` + + recompiles (:class:`bool`): + Whether to emit a guard failure reason and message every time + TorchDynamo recompiles a function. Default: ``False`` + + recompiles_verbose (:class:`bool`): + Whether to emit all guard failure reasons when TorchDynamo recompiles + a function, even those that are not actually run. Default: ``False`` + + trace_source (:class:`bool`): + Whether to emit when TorchDynamo begins tracing a new line. Default: ``False`` + + trace_call (:class:`bool`): + Whether to emit detailed line location when TorchDynamo creates an FX node + corresponding to function call. Python 3.11+ only. Default: ``False`` + + output_code (:class:`bool`): + Whether to emit the TorchInductor output code. Default: ``False`` + + schedule (:class:`bool`): + Whether to emit the TorchInductor schedule. Default: ``False`` + + perf_hints (:class:`bool`): + Whether to emit the TorchInductor perf hints. Default: ``False`` + + post_grad_graphs (:class:`bool`): + Whether to emit the graphs generated by after post grad passes. Default: ``False`` + + onnx_diagnostics (:class:`bool`): + Whether to emit the ONNX exporter diagnostics in logging. Default: ``False`` + + fusion (:class:`bool`): + Whether to emit detailed Inductor fusion decisions. Default: ``False`` + + overlap (:class:`bool`): + Whether to emit detailed Inductor compute/comm overlap decisions. Default: ``False`` + + sym_node (:class:`bool`): + Whether to emit debug info for various SymNode opterations. Default: ``False`` + + export (:class:`Optional[int]`): + The log level for export. Default: ``logging.WARN`` + + modules (dict): + This argument provides an alternate way to specify the above log + component and artifact settings, in the format of a keyword args + dictionary given as a single argument. There are two cases + where this is useful (1) if a new log component or artifact has + been registered but a keyword argument for it has not been added + to this function and (2) if the log level for an unregistered module + needs to be set. This can be done by providing the fully-qualified module + name as the key, with the log level as the value. Default: ``None`` + + + Example:: + + >>> # xdoctest: +SKIP + >>> import logging + + # The following changes the "dynamo" component to emit DEBUG-level + # logs, and to emit "graph_code" artifacts. 
+ + >>> torch._logging.set_logs(dynamo=logging.DEBUG, graph_code=True) + + # The following enables the logs for a different module + + >>> torch._logging.set_logs(modules={"unregistered.module.name": logging.DEBUG}) + """ + # ignore if env var is set + if LOG_ENV_VAR in os.environ: + log.warning( + "Using TORCH_LOGS environment variable for log settings, ignoring call to set_logs" + ) + return + + log_state.clear() + + modules = modules or {} + + def _set_logs(**kwargs): + for alias, val in itertools.chain(kwargs.items(), modules.items()): # type: ignore[union-attr] + if val is None: + continue + + if log_registry.is_artifact(alias): + if not isinstance(val, bool): + raise ValueError( + f"Expected bool to enable artifact {alias}, received {val}" + ) + + if val: + log_state.enable_artifact(alias) + elif log_registry.is_log(alias) or alias in log_registry.child_log_qnames: + if val not in logging._levelToName: + raise ValueError( + f"Unrecognized log level for log {alias}: {val}, valid level values " + f"are: {','.join([str(k) for k in logging._levelToName.keys()])}" + ) + + log_state.enable_log( + log_registry.log_alias_to_log_qnames.get(alias, alias), val + ) + else: + raise ValueError( + f"Unrecognized log or artifact name passed to set_logs: {alias}" + ) + + _init_logs() + + _set_logs( + torch=all, + dynamo=dynamo, + aot=aot, + autograd=autograd, + inductor=inductor, + dynamic=dynamic, + bytecode=bytecode, + aot_graphs=aot_graphs, + aot_joint_graph=aot_joint_graph, + ddp_graphs=ddp_graphs, + distributed=distributed, + dist_c10d=dist_c10d, + dist_ddp=dist_ddp, + dist_fsdp=dist_fsdp, + graph=graph, + graph_code=graph_code, + graph_breaks=graph_breaks, + graph_sizes=graph_sizes, + guards=guards, + recompiles=recompiles, + recompiles_verbose=recompiles_verbose, + trace_source=trace_source, + trace_call=trace_call, + output_code=output_code, + schedule=schedule, + perf_hints=perf_hints, + post_grad_graphs=post_grad_graphs, + onnx=onnx, + onnx_diagnostics=onnx_diagnostics, + fusion=fusion, + overlap=overlap, + sym_node=sym_node, + export=export, + cudagraphs=cudagraphs, + ) + + +def get_loggers(): + """ + Returns: a list of all registered loggers + """ + return [logging.getLogger(qname) for qname in log_registry.get_log_qnames()] + + +def register_log(setting_name, log_name): + """ + Enables a log to be controlled by the env var and user API with the setting_name + Args: + setting_name: the shorthand name used in the env var and user API + log_name: the log name that the setting_name is associated with + """ + log_registry.register_log(setting_name, log_name) + + +def register_artifact( + setting_name, description, visible=False, off_by_default=False, log_format=None +): + """ + Enables an artifact to be controlled by the env var and user API with name + Args: + setting_name: the shorthand name used in the env var and user API + description: A description of what this outputs + visible: Whether it gets suggested to users by default + off_by_default: whether this artifact should be logged when the ancestor loggers + are enabled at level DEBUG + """ + log_registry.register_artifact_name( + setting_name, description, visible, off_by_default, log_format + ) + + +def getArtifactLogger(module_qname, artifact_name): + if artifact_name not in log_registry.artifact_names: + raise ValueError( + f"Artifact name: {repr(artifact_name)} not registered," + f"please call register_artifact({repr(artifact_name)}) in torch._logging.registrations." 
+ ) + qname = module_qname + f".__{artifact_name}" + log = logging.getLogger(qname) + log.artifact_name = artifact_name # type: ignore[attr-defined] + log_registry.register_artifact_log(qname) + configure_artifact_log(log) + return log + + +INCR_VERBOSITY_CHAR = "+" +DECR_VERBOSITY_CHAR = "-" +VERBOSITY_REGEX = ( + "(" + + "|".join([re.escape(INCR_VERBOSITY_CHAR), re.escape(DECR_VERBOSITY_CHAR)]) + + "?)" +) + + +def configure_artifact_log(log): + # If the artifact is off by default, then it should only be logged when explicitly + # enabled; set propagate to False so that this artifact is not propagated + # to its ancestor logger + if log_registry.is_off_by_default(log.artifact_name): + log.propagate = False + + # enable artifact logging when explicitly enabled + if log_state.is_artifact_enabled(log.artifact_name): + log.setLevel(logging.DEBUG) + log.propagate = True + + +# match a comma separated list of loggable names (whitespace allowed after commas) +def _gen_settings_regex(): + return re.compile(r"((\+|-)?[\w\.]+,\s*)*(\+|-)?[\w\.]+?") + + +def _validate_settings(settings): + return re.fullmatch(_gen_settings_regex(), settings) is not None + + +def help_message(verbose=False): + def pad_to(s, length=30): + assert len(s) <= length + return s + " " * (length - len(s)) + + if verbose: + printed_artifacts = log_registry.artifact_names + else: + printed_artifacts = log_registry.visible_artifacts + + if verbose: + heading = "All registered names" + else: + heading = "Visible registered names (use TORCH_LOGS='+help' for full list)" + lines = ( + ["all"] + + sorted(log_registry.log_alias_to_log_qnames.keys()) + + sorted( + [ + f"{pad_to(name)}\t{log_registry.artifact_descriptions[name]}" + for name in printed_artifacts + ] + ) + ) + setting_info = " " + "\n ".join(lines) + examples = """ +Examples: + TORCH_LOGS="+dynamo,aot" will set the log level of TorchDynamo to + logging.DEBUG and AOT to logging.INFO + + TORCH_LOGS="-dynamo,+inductor" will set the log level of TorchDynamo to + logging.ERROR and TorchInductor to logging.DEBUG + + TORCH_LOGS="aot_graphs" will enable the aot_graphs artifact + + TORCH_LOGS="+dynamo,schedule" will enable set the log level of TorchDynamo + to logging.DEBUG and enable the schedule artifact + + TORCH_LOGS="+some.random.module,schedule" will set the log level of + some.random.module to logging.DEBUG and enable the schedule artifact + + TORCH_LOGS_FORMAT="%(levelname)s: %(message)s" or any provided format + string will set the output format + Valid keys are "levelname", "message", "pathname", "levelno", "lineno", + "filename" and "name". + + TORCH_LOGS_OUT=/tmp/output.txt will output the logs to /tmp/output.txt as + well. This is useful when the output is long. +""" # flake8: noqa: B950 + msg = f""" +TORCH_LOGS Info +{examples} + +{heading} +{setting_info} +""" + return msg + + +def _invalid_settings_err_msg(settings, verbose=False): + valid_settings = ", ".join( + ["all"] + + list(log_registry.log_alias_to_log_qnames.keys()) + + list(log_registry.artifact_names) + ) + msg = f""" +Invalid log settings: {settings}, must be a comma separated list of fully +qualified module names, registered log names or registered artifact names. 
+For more info on various settings, try TORCH_LOGS="help" +Valid settings: +{valid_settings} +""" + return msg + + +@functools.lru_cache +def _parse_log_settings(settings): + if settings == "": + return dict() + + if settings == "help": + raise ValueError(help_message(verbose=False)) + elif settings == "+help": + raise ValueError(help_message(verbose=True)) + if not _validate_settings(settings): + raise ValueError(_invalid_settings_err_msg(settings)) + + settings = re.sub(r"\s+", "", settings) + log_names = settings.split(",") + + def get_name_level_pair(name): + clean_name = name.replace(INCR_VERBOSITY_CHAR, "") + clean_name = clean_name.replace(DECR_VERBOSITY_CHAR, "") + + if name[0] == INCR_VERBOSITY_CHAR: + level = logging.DEBUG + elif name[0] == DECR_VERBOSITY_CHAR: + level = logging.ERROR + else: + level = logging.INFO + + return clean_name, level + + log_state = LogState() + + for name in log_names: + name, level = get_name_level_pair(name) + + if name == "all": + name = "torch" + + if log_registry.is_log(name): + assert level is not None + log_qnames = log_registry.log_alias_to_log_qnames[name] + log_state.enable_log(log_qnames, level) + elif log_registry.is_artifact(name): + log_state.enable_artifact(name) + elif _is_valid_module(name): + if not _has_registered_parent(name): + log_registry.register_log(name, name) + else: + log_registry.register_child_log(name) + log_state.enable_log(name, level) + else: + raise ValueError(_invalid_settings_err_msg(settings)) + + return log_state + + +def _is_valid_module(qname): + try: + __import__(qname) + return True + except ImportError: + return False + + +def _update_log_state_from_env(): + global log_state + log_setting = os.environ.get(LOG_ENV_VAR, None) + if log_setting is not None: + log_state = _parse_log_settings(log_setting) + + +def _has_registered_parent(log_qname): + cur_log = logging.getLogger(log_qname) + + registered_log_qnames = log_registry.get_log_qnames() + + while cur_log.parent: + if cur_log.name in registered_log_qnames: + return True + cur_log = cur_log.parent + + return False + + +# apply custom formats to artifacts when necessary +class TorchLogsFormatter(logging.Formatter): + def __init__(self, *, trace: bool = False): + super().__init__() + self._is_trace = trace + + def format(self, record): + artifact_name = getattr(logging.getLogger(record.name), "artifact_name", None) + if artifact_name is not None: + artifact_formatter = log_registry.artifact_log_formatters.get( + artifact_name, None + ) + if artifact_formatter is not None: + return artifact_formatter.format(record) + + record.message = record.getMessage() + record.asctime = self.formatTime(record, "%m%d %H:%M:%S") + + # exception handling - copied from logging.Formatter.format + s = record.message + if record.exc_info: + # Cache the traceback text to avoid converting it multiple times + # (it's constant anyway) + if not record.exc_text: + record.exc_text = self.formatException(record.exc_info) + if record.exc_text: + if s[-1:] != "\n": + s = s + "\n" + s = s + record.exc_text + if record.stack_info: + if s[-1:] != "\n": + s = s + "\n" + s = s + self.formatStack(record.stack_info) + + record.rankprefix = "" + if not self._is_trace and dist.is_available() and dist.is_initialized(): + record.rankprefix = f"[rank{dist.get_rank()}]:" + + record.traceid = "" + if ( + not self._is_trace + and (trace_id := torch._guards.CompileContext.current_trace_id()) + is not None + ): + record.traceid = f" [{trace_id}]" + + glog_level_to_abbr = { + "DEBUG": "V", # V is for VERBOSE 
in glog + "INFO": "I", + "WARNING": "W", + "ERROR": "E", + "CRITICAL": "C", + } + + shortlevel = glog_level_to_abbr.get(record.levelname, record.levelname) + + record.artifactprefix = "" + if artifact_name is not None: + record.artifactprefix = f" [__{artifact_name}]" + + prefix = ( + f"{record.rankprefix}{shortlevel}{record.asctime}.{int(record.msecs*1000):06d} {record.thread} " + f"{os.path.relpath(record.pathname, os.path.dirname(os.path.dirname(torch.__file__)))}:" + f"{record.lineno}]{record.traceid}{record.artifactprefix}" + ) + if self._is_trace: + assert s == "" + r = f"{prefix} {json.dumps(record.metadata)}" + if record.payload is not None: + r += "".join(f"\n\t{l}" for l in record.payload.split("\n")) + return r + else: + lines = s.split("\n") + return "\n".join(f"{prefix} {l}" for l in lines) + + +def _default_formatter(): + fmt = os.environ.get(LOG_FORMAT_ENV_VAR, None) + if fmt is None: + return TorchLogsFormatter() + else: + if fmt in ("short", "basic"): + fmt = logging.BASIC_FORMAT + return logging.Formatter(fmt) + + +DEFAULT_FORMATTER = _default_formatter() + + +def _setup_handlers(create_handler_fn, log): + debug_handler = _track_handler(create_handler_fn()) + debug_handler.setFormatter(DEFAULT_FORMATTER) + debug_handler.setLevel(logging.DEBUG) + log.addHandler(debug_handler) + + +handlers = WeakSet() # type: ignore[var-annotated] + + +# mark handlers that we've created +# so we don't modify user handlers +def _track_handler(handler): + handlers.add(handler) + return handler + + +def _is_torch_handler(handler): + return handler in handlers + + +# clears all torch handlers on specified loggers +def _clear_handlers(log): + to_remove = [handler for handler in log.handlers if _is_torch_handler(handler)] + for handler in to_remove: + log.removeHandler(handler) + + +def _reset_logs(): + # reset all registered logs + for log_qname in log_registry.get_log_qnames(): + log = logging.getLogger(log_qname) + log.setLevel(logging.WARNING) + log.propagate = False + _clear_handlers(log) + + # reset all artifact and child logs + for artifact_log_qname in itertools.chain( + log_registry.get_artifact_log_qnames(), log_registry.get_child_log_qnames() + ): + log = logging.getLogger(artifact_log_qname) + log.setLevel(logging.NOTSET) + log.propagate = True + + trace_log.propagate = False + _clear_handlers(trace_log) + + +def _get_log_state(): + return log_state + + +def _set_log_state(state): + global log_state + log_state = state + + +def _init_logs(log_file_name=None): + _reset_logs() + _update_log_state_from_env() + + out = os.environ.get(LOG_OUT_ENV_VAR, None) + if out is not None: + log_file_name = out + + # First, reset all known (registered) loggers to NOTSET, so that they + # respect their parent log level + for log_qname in log_registry.get_log_qnames(): + # But not the top level torch level: this defaults to WARNING so + # that our log messages don't leak to the lower levels + if log_qname == "torch": + continue + log = logging.getLogger(log_qname) + log.setLevel(logging.NOTSET) + + # Now, for all loggers which the user requested to have non-standard + # logging behavior, modify their log levels + for log_qname, level in log_state.get_log_level_pairs(): + log = logging.getLogger(log_qname) + log.setLevel(level) + + # Finally, setup handlers for all registered loggers + for log_qname in log_registry.get_log_qnames(): + log = logging.getLogger(log_qname) + _setup_handlers( + logging.StreamHandler, + log, + ) + + if log_file_name is not None: + _setup_handlers( + lambda: 
logging.FileHandler(log_file_name), + log, + ) + + # configure artifact loggers, note: this must happen last + # since the levels of ancestor loggers are taken into account + for artifact_log_qname in log_registry.get_artifact_log_qnames(): + log = logging.getLogger(artifact_log_qname) + configure_artifact_log(log) + + # Setup handler for the special trace_log, with different default + # configuration + trace_dir_name = os.environ.get(TRACE_ENV_VAR, None) + # This handler may remove itself if trace_dir_name is None and we are not + # actually in an FB environment. This allows us to defer actually + # initializing it until we actually need to log anything. This is + # important because JK initializes a C++ singleton, which will pork our + # process if we subsequently fork. + handler = LazyTraceHandler(trace_dir_name) + # This log is ALWAYS at debug level. We will additionally test if there + # are any handlers before deciding to actually call logging on this. Do + # not manually call + trace_log.setLevel(logging.DEBUG) + trace_log_handler = _track_handler(handler) + trace_log_handler.setFormatter(TorchLogsFormatter(trace=True)) + trace_log.addHandler(trace_log_handler) + + +class LazyTraceHandler(logging.StreamHandler): + """Like FileHandler, but the file is allocated lazily only upon the first log message""" + + def __init__(self, root_dir: Optional[str]): + # This is implemented in the same way that delay is implemented on + # FileHandler + self.root_dir = root_dir + logging.Handler.__init__(self) + self.stream = None + self._builtin_open = open + + # cloned from FileHandler in cpython + def close(self): + self.acquire() + try: + try: + if self.stream: + try: + self.flush() + finally: + stream = self.stream + self.stream = None + if hasattr(stream, "close"): + stream.close() + finally: + # Issue #19523: call unconditionally to + # prevent a handler leak when delay is set + # Also see Issue #42378: we also rely on + # self._closed being set to True there + logging.StreamHandler.close(self) + finally: + self.release() + + def emit(self, record): + if self.stream is None: + ok = False + if self.root_dir is None: + TRACE_LOG_DIR = "/logs" + open_func = self._builtin_open + + import torch.version as torch_version + + if hasattr(torch_version, "git_version"): + log.info("LazyTraceHandler: disabled because not fbcode") + elif not torch._utils_internal.justknobs_check("pytorch/trace:enable"): + log.info( + "LazyTraceHandler: disabled because justknobs_check('pytorch/trace:enable') returned False" + ) + elif not os.path.exists(TRACE_LOG_DIR): + log.info( + "LazyTraceHandler: disabled because %s does not exist", + TRACE_LOG_DIR, + ) + elif not os.access(TRACE_LOG_DIR, os.W_OK): + log.info( + "LazyTraceHandler: disabled because %s is not writeable", + TRACE_LOG_DIR, + ) + else: + self.root_dir = TRACE_LOG_DIR + + if self.root_dir is not None: + os.makedirs(self.root_dir, exist_ok=True) + ranksuffix = "" + if dist.is_available() and dist.is_initialized(): + ranksuffix = f"rank_{dist.get_rank()}_" + self.stream = tempfile.NamedTemporaryFile( + mode="w+", + suffix=".log", + prefix=f"dedicated_log_torch_trace_{ranksuffix}", + dir=self.root_dir, + delete=False, + ) + log.info("LazyTraceHandler: logging to %s", self.stream.name) + else: + # We go poof, remove and no-op + trace_log.removeHandler(self) + return + if self.stream: + super().emit(record) + + +@functools.lru_cache(None) +def warning_once(logger_obj, *args, **kwargs): + """ + This function is similar to `logger.warning()`, but will emit the 
warning with the same message only once + Note: The cache is for the function arguments, so 2 different callers using the same arguments will hit the cache. + The assumption here is that all warning messages are unique across the code. If they aren't then need to switch to + another type of cache that includes the caller frame information in the hashing function. + """ + logger_obj.warning(*args, **kwargs) + + +class LazyString: + def __init__(self, func, *args, **kwargs): + self.func = func + self.args = args + self.kwargs = kwargs + + def __str__(self): + return self.func(*self.args, **self.kwargs) + + +def trace_structured( + name: str, + # NB: metadata expected to be dict so adding more info is forward compatible + # Tuple[str, int] is a special case for string interning + metadata_fn: Callable[[], Union[Dict[str, Any], Tuple[str, int]]] = dict, + *, + payload_fn: Callable[[], Optional[Union[str, object]]] = lambda: None, + suppress_context: bool = False, +): + """ + metadata is an arbitrary JSON compatible struct, but it's expected to not be + too long (e.g., less than 1MB) + + payload is an arbitrary string, which can be arbitrarily long (but expected to have + newlines so no lines are too long) + """ + assert "name" not in ["rank", "frame_id", "frame_compile_id", "attempt"] + assert callable( + metadata_fn + ), f"metadata_fn should be callable, but got {type(metadata_fn)}" + assert callable( + payload_fn + ), f"payload_fn should be callable, but got {type(payload_fn)}" + # trace_log never propagates and is ALWAYS DEBUG, so also check that there + # are handlers instead of checking the log level + if trace_log.handlers: + record: Dict[str, object] = {} + record[name] = metadata_fn() + if not suppress_context: + # TODO: Actually, the rank probably should just be emitted once at + # the top, and not repeatedly spammed in all the logs, since it + # never changes and we assume no interleaving + if dist.is_available() and dist.is_initialized(): + record["rank"] = dist.get_rank() + if ( + trace_id := torch._guards.CompileContext.current_trace_id() + ) is not None: + record["frame_id"] = trace_id.compile_id.frame_id + record["frame_compile_id"] = trace_id.compile_id.frame_compile_id + record["attempt"] = trace_id.attempt + payload = payload_fn() + if payload is not None: + if not isinstance(payload, str): + if isinstance(payload, list): + # special case to look better + payload = "[\n" + ",\n".join(json.dumps(i) for i in payload) + "\n]" + else: + # force newlines so we are unlikely to overflow line limit + payload = json.dumps(payload, indent=0) + h = hashlib.md5() + h.update(payload.encode("utf-8")) + record["has_payload"] = h.hexdigest() + trace_log.debug( + "", extra={"metadata": record, "payload": payload}, stacklevel=2 + ) + + +import torch._guards +import torch._utils_internal +import torch.distributed as dist diff --git a/venv/lib/python3.10/site-packages/torch/_logging/_registrations.py b/venv/lib/python3.10/site-packages/torch/_logging/_registrations.py new file mode 100644 index 0000000000000000000000000000000000000000..5ff3372feb8d74d18f238ceeedb899957faa5934 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/_logging/_registrations.py @@ -0,0 +1,134 @@ +# flake8: noqa: B950 +from ._internal import register_artifact, register_log + +DYNAMIC = ["torch.fx.experimental.symbolic_shapes", "torch.fx.experimental.sym_node"] +DISTRIBUTED = [ + "torch.distributed", + "torch._dynamo.backends.distributed", + "torch.nn.parallel.distributed", +] + +register_log("dynamo", 
["torch._dynamo", *DYNAMIC]) +register_log("aot", ["torch._functorch.aot_autograd", "torch._functorch._aot_autograd"]) +register_log("autograd", "torch.autograd") +register_log("inductor", ["torch._inductor", "torch._inductor.cudagraph_trees"]) + +register_artifact( + "cudagraphs", + "Logs information from wrapping inductor generated code with cudagraphs.", +) + +register_log("dynamic", DYNAMIC) +register_log("torch", "torch") +register_log("distributed", DISTRIBUTED) +register_log( + "dist_c10d", ["torch.distributed.distributed_c10d", "torch.distributed.rendezvous"] +) +register_log( + "dist_ddp", ["torch.nn.parallel.distributed", "torch._dynamo.backends.distributed"] +) +register_log("dist_fsdp", ["torch.distributed.fsdp"]) +register_log("onnx", "torch.onnx") +register_log("export", ["torch._dynamo", "torch.export", *DYNAMIC]) + +register_artifact( + "guards", + "This prints the guards for every compiled Dynamo frame. It does not tell you where the guards come from.", + visible=True, +) +register_artifact("verbose_guards", "", off_by_default=True) +register_artifact( + "bytecode", + "Prints the original and modified bytecode from Dynamo. Mostly useful if you're debugging our bytecode generation in Dynamo.", + off_by_default=True, +) +register_artifact( + "graph", + "Prints the dynamo traced graph (prior to AOTDispatch) in a table. If you prefer python code use `graph_code` instead. ", +) +register_artifact("graph_code", "Like `graph`, but gives you the Python code instead.") +register_artifact( + "graph_sizes", "Prints the sizes of all FX nodes in the dynamo graph." +) +register_artifact( + "trace_source", + "As we execute bytecode, prints the file name / line number we are processing and the actual source code. Useful with `bytecode`", +) +register_artifact( + "trace_call", + "Like trace_source, but it will give you the per-expression blow-by-blow if your Python is recent enough.", +) +register_artifact( + "aot_graphs", + "Prints the FX forward and backward graph generated by AOTDispatch, after partitioning. Useful to understand what's being given to Inductor", + visible=True, +) +register_artifact( + "aot_joint_graph", + "Print FX joint graph from AOTAutograd, prior to partitioning. Useful for debugging partitioning", +) +register_artifact( + "post_grad_graphs", + "Prints the FX graph generated by post grad passes. Useful to understand what's being given to Inductor after post grad passes", +) +register_artifact( + "compiled_autograd", + "Prints various logs in compiled_autograd, including but not limited to the graphs. Useful for debugging compiled_autograd.", + visible=True, +) +register_artifact( + "ddp_graphs", + "Only relevant for compiling DDP. DDP splits into multiple graphs to trigger comms early. This will print each individual graph here.", +) +register_artifact( + "recompiles", + "Prints the reason why we recompiled a graph. Very, very useful.", + visible=True, +) +register_artifact( + "recompiles_verbose", + "Prints all guard checks that fail during a recompilation. " + "At runtime, Dynamo will stop at the first failed check for each failing guard. " + "So not all logged failing checks are actually ran by Dynamo.", + visible=True, + off_by_default=True, +) +register_artifact( + "graph_breaks", + "Prints whenever Dynamo decides that it needs to graph break (i.e. create a new graph). 
Useful for debugging why torch.compile has poor performance", + visible=True, +) +register_artifact( + "not_implemented", + "Prints log messages whenever we return NotImplemented in a multi-dispatch, letting you trace through each object we attempted to dispatch to", +) +register_artifact( + "output_code", + "Prints the code that Inductor generates (either Triton or C++)", + off_by_default=True, + visible=True, +) +register_artifact( + "schedule", + "Inductor scheduler information. Useful if working on Inductor fusion algo", + off_by_default=True, +) +register_artifact("perf_hints", "", off_by_default=True) +register_artifact("onnx_diagnostics", "", off_by_default=True) +register_artifact( + "fusion", + "Detailed Inductor fusion decisions. More detailed than 'schedule'", + off_by_default=True, +) +register_artifact( + "overlap", + "Detailed Inductor compute/comm overlap decisions", + off_by_default=True, +) +register_artifact( + "sym_node", + "Logs extra info for various SymNode operations", + off_by_default=True, +) + +register_artifact("custom_format_test_artifact", "Testing only", log_format="") diff --git a/venv/lib/python3.10/site-packages/torch/_logging/structured.py b/venv/lib/python3.10/site-packages/torch/_logging/structured.py new file mode 100644 index 0000000000000000000000000000000000000000..c8ac982cf1df2b85cb0fd201078a7211e58aeeb0 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/_logging/structured.py @@ -0,0 +1,37 @@ +""" +Utilities for converting data types into structured JSON for dumping. +""" + +import traceback +from typing import Dict, Sequence + +import torch._logging._internal + + +INTERN_TABLE: Dict[str, int] = {} + + +def intern_string(s: str) -> int: + r = INTERN_TABLE.get(s, None) + if r is None: + r = len(INTERN_TABLE) + INTERN_TABLE[s] = r + torch._logging._internal.trace_structured( + "str", lambda: (s, r), suppress_context=True + ) + return r + + +def from_traceback(tb: Sequence[traceback.FrameSummary]) -> object: + r = [] + for frame in tb: + # dict naming convention here coincides with + # python/combined_traceback.cpp + r.append( + { + "line": frame.lineno, + "name": frame.name, + "filename": intern_string(frame.filename), + } + ) + return r diff --git a/venv/lib/python3.10/site-packages/torch/_numpy/__init__.py b/venv/lib/python3.10/site-packages/torch/_numpy/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..4f0b9a0a5a1945fd7eb22035b2a0b709264b41f9 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/_numpy/__init__.py @@ -0,0 +1,30 @@ +# mypy: ignore-errors + +from . import fft, linalg, random +from ._dtypes import * # noqa: F403 +from ._funcs import * # noqa: F403 +from ._getlimits import finfo, iinfo +from ._ndarray import ( + array, + asarray, + ascontiguousarray, + can_cast, + from_dlpack, + ndarray, + newaxis, + result_type, +) +from ._ufuncs import * # noqa: F403 +from ._util import AxisError, UFuncTypeError + +# from . 
import testing + +alltrue = all +sometrue = any + +inf = float("inf") +nan = float("nan") +from math import pi, e # isort: skip + +False_ = False +True_ = True diff --git a/venv/lib/python3.10/site-packages/torch/_numpy/__pycache__/_binary_ufuncs_impl.cpython-310.pyc b/venv/lib/python3.10/site-packages/torch/_numpy/__pycache__/_binary_ufuncs_impl.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..bc6f9cfdc1995c5ca74c829248c6dbd9b240d918 Binary files /dev/null and b/venv/lib/python3.10/site-packages/torch/_numpy/__pycache__/_binary_ufuncs_impl.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/torch/_numpy/__pycache__/_normalizations.cpython-310.pyc b/venv/lib/python3.10/site-packages/torch/_numpy/__pycache__/_normalizations.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..d57e9711ead1a633b21d656fdd9d54213b9833ec Binary files /dev/null and b/venv/lib/python3.10/site-packages/torch/_numpy/__pycache__/_normalizations.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/torch/_numpy/__pycache__/_reductions_impl.cpython-310.pyc b/venv/lib/python3.10/site-packages/torch/_numpy/__pycache__/_reductions_impl.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..8c3f1e149705431dbe7a454a7450d9e242ace54f Binary files /dev/null and b/venv/lib/python3.10/site-packages/torch/_numpy/__pycache__/_reductions_impl.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/torch/_numpy/__pycache__/_unary_ufuncs_impl.cpython-310.pyc b/venv/lib/python3.10/site-packages/torch/_numpy/__pycache__/_unary_ufuncs_impl.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..c26bfceea4d38fb276d9a94f45e961b85f296b3f Binary files /dev/null and b/venv/lib/python3.10/site-packages/torch/_numpy/__pycache__/_unary_ufuncs_impl.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/torch/_numpy/__pycache__/_util.cpython-310.pyc b/venv/lib/python3.10/site-packages/torch/_numpy/__pycache__/_util.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..8a49e75a76a538f0a9c27fcfd2fddc7aac2cb142 Binary files /dev/null and b/venv/lib/python3.10/site-packages/torch/_numpy/__pycache__/_util.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/torch/_numpy/__pycache__/linalg.cpython-310.pyc b/venv/lib/python3.10/site-packages/torch/_numpy/__pycache__/linalg.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..8461d99b524bc2d41803eb699d8285e28775b3f7 Binary files /dev/null and b/venv/lib/python3.10/site-packages/torch/_numpy/__pycache__/linalg.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/torch/_numpy/__pycache__/random.cpython-310.pyc b/venv/lib/python3.10/site-packages/torch/_numpy/__pycache__/random.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..1211ae81b9d835dcf81a2679ef067996cc746b59 Binary files /dev/null and b/venv/lib/python3.10/site-packages/torch/_numpy/__pycache__/random.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/torch/_numpy/_binary_ufuncs_impl.py b/venv/lib/python3.10/site-packages/torch/_numpy/_binary_ufuncs_impl.py new file mode 100644 index 0000000000000000000000000000000000000000..34268c5911d6648c452dd7025edfc3e2f5b0db78 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/_numpy/_binary_ufuncs_impl.py @@ -0,0 +1,86 @@ +# mypy: ignore-errors + +"""Export torch 
work functions for binary ufuncs, rename/tweak to match numpy. +This listing is further exported to public symbols in the `torch._numpy/_ufuncs.py` module. +""" + +import torch + +from torch import ( # noqa: F401 + add, # noqa: F401 + arctan2, # noqa: F401 + bitwise_and, # noqa: F401 + bitwise_left_shift as left_shift, # noqa: F401 + bitwise_or, # noqa: F401 + bitwise_right_shift as right_shift, # noqa: F401 + bitwise_xor, # noqa: F401 + copysign, # noqa: F401 + divide, # noqa: F401 + eq as equal, # noqa: F401 + float_power, # noqa: F401 + floor_divide, # noqa: F401 + fmax, # noqa: F401 + fmin, # noqa: F401 + fmod, # noqa: F401 + gcd, # noqa: F401 + greater, # noqa: F401 + greater_equal, # noqa: F401 + heaviside, # noqa: F401 + hypot, # noqa: F401 + lcm, # noqa: F401 + ldexp, # noqa: F401 + less, # noqa: F401 + less_equal, # noqa: F401 + logaddexp, # noqa: F401 + logaddexp2, # noqa: F401 + logical_and, # noqa: F401 + logical_or, # noqa: F401 + logical_xor, # noqa: F401 + maximum, # noqa: F401 + minimum, # noqa: F401 + multiply, # noqa: F401 + nextafter, # noqa: F401 + not_equal, # noqa: F401 + pow as power, # noqa: F401 + remainder, # noqa: F401 + remainder as mod, # noqa: F401 + subtract, # noqa: F401 + true_divide, # noqa: F401 +) + +from . import _dtypes_impl, _util + + +# work around torch limitations w.r.t. numpy +def matmul(x, y): + # work around: + # - RuntimeError: expected scalar type Int but found Double + # - RuntimeError: "addmm_impl_cpu_" not implemented for 'Bool' + # - RuntimeError: "addmm_impl_cpu_" not implemented for 'Half' + dtype = _dtypes_impl.result_type_impl(x, y) + is_bool = dtype == torch.bool + is_half = (x.dtype == torch.float16 or y.dtype == torch.float16) and ( + x.is_cpu or y.is_cpu + ) + + work_dtype = dtype + if is_bool: + work_dtype = torch.uint8 + if is_half: + work_dtype = torch.float32 + + x = _util.cast_if_needed(x, work_dtype) + y = _util.cast_if_needed(y, work_dtype) + + result = torch.matmul(x, y) + + if work_dtype != dtype: + result = result.to(dtype) + + return result + + +# a stub implementation of divmod, should be improved after +# https://github.com/pytorch/pytorch/issues/90820 is fixed in pytorch +def divmod(x, y): + return x // y, x % y diff --git a/venv/lib/python3.10/site-packages/torch/_numpy/_casting_dicts.py b/venv/lib/python3.10/site-packages/torch/_numpy/_casting_dicts.py new file mode 100644 index 0000000000000000000000000000000000000000..513e73ef2efe42d8014cb0d712fd370b82ac8b76 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/_numpy/_casting_dicts.py @@ -0,0 +1,881 @@ +# mypy: ignore-errors + +import torch + +# These two dicts are autogenerated with autogen/gen_dtypes.py, +# using numpy version 1.23.5. 
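[Editor's note] As a quick illustration of how the autogenerated tables below are laid out (outer key: casting rule, then from-dtype, then to-dtype, roughly mirroring numpy.can_cast), a sketch of a few lookups; the lookup pattern is illustrative, the values are taken from the dict defined just below:

import torch
from torch._numpy._casting_dicts import _can_cast_dict

assert _can_cast_dict["safe"][torch.int32][torch.float64] is True       # widening int32 -> float64 is "safe"
assert _can_cast_dict["safe"][torch.float64][torch.float32] is False    # narrowing floats is not "safe"
assert _can_cast_dict["same_kind"][torch.float64][torch.float32] is True
assert _can_cast_dict["unsafe"][torch.float64][torch.bool] is True      # "unsafe" permits everything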
+ +_can_cast_dict = { + "no": { + torch.float16: { + torch.float16: True, + torch.float32: False, + torch.float64: False, + torch.complex64: False, + torch.complex128: False, + torch.uint8: False, + torch.int8: False, + torch.int16: False, + torch.int32: False, + torch.int64: False, + torch.bool: False, + }, + torch.float32: { + torch.float16: False, + torch.float32: True, + torch.float64: False, + torch.complex64: False, + torch.complex128: False, + torch.uint8: False, + torch.int8: False, + torch.int16: False, + torch.int32: False, + torch.int64: False, + torch.bool: False, + }, + torch.float64: { + torch.float16: False, + torch.float32: False, + torch.float64: True, + torch.complex64: False, + torch.complex128: False, + torch.uint8: False, + torch.int8: False, + torch.int16: False, + torch.int32: False, + torch.int64: False, + torch.bool: False, + }, + torch.complex64: { + torch.float16: False, + torch.float32: False, + torch.float64: False, + torch.complex64: True, + torch.complex128: False, + torch.uint8: False, + torch.int8: False, + torch.int16: False, + torch.int32: False, + torch.int64: False, + torch.bool: False, + }, + torch.complex128: { + torch.float16: False, + torch.float32: False, + torch.float64: False, + torch.complex64: False, + torch.complex128: True, + torch.uint8: False, + torch.int8: False, + torch.int16: False, + torch.int32: False, + torch.int64: False, + torch.bool: False, + }, + torch.uint8: { + torch.float16: False, + torch.float32: False, + torch.float64: False, + torch.complex64: False, + torch.complex128: False, + torch.uint8: True, + torch.int8: False, + torch.int16: False, + torch.int32: False, + torch.int64: False, + torch.bool: False, + }, + torch.int8: { + torch.float16: False, + torch.float32: False, + torch.float64: False, + torch.complex64: False, + torch.complex128: False, + torch.uint8: False, + torch.int8: True, + torch.int16: False, + torch.int32: False, + torch.int64: False, + torch.bool: False, + }, + torch.int16: { + torch.float16: False, + torch.float32: False, + torch.float64: False, + torch.complex64: False, + torch.complex128: False, + torch.uint8: False, + torch.int8: False, + torch.int16: True, + torch.int32: False, + torch.int64: False, + torch.bool: False, + }, + torch.int32: { + torch.float16: False, + torch.float32: False, + torch.float64: False, + torch.complex64: False, + torch.complex128: False, + torch.uint8: False, + torch.int8: False, + torch.int16: False, + torch.int32: True, + torch.int64: False, + torch.bool: False, + }, + torch.int64: { + torch.float16: False, + torch.float32: False, + torch.float64: False, + torch.complex64: False, + torch.complex128: False, + torch.uint8: False, + torch.int8: False, + torch.int16: False, + torch.int32: False, + torch.int64: True, + torch.bool: False, + }, + torch.bool: { + torch.float16: False, + torch.float32: False, + torch.float64: False, + torch.complex64: False, + torch.complex128: False, + torch.uint8: False, + torch.int8: False, + torch.int16: False, + torch.int32: False, + torch.int64: False, + torch.bool: True, + }, + }, + "equiv": { + torch.float16: { + torch.float16: True, + torch.float32: False, + torch.float64: False, + torch.complex64: False, + torch.complex128: False, + torch.uint8: False, + torch.int8: False, + torch.int16: False, + torch.int32: False, + torch.int64: False, + torch.bool: False, + }, + torch.float32: { + torch.float16: False, + torch.float32: True, + torch.float64: False, + torch.complex64: False, + torch.complex128: False, + torch.uint8: False, + 
torch.int8: False, + torch.int16: False, + torch.int32: False, + torch.int64: False, + torch.bool: False, + }, + torch.float64: { + torch.float16: False, + torch.float32: False, + torch.float64: True, + torch.complex64: False, + torch.complex128: False, + torch.uint8: False, + torch.int8: False, + torch.int16: False, + torch.int32: False, + torch.int64: False, + torch.bool: False, + }, + torch.complex64: { + torch.float16: False, + torch.float32: False, + torch.float64: False, + torch.complex64: True, + torch.complex128: False, + torch.uint8: False, + torch.int8: False, + torch.int16: False, + torch.int32: False, + torch.int64: False, + torch.bool: False, + }, + torch.complex128: { + torch.float16: False, + torch.float32: False, + torch.float64: False, + torch.complex64: False, + torch.complex128: True, + torch.uint8: False, + torch.int8: False, + torch.int16: False, + torch.int32: False, + torch.int64: False, + torch.bool: False, + }, + torch.uint8: { + torch.float16: False, + torch.float32: False, + torch.float64: False, + torch.complex64: False, + torch.complex128: False, + torch.uint8: True, + torch.int8: False, + torch.int16: False, + torch.int32: False, + torch.int64: False, + torch.bool: False, + }, + torch.int8: { + torch.float16: False, + torch.float32: False, + torch.float64: False, + torch.complex64: False, + torch.complex128: False, + torch.uint8: False, + torch.int8: True, + torch.int16: False, + torch.int32: False, + torch.int64: False, + torch.bool: False, + }, + torch.int16: { + torch.float16: False, + torch.float32: False, + torch.float64: False, + torch.complex64: False, + torch.complex128: False, + torch.uint8: False, + torch.int8: False, + torch.int16: True, + torch.int32: False, + torch.int64: False, + torch.bool: False, + }, + torch.int32: { + torch.float16: False, + torch.float32: False, + torch.float64: False, + torch.complex64: False, + torch.complex128: False, + torch.uint8: False, + torch.int8: False, + torch.int16: False, + torch.int32: True, + torch.int64: False, + torch.bool: False, + }, + torch.int64: { + torch.float16: False, + torch.float32: False, + torch.float64: False, + torch.complex64: False, + torch.complex128: False, + torch.uint8: False, + torch.int8: False, + torch.int16: False, + torch.int32: False, + torch.int64: True, + torch.bool: False, + }, + torch.bool: { + torch.float16: False, + torch.float32: False, + torch.float64: False, + torch.complex64: False, + torch.complex128: False, + torch.uint8: False, + torch.int8: False, + torch.int16: False, + torch.int32: False, + torch.int64: False, + torch.bool: True, + }, + }, + "safe": { + torch.float16: { + torch.float16: True, + torch.float32: True, + torch.float64: True, + torch.complex64: True, + torch.complex128: True, + torch.uint8: False, + torch.int8: False, + torch.int16: False, + torch.int32: False, + torch.int64: False, + torch.bool: False, + }, + torch.float32: { + torch.float16: False, + torch.float32: True, + torch.float64: True, + torch.complex64: True, + torch.complex128: True, + torch.uint8: False, + torch.int8: False, + torch.int16: False, + torch.int32: False, + torch.int64: False, + torch.bool: False, + }, + torch.float64: { + torch.float16: False, + torch.float32: False, + torch.float64: True, + torch.complex64: False, + torch.complex128: True, + torch.uint8: False, + torch.int8: False, + torch.int16: False, + torch.int32: False, + torch.int64: False, + torch.bool: False, + }, + torch.complex64: { + torch.float16: False, + torch.float32: False, + torch.float64: False, + 
torch.complex64: True, + torch.complex128: True, + torch.uint8: False, + torch.int8: False, + torch.int16: False, + torch.int32: False, + torch.int64: False, + torch.bool: False, + }, + torch.complex128: { + torch.float16: False, + torch.float32: False, + torch.float64: False, + torch.complex64: False, + torch.complex128: True, + torch.uint8: False, + torch.int8: False, + torch.int16: False, + torch.int32: False, + torch.int64: False, + torch.bool: False, + }, + torch.uint8: { + torch.float16: True, + torch.float32: True, + torch.float64: True, + torch.complex64: True, + torch.complex128: True, + torch.uint8: True, + torch.int8: False, + torch.int16: True, + torch.int32: True, + torch.int64: True, + torch.bool: False, + }, + torch.int8: { + torch.float16: True, + torch.float32: True, + torch.float64: True, + torch.complex64: True, + torch.complex128: True, + torch.uint8: False, + torch.int8: True, + torch.int16: True, + torch.int32: True, + torch.int64: True, + torch.bool: False, + }, + torch.int16: { + torch.float16: False, + torch.float32: True, + torch.float64: True, + torch.complex64: True, + torch.complex128: True, + torch.uint8: False, + torch.int8: False, + torch.int16: True, + torch.int32: True, + torch.int64: True, + torch.bool: False, + }, + torch.int32: { + torch.float16: False, + torch.float32: False, + torch.float64: True, + torch.complex64: False, + torch.complex128: True, + torch.uint8: False, + torch.int8: False, + torch.int16: False, + torch.int32: True, + torch.int64: True, + torch.bool: False, + }, + torch.int64: { + torch.float16: False, + torch.float32: False, + torch.float64: True, + torch.complex64: False, + torch.complex128: True, + torch.uint8: False, + torch.int8: False, + torch.int16: False, + torch.int32: False, + torch.int64: True, + torch.bool: False, + }, + torch.bool: { + torch.float16: True, + torch.float32: True, + torch.float64: True, + torch.complex64: True, + torch.complex128: True, + torch.uint8: True, + torch.int8: True, + torch.int16: True, + torch.int32: True, + torch.int64: True, + torch.bool: True, + }, + }, + "same_kind": { + torch.float16: { + torch.float16: True, + torch.float32: True, + torch.float64: True, + torch.complex64: True, + torch.complex128: True, + torch.uint8: False, + torch.int8: False, + torch.int16: False, + torch.int32: False, + torch.int64: False, + torch.bool: False, + }, + torch.float32: { + torch.float16: True, + torch.float32: True, + torch.float64: True, + torch.complex64: True, + torch.complex128: True, + torch.uint8: False, + torch.int8: False, + torch.int16: False, + torch.int32: False, + torch.int64: False, + torch.bool: False, + }, + torch.float64: { + torch.float16: True, + torch.float32: True, + torch.float64: True, + torch.complex64: True, + torch.complex128: True, + torch.uint8: False, + torch.int8: False, + torch.int16: False, + torch.int32: False, + torch.int64: False, + torch.bool: False, + }, + torch.complex64: { + torch.float16: False, + torch.float32: False, + torch.float64: False, + torch.complex64: True, + torch.complex128: True, + torch.uint8: False, + torch.int8: False, + torch.int16: False, + torch.int32: False, + torch.int64: False, + torch.bool: False, + }, + torch.complex128: { + torch.float16: False, + torch.float32: False, + torch.float64: False, + torch.complex64: True, + torch.complex128: True, + torch.uint8: False, + torch.int8: False, + torch.int16: False, + torch.int32: False, + torch.int64: False, + torch.bool: False, + }, + torch.uint8: { + torch.float16: True, + torch.float32: True, + 
torch.float64: True, + torch.complex64: True, + torch.complex128: True, + torch.uint8: True, + torch.int8: True, + torch.int16: True, + torch.int32: True, + torch.int64: True, + torch.bool: False, + }, + torch.int8: { + torch.float16: True, + torch.float32: True, + torch.float64: True, + torch.complex64: True, + torch.complex128: True, + torch.uint8: False, + torch.int8: True, + torch.int16: True, + torch.int32: True, + torch.int64: True, + torch.bool: False, + }, + torch.int16: { + torch.float16: True, + torch.float32: True, + torch.float64: True, + torch.complex64: True, + torch.complex128: True, + torch.uint8: False, + torch.int8: True, + torch.int16: True, + torch.int32: True, + torch.int64: True, + torch.bool: False, + }, + torch.int32: { + torch.float16: True, + torch.float32: True, + torch.float64: True, + torch.complex64: True, + torch.complex128: True, + torch.uint8: False, + torch.int8: True, + torch.int16: True, + torch.int32: True, + torch.int64: True, + torch.bool: False, + }, + torch.int64: { + torch.float16: True, + torch.float32: True, + torch.float64: True, + torch.complex64: True, + torch.complex128: True, + torch.uint8: False, + torch.int8: True, + torch.int16: True, + torch.int32: True, + torch.int64: True, + torch.bool: False, + }, + torch.bool: { + torch.float16: True, + torch.float32: True, + torch.float64: True, + torch.complex64: True, + torch.complex128: True, + torch.uint8: True, + torch.int8: True, + torch.int16: True, + torch.int32: True, + torch.int64: True, + torch.bool: True, + }, + }, + "unsafe": { + torch.float16: { + torch.float16: True, + torch.float32: True, + torch.float64: True, + torch.complex64: True, + torch.complex128: True, + torch.uint8: True, + torch.int8: True, + torch.int16: True, + torch.int32: True, + torch.int64: True, + torch.bool: True, + }, + torch.float32: { + torch.float16: True, + torch.float32: True, + torch.float64: True, + torch.complex64: True, + torch.complex128: True, + torch.uint8: True, + torch.int8: True, + torch.int16: True, + torch.int32: True, + torch.int64: True, + torch.bool: True, + }, + torch.float64: { + torch.float16: True, + torch.float32: True, + torch.float64: True, + torch.complex64: True, + torch.complex128: True, + torch.uint8: True, + torch.int8: True, + torch.int16: True, + torch.int32: True, + torch.int64: True, + torch.bool: True, + }, + torch.complex64: { + torch.float16: True, + torch.float32: True, + torch.float64: True, + torch.complex64: True, + torch.complex128: True, + torch.uint8: True, + torch.int8: True, + torch.int16: True, + torch.int32: True, + torch.int64: True, + torch.bool: True, + }, + torch.complex128: { + torch.float16: True, + torch.float32: True, + torch.float64: True, + torch.complex64: True, + torch.complex128: True, + torch.uint8: True, + torch.int8: True, + torch.int16: True, + torch.int32: True, + torch.int64: True, + torch.bool: True, + }, + torch.uint8: { + torch.float16: True, + torch.float32: True, + torch.float64: True, + torch.complex64: True, + torch.complex128: True, + torch.uint8: True, + torch.int8: True, + torch.int16: True, + torch.int32: True, + torch.int64: True, + torch.bool: True, + }, + torch.int8: { + torch.float16: True, + torch.float32: True, + torch.float64: True, + torch.complex64: True, + torch.complex128: True, + torch.uint8: True, + torch.int8: True, + torch.int16: True, + torch.int32: True, + torch.int64: True, + torch.bool: True, + }, + torch.int16: { + torch.float16: True, + torch.float32: True, + torch.float64: True, + torch.complex64: True, + 
torch.complex128: True, + torch.uint8: True, + torch.int8: True, + torch.int16: True, + torch.int32: True, + torch.int64: True, + torch.bool: True, + }, + torch.int32: { + torch.float16: True, + torch.float32: True, + torch.float64: True, + torch.complex64: True, + torch.complex128: True, + torch.uint8: True, + torch.int8: True, + torch.int16: True, + torch.int32: True, + torch.int64: True, + torch.bool: True, + }, + torch.int64: { + torch.float16: True, + torch.float32: True, + torch.float64: True, + torch.complex64: True, + torch.complex128: True, + torch.uint8: True, + torch.int8: True, + torch.int16: True, + torch.int32: True, + torch.int64: True, + torch.bool: True, + }, + torch.bool: { + torch.float16: True, + torch.float32: True, + torch.float64: True, + torch.complex64: True, + torch.complex128: True, + torch.uint8: True, + torch.int8: True, + torch.int16: True, + torch.int32: True, + torch.int64: True, + torch.bool: True, + }, + }, +} + + +_result_type_dict = { + torch.float16: { + torch.float16: torch.float16, + torch.float32: torch.float32, + torch.float64: torch.float64, + torch.complex64: torch.complex64, + torch.complex128: torch.complex128, + torch.uint8: torch.float16, + torch.int8: torch.float16, + torch.int16: torch.float32, + torch.int32: torch.float64, + torch.int64: torch.float64, + torch.bool: torch.float16, + }, + torch.float32: { + torch.float16: torch.float32, + torch.float32: torch.float32, + torch.float64: torch.float64, + torch.complex64: torch.complex64, + torch.complex128: torch.complex128, + torch.uint8: torch.float32, + torch.int8: torch.float32, + torch.int16: torch.float32, + torch.int32: torch.float64, + torch.int64: torch.float64, + torch.bool: torch.float32, + }, + torch.float64: { + torch.float16: torch.float64, + torch.float32: torch.float64, + torch.float64: torch.float64, + torch.complex64: torch.complex128, + torch.complex128: torch.complex128, + torch.uint8: torch.float64, + torch.int8: torch.float64, + torch.int16: torch.float64, + torch.int32: torch.float64, + torch.int64: torch.float64, + torch.bool: torch.float64, + }, + torch.complex64: { + torch.float16: torch.complex64, + torch.float32: torch.complex64, + torch.float64: torch.complex128, + torch.complex64: torch.complex64, + torch.complex128: torch.complex128, + torch.uint8: torch.complex64, + torch.int8: torch.complex64, + torch.int16: torch.complex64, + torch.int32: torch.complex128, + torch.int64: torch.complex128, + torch.bool: torch.complex64, + }, + torch.complex128: { + torch.float16: torch.complex128, + torch.float32: torch.complex128, + torch.float64: torch.complex128, + torch.complex64: torch.complex128, + torch.complex128: torch.complex128, + torch.uint8: torch.complex128, + torch.int8: torch.complex128, + torch.int16: torch.complex128, + torch.int32: torch.complex128, + torch.int64: torch.complex128, + torch.bool: torch.complex128, + }, + torch.uint8: { + torch.float16: torch.float16, + torch.float32: torch.float32, + torch.float64: torch.float64, + torch.complex64: torch.complex64, + torch.complex128: torch.complex128, + torch.uint8: torch.uint8, + torch.int8: torch.int16, + torch.int16: torch.int16, + torch.int32: torch.int32, + torch.int64: torch.int64, + torch.bool: torch.uint8, + }, + torch.int8: { + torch.float16: torch.float16, + torch.float32: torch.float32, + torch.float64: torch.float64, + torch.complex64: torch.complex64, + torch.complex128: torch.complex128, + torch.uint8: torch.int16, + torch.int8: torch.int8, + torch.int16: torch.int16, + torch.int32: torch.int32, 
+ torch.int64: torch.int64, + torch.bool: torch.int8, + }, + torch.int16: { + torch.float16: torch.float32, + torch.float32: torch.float32, + torch.float64: torch.float64, + torch.complex64: torch.complex64, + torch.complex128: torch.complex128, + torch.uint8: torch.int16, + torch.int8: torch.int16, + torch.int16: torch.int16, + torch.int32: torch.int32, + torch.int64: torch.int64, + torch.bool: torch.int16, + }, + torch.int32: { + torch.float16: torch.float64, + torch.float32: torch.float64, + torch.float64: torch.float64, + torch.complex64: torch.complex128, + torch.complex128: torch.complex128, + torch.uint8: torch.int32, + torch.int8: torch.int32, + torch.int16: torch.int32, + torch.int32: torch.int32, + torch.int64: torch.int64, + torch.bool: torch.int32, + }, + torch.int64: { + torch.float16: torch.float64, + torch.float32: torch.float64, + torch.float64: torch.float64, + torch.complex64: torch.complex128, + torch.complex128: torch.complex128, + torch.uint8: torch.int64, + torch.int8: torch.int64, + torch.int16: torch.int64, + torch.int32: torch.int64, + torch.int64: torch.int64, + torch.bool: torch.int64, + }, + torch.bool: { + torch.float16: torch.float16, + torch.float32: torch.float32, + torch.float64: torch.float64, + torch.complex64: torch.complex64, + torch.complex128: torch.complex128, + torch.uint8: torch.uint8, + torch.int8: torch.int8, + torch.int16: torch.int16, + torch.int32: torch.int32, + torch.int64: torch.int64, + torch.bool: torch.bool, + }, +} diff --git a/venv/lib/python3.10/site-packages/torch/_numpy/_dtypes.py b/venv/lib/python3.10/site-packages/torch/_numpy/_dtypes.py new file mode 100644 index 0000000000000000000000000000000000000000..f8b8f4f722bec921bc352777afd298aae0e2dd20 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/_numpy/_dtypes.py @@ -0,0 +1,434 @@ +# mypy: ignore-errors + +""" Define analogs of numpy dtypes supported by pytorch. +Define the scalar types and supported dtypes and numpy <--> torch dtype mappings. +""" +import builtins + +import torch + +from . import _dtypes_impl + + +# ### Scalar types ### + + +class generic: + name = "generic" + + def __new__(cls, value): + # NumPy scalars are modelled as 0-D arrays + # so a call to np.float32(4) produces a 0-D array. 
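+        # Illustrative note (not in the upstream file): e.g. float64(3) comes back
+        # as a 0-D wrapped array, and the string sentinels "inf"/"nan" are mapped
+        # to torch.inf / torch.nan just below.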
+ + from ._ndarray import asarray, ndarray + + if isinstance(value, str) and value in ["inf", "nan"]: + value = {"inf": torch.inf, "nan": torch.nan}[value] + + if isinstance(value, ndarray): + return value.astype(cls) + else: + return asarray(value, dtype=cls) + + +################## +# abstract types # +################## + + +class number(generic): + name = "number" + + +class integer(number): + name = "integer" + + +class inexact(number): + name = "inexact" + + +class signedinteger(integer): + name = "signedinteger" + + +class unsignedinteger(integer): + name = "unsignedinteger" + + +class floating(inexact): + name = "floating" + + +class complexfloating(inexact): + name = "complexfloating" + + +_abstract_dtypes = [ + "generic", + "number", + "integer", + "signedinteger", + "unsignedinteger", + "inexact", + "floating", + "complexfloating", +] + +# ##### concrete types + +# signed integers + + +class int8(signedinteger): + name = "int8" + typecode = "b" + torch_dtype = torch.int8 + + +class int16(signedinteger): + name = "int16" + typecode = "h" + torch_dtype = torch.int16 + + +class int32(signedinteger): + name = "int32" + typecode = "i" + torch_dtype = torch.int32 + + +class int64(signedinteger): + name = "int64" + typecode = "l" + torch_dtype = torch.int64 + + +# unsigned integers + + +class uint8(unsignedinteger): + name = "uint8" + typecode = "B" + torch_dtype = torch.uint8 + + +# floating point + + +class float16(floating): + name = "float16" + typecode = "e" + torch_dtype = torch.float16 + + +class float32(floating): + name = "float32" + typecode = "f" + torch_dtype = torch.float32 + + +class float64(floating): + name = "float64" + typecode = "d" + torch_dtype = torch.float64 + + +class complex64(complexfloating): + name = "complex64" + typecode = "F" + torch_dtype = torch.complex64 + + +class complex128(complexfloating): + name = "complex128" + typecode = "D" + torch_dtype = torch.complex128 + + +class bool_(generic): + name = "bool_" + typecode = "?" + torch_dtype = torch.bool + + +# name aliases +_name_aliases = { + "intp": int64, + "int_": int64, + "intc": int32, + "byte": int8, + "short": int16, + "longlong": int64, # XXX: is this correct? + "ubyte": uint8, + "half": float16, + "single": float32, + "double": float64, + "float_": float64, + "csingle": complex64, + "singlecomplex": complex64, + "cdouble": complex128, + "cfloat": complex128, + "complex_": complex128, +} +# We register float_ = float32 and so on +for name, obj in _name_aliases.items(): + vars()[name] = obj + + +# Replicate this NumPy-defined way of grouping scalar types, +# cf tests/core/test_scalar_methods.py +sctypes = { + "int": [int8, int16, int32, int64], + "uint": [uint8], + "float": [float16, float32, float64], + "complex": [complex64, complex128], + "others": [bool_], +} + + +# Support mappings/functions + +_names = {st.name: st for cat in sctypes for st in sctypes[cat]} +_typecodes = {st.typecode: st for cat in sctypes for st in sctypes[cat]} +_torch_dtypes = {st.torch_dtype: st for cat in sctypes for st in sctypes[cat]} + + +_aliases = { + "u1": uint8, + "i1": int8, + "i2": int16, + "i4": int32, + "i8": int64, + "b": int8, # XXX: srsly? 
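+    # ("b" really is int8: numpy's array-protocol typestr for a signed byte,
+    #  i.e. numpy.dtype("b") == numpy.int8)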
+ "f2": float16, + "f4": float32, + "f8": float64, + "c8": complex64, + "c16": complex128, + # numpy-specific trailing underscore + "bool_": bool_, +} + + +_python_types = { + int: int64, + float: float64, + complex: complex128, + builtins.bool: bool_, + # also allow stringified names of python types + int.__name__: int64, + float.__name__: float64, + complex.__name__: complex128, + builtins.bool.__name__: bool_, +} + + +def sctype_from_string(s): + """Normalize a string value: a type 'name' or a typecode or a width alias.""" + if s in _names: + return _names[s] + if s in _name_aliases.keys(): + return _name_aliases[s] + if s in _typecodes: + return _typecodes[s] + if s in _aliases: + return _aliases[s] + if s in _python_types: + return _python_types[s] + raise TypeError(f"data type {s!r} not understood") + + +def sctype_from_torch_dtype(torch_dtype): + return _torch_dtypes[torch_dtype] + + +# ### DTypes. ### + + +def dtype(arg): + if arg is None: + arg = _dtypes_impl.default_dtypes().float_dtype + return DType(arg) + + +class DType: + def __init__(self, arg): + # a pytorch object? + if isinstance(arg, torch.dtype): + sctype = _torch_dtypes[arg] + elif isinstance(arg, torch.Tensor): + sctype = _torch_dtypes[arg.dtype] + # a scalar type? + elif issubclass_(arg, generic): + sctype = arg + # a dtype already? + elif isinstance(arg, DType): + sctype = arg._scalar_type + # a has a right attribute? + elif hasattr(arg, "dtype"): + sctype = arg.dtype._scalar_type + else: + sctype = sctype_from_string(arg) + self._scalar_type = sctype + + @property + def name(self): + return self._scalar_type.name + + @property + def type(self): + return self._scalar_type + + @property + def kind(self): + # https://numpy.org/doc/stable/reference/generated/numpy.dtype.kind.html + return _torch_dtypes[self.torch_dtype].name[0] + + @property + def typecode(self): + return self._scalar_type.typecode + + def __eq__(self, other): + if isinstance(other, DType): + return self._scalar_type == other._scalar_type + try: + other_instance = DType(other) + except TypeError: + return False + return self._scalar_type == other_instance._scalar_type + + @property + def torch_dtype(self): + return self._scalar_type.torch_dtype + + def __hash__(self): + return hash(self._scalar_type.name) + + def __repr__(self): + return f'dtype("{self.name}")' + + __str__ = __repr__ + + @property + def itemsize(self): + elem = self.type(1) + return elem.tensor.element_size() + + def __getstate__(self): + return self._scalar_type + + def __setstate__(self, value): + self._scalar_type = value + + +typecodes = { + "All": "efdFDBbhil?", + "AllFloat": "efdFD", + "AllInteger": "Bbhil", + "Integer": "bhil", + "UnsignedInteger": "B", + "Float": "efd", + "Complex": "FD", +} + + +# ### Defaults and dtype discovery + + +def set_default_dtype(fp_dtype="numpy", int_dtype="numpy"): + """Set the (global) defaults for fp, complex, and int dtypes. + + The complex dtype is inferred from the float (fp) dtype. It has + a width at least twice the width of the float dtype, + i.e., it's complex128 for float64 and complex64 for float32. + + Parameters + ---------- + fp_dtype + Allowed values are "numpy", "pytorch" or dtype_like things which + can be converted into a DType instance. + Default is "numpy" (i.e. float64). + int_dtype + Allowed values are "numpy", "pytorch" or dtype_like things which + can be converted into a DType instance. + Default is "numpy" (i.e. int64). 
+ + Returns + ------- + The old default dtype state: a namedtuple with attributes ``float_dtype``, + ``complex_dtypes`` and ``int_dtype``. These attributes store *pytorch* + dtypes. + + Notes + ------------ + This functions has a side effect: it sets the global state with the provided dtypes. + + The complex dtype has bit width of at least twice the width of the float + dtype, i.e. it's complex128 for float64 and complex64 for float32. + + """ + if fp_dtype not in ["numpy", "pytorch"]: + fp_dtype = dtype(fp_dtype).torch_dtype + if int_dtype not in ["numpy", "pytorch"]: + int_dtype = dtype(int_dtype).torch_dtype + + if fp_dtype == "numpy": + float_dtype = torch.float64 + elif fp_dtype == "pytorch": + float_dtype = torch.float32 + else: + float_dtype = fp_dtype + + complex_dtype = { + torch.float64: torch.complex128, + torch.float32: torch.complex64, + torch.float16: torch.complex64, + }[float_dtype] + + if int_dtype in ["numpy", "pytorch"]: + int_dtype = torch.int64 + else: + int_dtype = int_dtype + + new_defaults = _dtypes_impl.DefaultDTypes( + float_dtype=float_dtype, complex_dtype=complex_dtype, int_dtype=int_dtype + ) + + # set the new global state and return the old state + old_defaults = _dtypes_impl.default_dtypes + _dtypes_impl._default_dtypes = new_defaults + return old_defaults + + +def issubclass_(arg, klass): + try: + return issubclass(arg, klass) + except TypeError: + return False + + +def issubdtype(arg1, arg2): + # cf https://github.com/numpy/numpy/blob/v1.24.0/numpy/core/numerictypes.py#L356-L420 + + # We also accept strings even if NumPy doesn't as dtypes are serialized as their + # string representation in dynamo's graph + def str_to_abstract(t): + if isinstance(t, str) and t in _abstract_dtypes: + return globals()[t] + return t + + arg1 = str_to_abstract(arg1) + arg2 = str_to_abstract(arg2) + + if not issubclass_(arg1, generic): + arg1 = dtype(arg1).type + if not issubclass_(arg2, generic): + arg2 = dtype(arg2).type + return issubclass(arg1, arg2) + + +__all__ = ["dtype", "DType", "typecodes", "issubdtype", "set_default_dtype", "sctypes"] +__all__ += list(_names.keys()) # noqa: PLE0605 +__all__ += list(_name_aliases.keys()) # noqa: PLE0605 +__all__ += _abstract_dtypes # noqa: PLE0605 diff --git a/venv/lib/python3.10/site-packages/torch/_numpy/_dtypes_impl.py b/venv/lib/python3.10/site-packages/torch/_numpy/_dtypes_impl.py new file mode 100644 index 0000000000000000000000000000000000000000..2809c69e999c03834840d45e43e52cc8670b73e0 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/_numpy/_dtypes_impl.py @@ -0,0 +1,216 @@ +# mypy: ignore-errors + +"""Dtypes/scalar type implementaions with torch dtypes. + +Here `dtype` is always a torch.dtype, this module knows nothing about +scalar types, wrapper dtypes or anything like that. PyTorch only. 
+""" +from collections import namedtuple + +import torch + +# defaults : mimic NumPy, allow user control +DefaultDTypes = namedtuple( + "DefaultDTypes", ["float_dtype", "complex_dtype", "int_dtype"] +) + +# a global state +# We set it the first time we call default_dtypes() to avoid importing +# torch._dynamo.config and create a circular reference +_default_dtypes = None + + +def default_dtypes(): + global _default_dtypes + if _default_dtypes is None: + import torch._dynamo.config as config + + _default_dtypes = DefaultDTypes( + float_dtype=getattr(torch, config.numpy_default_float), + complex_dtype=getattr(torch, config.numpy_default_complex), + int_dtype=getattr(torch, config.numpy_default_int), + ) + assert isinstance(_default_dtypes.float_dtype, torch.dtype) + assert isinstance(_default_dtypes.complex_dtype, torch.dtype) + assert isinstance(_default_dtypes.int_dtype, torch.dtype) + return _default_dtypes + + +def get_default_dtype_for(dtype): + """Default scalar type given sctype category.""" + if dtype == torch.bool: + return dtype + if dtype.is_complex: + return default_dtypes().complex_dtype + if dtype.is_floating_point: + return default_dtypes().float_dtype + # else, it must be (some) integer + return default_dtypes().int_dtype + + +from . import _casting_dicts as _cd + + +def can_cast_impl(from_torch_dtype, to_torch_dtype, casting): + return _cd._can_cast_dict[casting][from_torch_dtype][to_torch_dtype] + + +def result_type_impl(*tensors): + # NB: torch dtypes here + dtyp = tensors[0].dtype + if len(tensors) == 1: + return dtyp + + for curr in tensors[1:]: + dtyp = _cd._result_type_dict[dtyp][curr.dtype] + + return dtyp + + +def python_type_for_torch(dtyp): + """Get a python scalar type a torch dtype""" + if dtyp.is_floating_point: + typ = float + elif dtyp.is_complex: + typ = complex + elif dtyp == torch.bool: + typ = bool + else: + typ = int + return typ + + +# ### NEP 50 helpers ### + +_SCALAR_TYPES = (int, bool, float, complex) + +_SCALAR_AND_SYMBOLIC_TYPES = ( + *_SCALAR_TYPES, + torch.SymInt, + torch.SymFloat, + torch.SymBool, +) + +_NEP50_FUNCS_TENSOR_ONLY = ( + "minimum", + "maximum", + "logaddexp", + "logaddexp2", + "lcm", + "gcd", + "hypot", + "heaviside", + "fmod", + "fmin", + "fmax", + "copysign", + "arctan2", +) + + +def is_scalar(x): + return isinstance(x, _SCALAR_TYPES) + + +def is_scalar_or_symbolic(x): + return isinstance(x, _SCALAR_AND_SYMBOLIC_TYPES) + + +def _dtype_for_scalar(py_type): + return { + bool: torch.bool, + torch.SymBool: torch.bool, + int: torch.int64, + torch.SymInt: torch.int64, + float: torch.float64, + torch.SymFloat: torch.float64, + complex: torch.complex128, + }[py_type] + + +def _dtype_for_scalar_or_tensor(x): + return x.dtype if isinstance(x, torch.Tensor) else _dtype_for_scalar(type(x)) + + +def is_float_or_fp_tensor(x): + return _dtype_for_scalar_or_tensor(x).is_floating_point + + +def is_complex_or_complex_tensor(x): + return _dtype_for_scalar_or_tensor(x).is_complex + + +def _category(dtype): + return { + torch.bool: 0, + torch.SymBool: 0, + # int + torch.uint8: 1, + torch.int8: 1, + torch.int16: 1, + torch.int32: 1, + torch.int64: 1, + torch.SymInt: 1, + # float + torch.float16: 2, + torch.float32: 2, + torch.float64: 2, + torch.SymFloat: 2, + # complex + torch.complex64: 3, + torch.complex128: 3, + }[dtype] + + +def nep50_to_tensors(x1, x2, handle_weaks, function_name): + """If either of inputs is a python scalar, type-promote with NEP 50.""" + + def to_tensor(scalar, dtype=None): + if dtype is None: + dtype = 
_dtype_for_scalar(type(scalar)) + dtype = get_default_dtype_for(dtype) + return torch.as_tensor(scalar, dtype=dtype) + + x1_is_weak = not isinstance(x1, torch.Tensor) + x2_is_weak = not isinstance(x2, torch.Tensor) + if not handle_weaks or (x1_is_weak and x2_is_weak): + x1 = to_tensor(x1) if x1_is_weak else x1 + x2 = to_tensor(x2) if x2_is_weak else x2 + return x1, x2 + + # scalar tensor: NEP 50 + assert x1_is_weak != x2_is_weak + + weak, not_weak = (x1, x2) if x1_is_weak else (x2, x1) + + # find the dtype for the weak's type + weak_dtype = _dtype_for_scalar(type(weak)) + + cat_weak = _category(weak_dtype) + cat_not_weak = _category(not_weak.dtype) + + dt = not_weak.dtype if cat_weak <= cat_not_weak else None + + # special-case complex + float32 + if weak_dtype.is_complex and not_weak.dtype == torch.float32: + dt = torch.complex64 + + # detect overflows: in PyTorch, uint8(-1) wraps around to 255, + # while NEP50 mandates an exception. + # + # Note that we only check if each element of the binop overflows, + # not the result. Consider, e.g. `uint8(100) + 200`. Operands are OK + # in uint8, but the result overflows and wrap around 255. + # Numpy emits a RuntimeWarning, PyTorch does not, and we do not either. + if cat_weak == 1 and cat_not_weak == 1: + # integers + iinfo = torch.iinfo(not_weak.dtype) + if not (iinfo.min <= weak <= iinfo.max): + raise OverflowError( + f"Python integer {weak} out of bounds for {not_weak.dtype}" + ) + if weak_dtype != dt or function_name in _NEP50_FUNCS_TENSOR_ONLY: + # finally, can make `weak` into a 0D tensor, if both parameters are required to be tensor. + weak = to_tensor(weak, dt) + + return (weak, not_weak) if x1_is_weak else (not_weak, weak) diff --git a/venv/lib/python3.10/site-packages/torch/_numpy/_funcs.py b/venv/lib/python3.10/site-packages/torch/_numpy/_funcs.py new file mode 100644 index 0000000000000000000000000000000000000000..666922adc709ea04f02aad43b80cefd380deef29 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/_numpy/_funcs.py @@ -0,0 +1,75 @@ +# mypy: ignore-errors + +import inspect +import itertools + +from . import _funcs_impl, _reductions_impl +from ._normalizations import normalizer + +# _funcs_impl.py contains functions which mimic NumPy's eponymous equivalents, +# and consume/return PyTorch tensors/dtypes. +# They are also type annotated. +# Pull these functions from _funcs_impl and decorate them with @normalizer, which +# - Converts any input `np.ndarray`, `torch._numpy.ndarray`, list of lists, Python scalars, etc into a `torch.Tensor`. 
+# - Maps NumPy dtypes to PyTorch dtypes +# - If the input to the `axis` kwarg is an ndarray, it maps it into a tuple +# - Implements the semantics for the `out=` arg +# - Wraps back the outputs into `torch._numpy.ndarrays` + + +def _public_functions(mod): + def is_public_function(f): + return inspect.isfunction(f) and not f.__name__.startswith("_") + + return inspect.getmembers(mod, is_public_function) + + +# We fill in __all__ in the loop below +__all__ = [] + +# decorate implementer functions with argument normalizers and export to the top namespace +for name, func in itertools.chain( + _public_functions(_funcs_impl), _public_functions(_reductions_impl) +): + if name in ["percentile", "quantile", "median"]: + decorated = normalizer(func, promote_scalar_result=True) + elif name == "einsum": + # normalized manually + decorated = func + else: + decorated = normalizer(func) + + decorated.__qualname__ = name + decorated.__name__ = name + vars()[name] = decorated + __all__.append(name) + + +""" +Vendored objects from numpy.lib.index_tricks +""" + + +class IndexExpression: + """ + Written by Konrad Hinsen + last revision: 1999-7-23 + + Cosmetic changes by T. Oliphant 2001 + """ + + def __init__(self, maketuple): + self.maketuple = maketuple + + def __getitem__(self, item): + if self.maketuple and not isinstance(item, tuple): + return (item,) + else: + return item + + +index_exp = IndexExpression(maketuple=True) +s_ = IndexExpression(maketuple=False) + + +__all__ += ["index_exp", "s_"] diff --git a/venv/lib/python3.10/site-packages/torch/_numpy/_funcs_impl.py b/venv/lib/python3.10/site-packages/torch/_numpy/_funcs_impl.py new file mode 100644 index 0000000000000000000000000000000000000000..27a2c20d74cf61aef2cd227b33ccf4d03a003d87 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/_numpy/_funcs_impl.py @@ -0,0 +1,2053 @@ +# mypy: ignore-errors + +"""A thin pytorch / numpy compat layer. + +Things imported from here have numpy-compatible signatures but operate on +pytorch tensors. +""" +# Contents of this module ends up in the main namespace via _funcs.py +# where type annotations are used in conjunction with the @normalizer decorator. +from __future__ import annotations + +import builtins +import itertools +import operator +from typing import Optional, Sequence + +import torch + +from . 
import _dtypes_impl, _util +from ._normalizations import ( + ArrayLike, + ArrayLikeOrScalar, + CastingModes, + DTypeLike, + NDArray, + NotImplementedType, + OutArray, +) + + +def copy( + a: ArrayLike, order: NotImplementedType = "K", subok: NotImplementedType = False +): + return a.clone() + + +def copyto( + dst: NDArray, + src: ArrayLike, + casting: Optional[CastingModes] = "same_kind", + where: NotImplementedType = None, +): + (src,) = _util.typecast_tensors((src,), dst.dtype, casting=casting) + dst.copy_(src) + + +def atleast_1d(*arys: ArrayLike): + res = torch.atleast_1d(*arys) + if isinstance(res, tuple): + return list(res) + else: + return res + + +def atleast_2d(*arys: ArrayLike): + res = torch.atleast_2d(*arys) + if isinstance(res, tuple): + return list(res) + else: + return res + + +def atleast_3d(*arys: ArrayLike): + res = torch.atleast_3d(*arys) + if isinstance(res, tuple): + return list(res) + else: + return res + + +def _concat_check(tup, dtype, out): + if tup == (): + raise ValueError("need at least one array to concatenate") + + """Check inputs in concatenate et al.""" + if out is not None and dtype is not None: + # mimic numpy + raise TypeError( + "concatenate() only takes `out` or `dtype` as an " + "argument, but both were provided." + ) + + +def _concat_cast_helper(tensors, out=None, dtype=None, casting="same_kind"): + """Figure out dtypes, cast if necessary.""" + + if out is not None or dtype is not None: + # figure out the type of the inputs and outputs + out_dtype = out.dtype.torch_dtype if dtype is None else dtype + else: + out_dtype = _dtypes_impl.result_type_impl(*tensors) + + # cast input arrays if necessary; do not broadcast them agains `out` + tensors = _util.typecast_tensors(tensors, out_dtype, casting) + + return tensors + + +def _concatenate( + tensors, axis=0, out=None, dtype=None, casting: Optional[CastingModes] = "same_kind" +): + # pure torch implementation, used below and in cov/corrcoef below + tensors, axis = _util.axis_none_flatten(*tensors, axis=axis) + tensors = _concat_cast_helper(tensors, out, dtype, casting) + return torch.cat(tensors, axis) + + +def concatenate( + ar_tuple: Sequence[ArrayLike], + axis=0, + out: Optional[OutArray] = None, + dtype: Optional[DTypeLike] = None, + casting: Optional[CastingModes] = "same_kind", +): + _concat_check(ar_tuple, dtype, out=out) + result = _concatenate(ar_tuple, axis=axis, out=out, dtype=dtype, casting=casting) + return result + + +def vstack( + tup: Sequence[ArrayLike], + *, + dtype: Optional[DTypeLike] = None, + casting: Optional[CastingModes] = "same_kind", +): + _concat_check(tup, dtype, out=None) + tensors = _concat_cast_helper(tup, dtype=dtype, casting=casting) + return torch.vstack(tensors) + + +row_stack = vstack + + +def hstack( + tup: Sequence[ArrayLike], + *, + dtype: Optional[DTypeLike] = None, + casting: Optional[CastingModes] = "same_kind", +): + _concat_check(tup, dtype, out=None) + tensors = _concat_cast_helper(tup, dtype=dtype, casting=casting) + return torch.hstack(tensors) + + +def dstack( + tup: Sequence[ArrayLike], + *, + dtype: Optional[DTypeLike] = None, + casting: Optional[CastingModes] = "same_kind", +): + # XXX: in numpy 1.24 dstack does not have dtype and casting keywords + # but {h,v}stack do. Hence add them here for consistency. 
+ _concat_check(tup, dtype, out=None) + tensors = _concat_cast_helper(tup, dtype=dtype, casting=casting) + return torch.dstack(tensors) + + +def column_stack( + tup: Sequence[ArrayLike], + *, + dtype: Optional[DTypeLike] = None, + casting: Optional[CastingModes] = "same_kind", +): + # XXX: in numpy 1.24 column_stack does not have dtype and casting keywords + # but row_stack does. (because row_stack is an alias for vstack, really). + # Hence add these keywords here for consistency. + _concat_check(tup, dtype, out=None) + tensors = _concat_cast_helper(tup, dtype=dtype, casting=casting) + return torch.column_stack(tensors) + + +def stack( + arrays: Sequence[ArrayLike], + axis=0, + out: Optional[OutArray] = None, + *, + dtype: Optional[DTypeLike] = None, + casting: Optional[CastingModes] = "same_kind", +): + _concat_check(arrays, dtype, out=out) + + tensors = _concat_cast_helper(arrays, dtype=dtype, casting=casting) + result_ndim = tensors[0].ndim + 1 + axis = _util.normalize_axis_index(axis, result_ndim) + return torch.stack(tensors, axis=axis) + + +def append(arr: ArrayLike, values: ArrayLike, axis=None): + if axis is None: + if arr.ndim != 1: + arr = arr.flatten() + values = values.flatten() + axis = arr.ndim - 1 + return _concatenate((arr, values), axis=axis) + + +# ### split ### + + +def _split_helper(tensor, indices_or_sections, axis, strict=False): + if isinstance(indices_or_sections, int): + return _split_helper_int(tensor, indices_or_sections, axis, strict) + elif isinstance(indices_or_sections, (list, tuple)): + # NB: drop split=..., it only applies to split_helper_int + return _split_helper_list(tensor, list(indices_or_sections), axis) + else: + raise TypeError("split_helper: ", type(indices_or_sections)) + + +def _split_helper_int(tensor, indices_or_sections, axis, strict=False): + if not isinstance(indices_or_sections, int): + raise NotImplementedError("split: indices_or_sections") + + axis = _util.normalize_axis_index(axis, tensor.ndim) + + # numpy: l%n chunks of size (l//n + 1), the rest are sized l//n + l, n = tensor.shape[axis], indices_or_sections + + if n <= 0: + raise ValueError() + + if l % n == 0: + num, sz = n, l // n + lst = [sz] * num + else: + if strict: + raise ValueError("array split does not result in an equal division") + + num, sz = l % n, l // n + 1 + lst = [sz] * num + + lst += [sz - 1] * (n - num) + + return torch.split(tensor, lst, axis) + + +def _split_helper_list(tensor, indices_or_sections, axis): + if not isinstance(indices_or_sections, list): + raise NotImplementedError("split: indices_or_sections: list") + # numpy expects indices, while torch expects lengths of sections + # also, numpy appends zero-size arrays for indices above the shape[axis] + lst = [x for x in indices_or_sections if x <= tensor.shape[axis]] + num_extra = len(indices_or_sections) - len(lst) + + lst.append(tensor.shape[axis]) + lst = [ + lst[0], + ] + [a - b for a, b in zip(lst[1:], lst[:-1])] + lst += [0] * num_extra + + return torch.split(tensor, lst, axis) + + +def array_split(ary: ArrayLike, indices_or_sections, axis=0): + return _split_helper(ary, indices_or_sections, axis) + + +def split(ary: ArrayLike, indices_or_sections, axis=0): + return _split_helper(ary, indices_or_sections, axis, strict=True) + + +def hsplit(ary: ArrayLike, indices_or_sections): + if ary.ndim == 0: + raise ValueError("hsplit only works on arrays of 1 or more dimensions") + axis = 1 if ary.ndim > 1 else 0 + return _split_helper(ary, indices_or_sections, axis, strict=True) + + +def vsplit(ary: ArrayLike, 
indices_or_sections): + if ary.ndim < 2: + raise ValueError("vsplit only works on arrays of 2 or more dimensions") + return _split_helper(ary, indices_or_sections, 0, strict=True) + + +def dsplit(ary: ArrayLike, indices_or_sections): + if ary.ndim < 3: + raise ValueError("dsplit only works on arrays of 3 or more dimensions") + return _split_helper(ary, indices_or_sections, 2, strict=True) + + +def kron(a: ArrayLike, b: ArrayLike): + return torch.kron(a, b) + + +def vander(x: ArrayLike, N=None, increasing=False): + return torch.vander(x, N, increasing) + + +# ### linspace, geomspace, logspace and arange ### + + +def linspace( + start: ArrayLike, + stop: ArrayLike, + num=50, + endpoint=True, + retstep=False, + dtype: Optional[DTypeLike] = None, + axis=0, +): + if axis != 0 or retstep or not endpoint: + raise NotImplementedError + if dtype is None: + dtype = _dtypes_impl.default_dtypes().float_dtype + # XXX: raises TypeError if start or stop are not scalars + return torch.linspace(start, stop, num, dtype=dtype) + + +def geomspace( + start: ArrayLike, + stop: ArrayLike, + num=50, + endpoint=True, + dtype: Optional[DTypeLike] = None, + axis=0, +): + if axis != 0 or not endpoint: + raise NotImplementedError + base = torch.pow(stop / start, 1.0 / (num - 1)) + logbase = torch.log(base) + return torch.logspace( + torch.log(start) / logbase, + torch.log(stop) / logbase, + num, + base=base, + ) + + +def logspace( + start, + stop, + num=50, + endpoint=True, + base=10.0, + dtype: Optional[DTypeLike] = None, + axis=0, +): + if axis != 0 or not endpoint: + raise NotImplementedError + return torch.logspace(start, stop, num, base=base, dtype=dtype) + + +def arange( + start: Optional[ArrayLikeOrScalar] = None, + stop: Optional[ArrayLikeOrScalar] = None, + step: Optional[ArrayLikeOrScalar] = 1, + dtype: Optional[DTypeLike] = None, + *, + like: NotImplementedType = None, +): + if step == 0: + raise ZeroDivisionError + if stop is None and start is None: + raise TypeError + if stop is None: + # XXX: this breaks if start is passed as a kwarg: + # arange(start=4) should raise (no stop) but doesn't + start, stop = 0, start + if start is None: + start = 0 + + # the dtype of the result + if dtype is None: + dtype = ( + _dtypes_impl.default_dtypes().float_dtype + if any(_dtypes_impl.is_float_or_fp_tensor(x) for x in (start, stop, step)) + else _dtypes_impl.default_dtypes().int_dtype + ) + work_dtype = torch.float64 if dtype.is_complex else dtype + + # RuntimeError: "lt_cpu" not implemented for 'ComplexFloat'. Fall back to eager. + if any(_dtypes_impl.is_complex_or_complex_tensor(x) for x in (start, stop, step)): + raise NotImplementedError + + if (step > 0 and start > stop) or (step < 0 and start < stop): + # empty range + return torch.empty(0, dtype=dtype) + + result = torch.arange(start, stop, step, dtype=work_dtype) + result = _util.cast_if_needed(result, dtype) + return result + + +# ### zeros/ones/empty/full ### + + +def empty( + shape, + dtype: Optional[DTypeLike] = None, + order: NotImplementedType = "C", + *, + like: NotImplementedType = None, +): + if dtype is None: + dtype = _dtypes_impl.default_dtypes().float_dtype + return torch.empty(shape, dtype=dtype) + + +# NB: *_like functions deliberately deviate from numpy: it has subok=True +# as the default; we set subok=False and raise on anything else. 
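+# For example (illustrative note, not part of the upstream file): empty_like(a)
+# matches np.empty_like(a), but passing subok=True is rejected with an error
+# rather than being silently ignored.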
+ + +def empty_like( + prototype: ArrayLike, + dtype: Optional[DTypeLike] = None, + order: NotImplementedType = "K", + subok: NotImplementedType = False, + shape=None, +): + result = torch.empty_like(prototype, dtype=dtype) + if shape is not None: + result = result.reshape(shape) + return result + + +def full( + shape, + fill_value: ArrayLike, + dtype: Optional[DTypeLike] = None, + order: NotImplementedType = "C", + *, + like: NotImplementedType = None, +): + if isinstance(shape, int): + shape = (shape,) + if dtype is None: + dtype = fill_value.dtype + if not isinstance(shape, (tuple, list)): + shape = (shape,) + return torch.full(shape, fill_value, dtype=dtype) + + +def full_like( + a: ArrayLike, + fill_value, + dtype: Optional[DTypeLike] = None, + order: NotImplementedType = "K", + subok: NotImplementedType = False, + shape=None, +): + # XXX: fill_value broadcasts + result = torch.full_like(a, fill_value, dtype=dtype) + if shape is not None: + result = result.reshape(shape) + return result + + +def ones( + shape, + dtype: Optional[DTypeLike] = None, + order: NotImplementedType = "C", + *, + like: NotImplementedType = None, +): + if dtype is None: + dtype = _dtypes_impl.default_dtypes().float_dtype + return torch.ones(shape, dtype=dtype) + + +def ones_like( + a: ArrayLike, + dtype: Optional[DTypeLike] = None, + order: NotImplementedType = "K", + subok: NotImplementedType = False, + shape=None, +): + result = torch.ones_like(a, dtype=dtype) + if shape is not None: + result = result.reshape(shape) + return result + + +def zeros( + shape, + dtype: Optional[DTypeLike] = None, + order: NotImplementedType = "C", + *, + like: NotImplementedType = None, +): + if dtype is None: + dtype = _dtypes_impl.default_dtypes().float_dtype + return torch.zeros(shape, dtype=dtype) + + +def zeros_like( + a: ArrayLike, + dtype: Optional[DTypeLike] = None, + order: NotImplementedType = "K", + subok: NotImplementedType = False, + shape=None, +): + result = torch.zeros_like(a, dtype=dtype) + if shape is not None: + result = result.reshape(shape) + return result + + +# ### cov & corrcoef ### + + +def _xy_helper_corrcoef(x_tensor, y_tensor=None, rowvar=True): + """Prepare inputs for cov and corrcoef.""" + + # https://github.com/numpy/numpy/blob/v1.24.0/numpy/lib/function_base.py#L2636 + if y_tensor is not None: + # make sure x and y are at least 2D + ndim_extra = 2 - x_tensor.ndim + if ndim_extra > 0: + x_tensor = x_tensor.view((1,) * ndim_extra + x_tensor.shape) + if not rowvar and x_tensor.shape[0] != 1: + x_tensor = x_tensor.mT + x_tensor = x_tensor.clone() + + ndim_extra = 2 - y_tensor.ndim + if ndim_extra > 0: + y_tensor = y_tensor.view((1,) * ndim_extra + y_tensor.shape) + if not rowvar and y_tensor.shape[0] != 1: + y_tensor = y_tensor.mT + y_tensor = y_tensor.clone() + + x_tensor = _concatenate((x_tensor, y_tensor), axis=0) + + return x_tensor + + +def corrcoef( + x: ArrayLike, + y: Optional[ArrayLike] = None, + rowvar=True, + bias=None, + ddof=None, + *, + dtype: Optional[DTypeLike] = None, +): + if bias is not None or ddof is not None: + # deprecated in NumPy + raise NotImplementedError + xy_tensor = _xy_helper_corrcoef(x, y, rowvar) + + is_half = (xy_tensor.dtype == torch.float16) and xy_tensor.is_cpu + if is_half: + # work around torch's "addmm_impl_cpu_" not implemented for 'Half'" + dtype = torch.float32 + + xy_tensor = _util.cast_if_needed(xy_tensor, dtype) + result = torch.corrcoef(xy_tensor) + + if is_half: + result = result.to(torch.float16) + + return result + + +def cov( + m: ArrayLike, + y: 
Optional[ArrayLike] = None, + rowvar=True, + bias=False, + ddof=None, + fweights: Optional[ArrayLike] = None, + aweights: Optional[ArrayLike] = None, + *, + dtype: Optional[DTypeLike] = None, +): + m = _xy_helper_corrcoef(m, y, rowvar) + + if ddof is None: + ddof = 1 if bias == 0 else 0 + + is_half = (m.dtype == torch.float16) and m.is_cpu + if is_half: + # work around torch's "addmm_impl_cpu_" not implemented for 'Half'" + dtype = torch.float32 + + m = _util.cast_if_needed(m, dtype) + result = torch.cov(m, correction=ddof, aweights=aweights, fweights=fweights) + + if is_half: + result = result.to(torch.float16) + + return result + + +def _conv_corr_impl(a, v, mode): + dt = _dtypes_impl.result_type_impl(a, v) + a = _util.cast_if_needed(a, dt) + v = _util.cast_if_needed(v, dt) + + padding = v.shape[0] - 1 if mode == "full" else mode + + if padding == "same" and v.shape[0] % 2 == 0: + # UserWarning: Using padding='same' with even kernel lengths and odd + # dilation may require a zero-padded copy of the input be created + # (Triggered internally at pytorch/aten/src/ATen/native/Convolution.cpp:1010.) + raise NotImplementedError("mode='same' and even-length weights") + + # NumPy only accepts 1D arrays; PyTorch requires 2D inputs and 3D weights + aa = a[None, :] + vv = v[None, None, :] + + result = torch.nn.functional.conv1d(aa, vv, padding=padding) + + # torch returns a 2D result, numpy returns a 1D array + return result[0, :] + + +def convolve(a: ArrayLike, v: ArrayLike, mode="full"): + # NumPy: if v is longer than a, the arrays are swapped before computation + if a.shape[0] < v.shape[0]: + a, v = v, a + + # flip the weights since numpy does and torch does not + v = torch.flip(v, (0,)) + + return _conv_corr_impl(a, v, mode) + + +def correlate(a: ArrayLike, v: ArrayLike, mode="valid"): + v = torch.conj_physical(v) + return _conv_corr_impl(a, v, mode) + + +# ### logic & element selection ### + + +def bincount(x: ArrayLike, /, weights: Optional[ArrayLike] = None, minlength=0): + if x.numel() == 0: + # edge case allowed by numpy + x = x.new_empty(0, dtype=int) + + int_dtype = _dtypes_impl.default_dtypes().int_dtype + (x,) = _util.typecast_tensors((x,), int_dtype, casting="safe") + + return torch.bincount(x, weights, minlength) + + +def where( + condition: ArrayLike, + x: Optional[ArrayLikeOrScalar] = None, + y: Optional[ArrayLikeOrScalar] = None, + /, +): + if (x is None) != (y is None): + raise ValueError("either both or neither of x and y should be given") + + if condition.dtype != torch.bool: + condition = condition.to(torch.bool) + + if x is None and y is None: + result = torch.where(condition) + else: + result = torch.where(condition, x, y) + return result + + +# ###### module-level queries of object properties + + +def ndim(a: ArrayLike): + return a.ndim + + +def shape(a: ArrayLike): + return tuple(a.shape) + + +def size(a: ArrayLike, axis=None): + if axis is None: + return a.numel() + else: + return a.shape[axis] + + +# ###### shape manipulations and indexing + + +def expand_dims(a: ArrayLike, axis): + shape = _util.expand_shape(a.shape, axis) + return a.view(shape) # never copies + + +def flip(m: ArrayLike, axis=None): + # XXX: semantic difference: np.flip returns a view, torch.flip copies + if axis is None: + axis = tuple(range(m.ndim)) + else: + axis = _util.normalize_axis_tuple(axis, m.ndim) + return torch.flip(m, axis) + + +def flipud(m: ArrayLike): + return torch.flipud(m) + + +def fliplr(m: ArrayLike): + return torch.fliplr(m) + + +def rot90(m: ArrayLike, k=1, axes=(0, 1)): + axes = 
_util.normalize_axis_tuple(axes, m.ndim) + return torch.rot90(m, k, axes) + + +# ### broadcasting and indices ### + + +def broadcast_to(array: ArrayLike, shape, subok: NotImplementedType = False): + return torch.broadcast_to(array, size=shape) + + +# This is a function from tuples to tuples, so we just reuse it +from torch import broadcast_shapes + + +def broadcast_arrays(*args: ArrayLike, subok: NotImplementedType = False): + return torch.broadcast_tensors(*args) + + +def meshgrid(*xi: ArrayLike, copy=True, sparse=False, indexing="xy"): + ndim = len(xi) + + if indexing not in ["xy", "ij"]: + raise ValueError("Valid values for `indexing` are 'xy' and 'ij'.") + + s0 = (1,) * ndim + output = [x.reshape(s0[:i] + (-1,) + s0[i + 1 :]) for i, x in enumerate(xi)] + + if indexing == "xy" and ndim > 1: + # switch first and second axis + output[0] = output[0].reshape((1, -1) + s0[2:]) + output[1] = output[1].reshape((-1, 1) + s0[2:]) + + if not sparse: + # Return the full N-D matrix (not only the 1-D vector) + output = torch.broadcast_tensors(*output) + + if copy: + output = [x.clone() for x in output] + + return list(output) # match numpy, return a list + + +def indices(dimensions, dtype: Optional[DTypeLike] = int, sparse=False): + # https://github.com/numpy/numpy/blob/v1.24.0/numpy/core/numeric.py#L1691-L1791 + dimensions = tuple(dimensions) + N = len(dimensions) + shape = (1,) * N + if sparse: + res = tuple() + else: + res = torch.empty((N,) + dimensions, dtype=dtype) + for i, dim in enumerate(dimensions): + idx = torch.arange(dim, dtype=dtype).reshape( + shape[:i] + (dim,) + shape[i + 1 :] + ) + if sparse: + res = res + (idx,) + else: + res[i] = idx + return res + + +# ### tri*-something ### + + +def tril(m: ArrayLike, k=0): + return torch.tril(m, k) + + +def triu(m: ArrayLike, k=0): + return torch.triu(m, k) + + +def tril_indices(n, k=0, m=None): + if m is None: + m = n + return torch.tril_indices(n, m, offset=k) + + +def triu_indices(n, k=0, m=None): + if m is None: + m = n + return torch.triu_indices(n, m, offset=k) + + +def tril_indices_from(arr: ArrayLike, k=0): + if arr.ndim != 2: + raise ValueError("input array must be 2-d") + # Return a tensor rather than a tuple to avoid a graphbreak + return torch.tril_indices(arr.shape[0], arr.shape[1], offset=k) + + +def triu_indices_from(arr: ArrayLike, k=0): + if arr.ndim != 2: + raise ValueError("input array must be 2-d") + # Return a tensor rather than a tuple to avoid a graphbreak + return torch.triu_indices(arr.shape[0], arr.shape[1], offset=k) + + +def tri( + N, + M=None, + k=0, + dtype: Optional[DTypeLike] = None, + *, + like: NotImplementedType = None, +): + if M is None: + M = N + tensor = torch.ones((N, M), dtype=dtype) + return torch.tril(tensor, diagonal=k) + + +# ### equality, equivalence, allclose ### + + +def isclose(a: ArrayLike, b: ArrayLike, rtol=1.0e-5, atol=1.0e-8, equal_nan=False): + dtype = _dtypes_impl.result_type_impl(a, b) + a = _util.cast_if_needed(a, dtype) + b = _util.cast_if_needed(b, dtype) + return torch.isclose(a, b, rtol=rtol, atol=atol, equal_nan=equal_nan) + + +def allclose(a: ArrayLike, b: ArrayLike, rtol=1e-05, atol=1e-08, equal_nan=False): + dtype = _dtypes_impl.result_type_impl(a, b) + a = _util.cast_if_needed(a, dtype) + b = _util.cast_if_needed(b, dtype) + return torch.allclose(a, b, rtol=rtol, atol=atol, equal_nan=equal_nan) + + +def _tensor_equal(a1, a2, equal_nan=False): + # Implementation of array_equal/array_equiv. 
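+    # Shapes must match exactly here (array_equiv broadcasts before calling in);
+    # with equal_nan=True, positions where both inputs are NaN compare as equal.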
+ if a1.shape != a2.shape: + return False + cond = a1 == a2 + if equal_nan: + cond = cond | (torch.isnan(a1) & torch.isnan(a2)) + return cond.all().item() + + +def array_equal(a1: ArrayLike, a2: ArrayLike, equal_nan=False): + return _tensor_equal(a1, a2, equal_nan=equal_nan) + + +def array_equiv(a1: ArrayLike, a2: ArrayLike): + # *almost* the same as array_equal: _equiv tries to broadcast, _equal does not + try: + a1_t, a2_t = torch.broadcast_tensors(a1, a2) + except RuntimeError: + # failed to broadcast => not equivalent + return False + return _tensor_equal(a1_t, a2_t) + + +def nan_to_num( + x: ArrayLike, copy: NotImplementedType = True, nan=0.0, posinf=None, neginf=None +): + # work around RuntimeError: "nan_to_num" not implemented for 'ComplexDouble' + if x.is_complex(): + re = torch.nan_to_num(x.real, nan=nan, posinf=posinf, neginf=neginf) + im = torch.nan_to_num(x.imag, nan=nan, posinf=posinf, neginf=neginf) + return re + 1j * im + else: + return torch.nan_to_num(x, nan=nan, posinf=posinf, neginf=neginf) + + +# ### put/take_along_axis ### + + +def take( + a: ArrayLike, + indices: ArrayLike, + axis=None, + out: Optional[OutArray] = None, + mode: NotImplementedType = "raise", +): + (a,), axis = _util.axis_none_flatten(a, axis=axis) + axis = _util.normalize_axis_index(axis, a.ndim) + idx = (slice(None),) * axis + (indices, ...) + result = a[idx] + return result + + +def take_along_axis(arr: ArrayLike, indices: ArrayLike, axis): + (arr,), axis = _util.axis_none_flatten(arr, axis=axis) + axis = _util.normalize_axis_index(axis, arr.ndim) + return torch.take_along_dim(arr, indices, axis) + + +def put( + a: NDArray, + indices: ArrayLike, + values: ArrayLike, + mode: NotImplementedType = "raise", +): + v = values.type(a.dtype) + # If indices is larger than v, expand v to at least the size of indices. Any + # unnecessary trailing elements are then trimmed. + if indices.numel() > v.numel(): + ratio = (indices.numel() + v.numel() - 1) // v.numel() + v = v.unsqueeze(0).expand((ratio,) + v.shape) + # Trim unnecessary elements, regardless if v was expanded or not. Note + # np.put() trims v to match indices by default too. 
+ if indices.numel() < v.numel(): + v = v.flatten() + v = v[: indices.numel()] + a.put_(indices, v) + return None + + +def put_along_axis(arr: ArrayLike, indices: ArrayLike, values: ArrayLike, axis): + (arr,), axis = _util.axis_none_flatten(arr, axis=axis) + axis = _util.normalize_axis_index(axis, arr.ndim) + + indices, values = torch.broadcast_tensors(indices, values) + values = _util.cast_if_needed(values, arr.dtype) + result = torch.scatter(arr, axis, indices, values) + arr.copy_(result.reshape(arr.shape)) + return None + + +def choose( + a: ArrayLike, + choices: Sequence[ArrayLike], + out: Optional[OutArray] = None, + mode: NotImplementedType = "raise", +): + # First, broadcast elements of `choices` + choices = torch.stack(torch.broadcast_tensors(*choices)) + + # Use an analog of `gather(choices, 0, a)` which broadcasts `choices` vs `a`: + # (taken from https://github.com/pytorch/pytorch/issues/9407#issuecomment-1427907939) + idx_list = [ + torch.arange(dim).view((1,) * i + (dim,) + (1,) * (choices.ndim - i - 1)) + for i, dim in enumerate(choices.shape) + ] + + idx_list[0] = a + return choices[idx_list].squeeze(0) + + +# ### unique et al ### + + +def unique( + ar: ArrayLike, + return_index: NotImplementedType = False, + return_inverse=False, + return_counts=False, + axis=None, + *, + equal_nan: NotImplementedType = True, +): + (ar,), axis = _util.axis_none_flatten(ar, axis=axis) + axis = _util.normalize_axis_index(axis, ar.ndim) + + result = torch.unique( + ar, return_inverse=return_inverse, return_counts=return_counts, dim=axis + ) + + return result + + +def nonzero(a: ArrayLike): + return torch.nonzero(a, as_tuple=True) + + +def argwhere(a: ArrayLike): + return torch.argwhere(a) + + +def flatnonzero(a: ArrayLike): + return torch.flatten(a).nonzero(as_tuple=True)[0] + + +def clip( + a: ArrayLike, + min: Optional[ArrayLike] = None, + max: Optional[ArrayLike] = None, + out: Optional[OutArray] = None, +): + return torch.clamp(a, min, max) + + +def repeat(a: ArrayLike, repeats: ArrayLikeOrScalar, axis=None): + return torch.repeat_interleave(a, repeats, axis) + + +def tile(A: ArrayLike, reps): + if isinstance(reps, int): + reps = (reps,) + return torch.tile(A, reps) + + +def resize(a: ArrayLike, new_shape=None): + # implementation vendored from + # https://github.com/numpy/numpy/blob/v1.24.0/numpy/core/fromnumeric.py#L1420-L1497 + if new_shape is None: + return a + + if isinstance(new_shape, int): + new_shape = (new_shape,) + + a = a.flatten() + + new_size = 1 + for dim_length in new_shape: + new_size *= dim_length + if dim_length < 0: + raise ValueError("all elements of `new_shape` must be non-negative") + + if a.numel() == 0 or new_size == 0: + # First case must zero fill. The second would have repeats == 0. 
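+        # i.e. an empty source array is zero-filled up to new_shape, while an
+        # empty target shape simply yields an empty result.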
+ return torch.zeros(new_shape, dtype=a.dtype) + + repeats = -(-new_size // a.numel()) # ceil division + a = concatenate((a,) * repeats)[:new_size] + + return reshape(a, new_shape) + + +# ### diag et al ### + + +def diagonal(a: ArrayLike, offset=0, axis1=0, axis2=1): + axis1 = _util.normalize_axis_index(axis1, a.ndim) + axis2 = _util.normalize_axis_index(axis2, a.ndim) + return torch.diagonal(a, offset, axis1, axis2) + + +def trace( + a: ArrayLike, + offset=0, + axis1=0, + axis2=1, + dtype: Optional[DTypeLike] = None, + out: Optional[OutArray] = None, +): + result = torch.diagonal(a, offset, dim1=axis1, dim2=axis2).sum(-1, dtype=dtype) + return result + + +def eye( + N, + M=None, + k=0, + dtype: Optional[DTypeLike] = None, + order: NotImplementedType = "C", + *, + like: NotImplementedType = None, +): + if dtype is None: + dtype = _dtypes_impl.default_dtypes().float_dtype + if M is None: + M = N + z = torch.zeros(N, M, dtype=dtype) + z.diagonal(k).fill_(1) + return z + + +def identity(n, dtype: Optional[DTypeLike] = None, *, like: NotImplementedType = None): + return torch.eye(n, dtype=dtype) + + +def diag(v: ArrayLike, k=0): + return torch.diag(v, k) + + +def diagflat(v: ArrayLike, k=0): + return torch.diagflat(v, k) + + +def diag_indices(n, ndim=2): + idx = torch.arange(n) + return (idx,) * ndim + + +def diag_indices_from(arr: ArrayLike): + if not arr.ndim >= 2: + raise ValueError("input array must be at least 2-d") + # For more than d=2, the strided formula is only valid for arrays with + # all dimensions equal, so we check first. + s = arr.shape + if s[1:] != s[:-1]: + raise ValueError("All dimensions of input must be of equal length") + return diag_indices(s[0], arr.ndim) + + +def fill_diagonal(a: ArrayLike, val: ArrayLike, wrap=False): + if a.ndim < 2: + raise ValueError("array must be at least 2-d") + if val.numel() == 0 and not wrap: + a.fill_diagonal_(val) + return a + + if val.ndim == 0: + val = val.unsqueeze(0) + + # torch.Tensor.fill_diagonal_ only accepts scalars + # If the size of val is too large, then val is trimmed + if a.ndim == 2: + tall = a.shape[0] > a.shape[1] + # wrap does nothing for wide matrices... + if not wrap or not tall: + # Never wraps + diag = a.diagonal() + diag.copy_(val[: diag.numel()]) + else: + # wraps and tall... leaving one empty line between diagonals?! + max_, min_ = a.shape + idx = torch.arange(max_ - max_ // (min_ + 1)) + mod = idx % min_ + div = idx // min_ + a[(div * (min_ + 1) + mod, mod)] = val[: idx.numel()] + else: + idx = diag_indices_from(a) + # a.shape = (n, n, ..., n) + a[idx] = val[: a.shape[0]] + + return a + + +def vdot(a: ArrayLike, b: ArrayLike, /): + # 1. torch only accepts 1D arrays, numpy flattens + # 2. torch requires matching dtype, while numpy casts (?) 
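+    # Both points are handled below: the inputs are flattened to 1-D and cast to
+    # a common dtype via result_type_impl before calling torch.vdot.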
+ t_a, t_b = torch.atleast_1d(a, b) + if t_a.ndim > 1: + t_a = t_a.flatten() + if t_b.ndim > 1: + t_b = t_b.flatten() + + dtype = _dtypes_impl.result_type_impl(t_a, t_b) + is_half = dtype == torch.float16 and (t_a.is_cpu or t_b.is_cpu) + is_bool = dtype == torch.bool + + # work around torch's "dot" not implemented for 'Half', 'Bool' + if is_half: + dtype = torch.float32 + elif is_bool: + dtype = torch.uint8 + + t_a = _util.cast_if_needed(t_a, dtype) + t_b = _util.cast_if_needed(t_b, dtype) + + result = torch.vdot(t_a, t_b) + + if is_half: + result = result.to(torch.float16) + elif is_bool: + result = result.to(torch.bool) + + return result + + +def tensordot(a: ArrayLike, b: ArrayLike, axes=2): + if isinstance(axes, (list, tuple)): + axes = [[ax] if isinstance(ax, int) else ax for ax in axes] + + target_dtype = _dtypes_impl.result_type_impl(a, b) + a = _util.cast_if_needed(a, target_dtype) + b = _util.cast_if_needed(b, target_dtype) + + return torch.tensordot(a, b, dims=axes) + + +def dot(a: ArrayLike, b: ArrayLike, out: Optional[OutArray] = None): + dtype = _dtypes_impl.result_type_impl(a, b) + is_bool = dtype == torch.bool + if is_bool: + dtype = torch.uint8 + + a = _util.cast_if_needed(a, dtype) + b = _util.cast_if_needed(b, dtype) + + if a.ndim == 0 or b.ndim == 0: + result = a * b + else: + result = torch.matmul(a, b) + + if is_bool: + result = result.to(torch.bool) + + return result + + +def inner(a: ArrayLike, b: ArrayLike, /): + dtype = _dtypes_impl.result_type_impl(a, b) + is_half = dtype == torch.float16 and (a.is_cpu or b.is_cpu) + is_bool = dtype == torch.bool + + if is_half: + # work around torch's "addmm_impl_cpu_" not implemented for 'Half'" + dtype = torch.float32 + elif is_bool: + dtype = torch.uint8 + + a = _util.cast_if_needed(a, dtype) + b = _util.cast_if_needed(b, dtype) + + result = torch.inner(a, b) + + if is_half: + result = result.to(torch.float16) + elif is_bool: + result = result.to(torch.bool) + return result + + +def outer(a: ArrayLike, b: ArrayLike, out: Optional[OutArray] = None): + return torch.outer(a, b) + + +def cross(a: ArrayLike, b: ArrayLike, axisa=-1, axisb=-1, axisc=-1, axis=None): + # implementation vendored from + # https://github.com/numpy/numpy/blob/v1.24.0/numpy/core/numeric.py#L1486-L1685 + if axis is not None: + axisa, axisb, axisc = (axis,) * 3 + + # Check axisa and axisb are within bounds + axisa = _util.normalize_axis_index(axisa, a.ndim) + axisb = _util.normalize_axis_index(axisb, b.ndim) + + # Move working axis to the end of the shape + a = torch.moveaxis(a, axisa, -1) + b = torch.moveaxis(b, axisb, -1) + msg = "incompatible dimensions for cross product\n(dimension must be 2 or 3)" + if a.shape[-1] not in (2, 3) or b.shape[-1] not in (2, 3): + raise ValueError(msg) + + # Create the output array + shape = broadcast_shapes(a[..., 0].shape, b[..., 0].shape) + if a.shape[-1] == 3 or b.shape[-1] == 3: + shape += (3,) + # Check axisc is within bounds + axisc = _util.normalize_axis_index(axisc, len(shape)) + dtype = _dtypes_impl.result_type_impl(a, b) + cp = torch.empty(shape, dtype=dtype) + + # recast arrays as dtype + a = _util.cast_if_needed(a, dtype) + b = _util.cast_if_needed(b, dtype) + + # create local aliases for readability + a0 = a[..., 0] + a1 = a[..., 1] + if a.shape[-1] == 3: + a2 = a[..., 2] + b0 = b[..., 0] + b1 = b[..., 1] + if b.shape[-1] == 3: + b2 = b[..., 2] + if cp.ndim != 0 and cp.shape[-1] == 3: + cp0 = cp[..., 0] + cp1 = cp[..., 1] + cp2 = cp[..., 2] + + if a.shape[-1] == 2: + if b.shape[-1] == 2: + # a0 * b1 - a1 * b0 + 
cp[...] = a0 * b1 - a1 * b0 + return cp + else: + assert b.shape[-1] == 3 + # cp0 = a1 * b2 - 0 (a2 = 0) + # cp1 = 0 - a0 * b2 (a2 = 0) + # cp2 = a0 * b1 - a1 * b0 + cp0[...] = a1 * b2 + cp1[...] = -a0 * b2 + cp2[...] = a0 * b1 - a1 * b0 + else: + assert a.shape[-1] == 3 + if b.shape[-1] == 3: + cp0[...] = a1 * b2 - a2 * b1 + cp1[...] = a2 * b0 - a0 * b2 + cp2[...] = a0 * b1 - a1 * b0 + else: + assert b.shape[-1] == 2 + cp0[...] = -a2 * b1 + cp1[...] = a2 * b0 + cp2[...] = a0 * b1 - a1 * b0 + + return torch.moveaxis(cp, -1, axisc) + + +def einsum(*operands, out=None, dtype=None, order="K", casting="safe", optimize=False): + # Have to manually normalize *operands and **kwargs, following the NumPy signature + # We have a local import to avoid poluting the global space, as it will be then + # exported in funcs.py + from ._ndarray import ndarray + from ._normalizations import ( + maybe_copy_to, + normalize_array_like, + normalize_casting, + normalize_dtype, + wrap_tensors, + ) + + dtype = normalize_dtype(dtype) + casting = normalize_casting(casting) + if out is not None and not isinstance(out, ndarray): + raise TypeError("'out' must be an array") + if order != "K": + raise NotImplementedError("'order' parameter is not supported.") + + # parse arrays and normalize them + sublist_format = not isinstance(operands[0], str) + if sublist_format: + # op, str, op, str ... [sublistout] format: normalize every other argument + + # - if sublistout is not given, the length of operands is even, and we pick + # odd-numbered elements, which are arrays. + # - if sublistout is given, the length of operands is odd, we peel off + # the last one, and pick odd-numbered elements, which are arrays. + # Without [:-1], we would have picked sublistout, too. + array_operands = operands[:-1][::2] + else: + # ("ij->", arrays) format + subscripts, array_operands = operands[0], operands[1:] + + tensors = [normalize_array_like(op) for op in array_operands] + target_dtype = _dtypes_impl.result_type_impl(*tensors) if dtype is None else dtype + + # work around 'bmm' not implemented for 'Half' etc + is_half = target_dtype == torch.float16 and all(t.is_cpu for t in tensors) + if is_half: + target_dtype = torch.float32 + + is_short_int = target_dtype in [torch.uint8, torch.int8, torch.int16, torch.int32] + if is_short_int: + target_dtype = torch.int64 + + tensors = _util.typecast_tensors(tensors, target_dtype, casting) + + from torch.backends import opt_einsum + + try: + # set the global state to handle the optimize=... 
argument, restore on exit + if opt_einsum.is_available(): + old_strategy = torch.backends.opt_einsum.strategy + old_enabled = torch.backends.opt_einsum.enabled + + # torch.einsum calls opt_einsum.contract_path, which runs into + # https://github.com/dgasmith/opt_einsum/issues/219 + # for strategy={True, False} + if optimize is True: + optimize = "auto" + elif optimize is False: + torch.backends.opt_einsum.enabled = False + + torch.backends.opt_einsum.strategy = optimize + + if sublist_format: + # recombine operands + sublists = operands[1::2] + has_sublistout = len(operands) % 2 == 1 + if has_sublistout: + sublistout = operands[-1] + operands = list(itertools.chain.from_iterable(zip(tensors, sublists))) + if has_sublistout: + operands.append(sublistout) + + result = torch.einsum(*operands) + else: + result = torch.einsum(subscripts, *tensors) + + finally: + if opt_einsum.is_available(): + torch.backends.opt_einsum.strategy = old_strategy + torch.backends.opt_einsum.enabled = old_enabled + + result = maybe_copy_to(out, result) + return wrap_tensors(result) + + +# ### sort and partition ### + + +def _sort_helper(tensor, axis, kind, order): + if tensor.dtype.is_complex: + raise NotImplementedError(f"sorting {tensor.dtype} is not supported") + (tensor,), axis = _util.axis_none_flatten(tensor, axis=axis) + axis = _util.normalize_axis_index(axis, tensor.ndim) + + stable = kind == "stable" + + return tensor, axis, stable + + +def sort(a: ArrayLike, axis=-1, kind=None, order: NotImplementedType = None): + # `order` keyword arg is only relevant for structured dtypes; so not supported here. + a, axis, stable = _sort_helper(a, axis, kind, order) + result = torch.sort(a, dim=axis, stable=stable) + return result.values + + +def argsort(a: ArrayLike, axis=-1, kind=None, order: NotImplementedType = None): + a, axis, stable = _sort_helper(a, axis, kind, order) + return torch.argsort(a, dim=axis, stable=stable) + + +def searchsorted( + a: ArrayLike, v: ArrayLike, side="left", sorter: Optional[ArrayLike] = None +): + if a.dtype.is_complex: + raise NotImplementedError(f"searchsorted with dtype={a.dtype}") + + return torch.searchsorted(a, v, side=side, sorter=sorter) + + +# ### swap/move/roll axis ### + + +def moveaxis(a: ArrayLike, source, destination): + source = _util.normalize_axis_tuple(source, a.ndim, "source") + destination = _util.normalize_axis_tuple(destination, a.ndim, "destination") + return torch.moveaxis(a, source, destination) + + +def swapaxes(a: ArrayLike, axis1, axis2): + axis1 = _util.normalize_axis_index(axis1, a.ndim) + axis2 = _util.normalize_axis_index(axis2, a.ndim) + return torch.swapaxes(a, axis1, axis2) + + +def rollaxis(a: ArrayLike, axis, start=0): + # Straight vendor from: + # https://github.com/numpy/numpy/blob/v1.24.0/numpy/core/numeric.py#L1259 + # + # Also note this function in NumPy is mostly retained for backwards compat + # (https://stackoverflow.com/questions/29891583/reason-why-numpy-rollaxis-is-so-confusing) + # so let's not touch it unless hard pressed. + n = a.ndim + axis = _util.normalize_axis_index(axis, n) + if start < 0: + start += n + msg = "'%s' arg requires %d <= %s < %d, but %d was passed in" + if not (0 <= start < n + 1): + raise _util.AxisError(msg % ("start", -n, "start", n + 1, start)) + if axis < start: + # it's been removed + start -= 1 + if axis == start: + # numpy returns a view, here we try returning the tensor itself + # return tensor[...] 
+ return a + axes = list(range(0, n)) + axes.remove(axis) + axes.insert(start, axis) + return a.view(axes) + + +def roll(a: ArrayLike, shift, axis=None): + if axis is not None: + axis = _util.normalize_axis_tuple(axis, a.ndim, allow_duplicate=True) + if not isinstance(shift, tuple): + shift = (shift,) * len(axis) + return torch.roll(a, shift, axis) + + +# ### shape manipulations ### + + +def squeeze(a: ArrayLike, axis=None): + if axis == (): + result = a + elif axis is None: + result = a.squeeze() + else: + if isinstance(axis, tuple): + result = a + for ax in axis: + result = a.squeeze(ax) + else: + result = a.squeeze(axis) + return result + + +def reshape(a: ArrayLike, newshape, order: NotImplementedType = "C"): + # if sh = (1, 2, 3), numpy allows both .reshape(sh) and .reshape(*sh) + newshape = newshape[0] if len(newshape) == 1 else newshape + return a.reshape(newshape) + + +# NB: cannot use torch.reshape(a, newshape) above, because of +# (Pdb) torch.reshape(torch.as_tensor([1]), 1) +# *** TypeError: reshape(): argument 'shape' (position 2) must be tuple of SymInts, not int + + +def transpose(a: ArrayLike, axes=None): + # numpy allows both .transpose(sh) and .transpose(*sh) + # also older code uses axes being a list + if axes in [(), None, (None,)]: + axes = tuple(reversed(range(a.ndim))) + elif len(axes) == 1: + axes = axes[0] + return a.permute(axes) + + +def ravel(a: ArrayLike, order: NotImplementedType = "C"): + return torch.flatten(a) + + +def diff( + a: ArrayLike, + n=1, + axis=-1, + prepend: Optional[ArrayLike] = None, + append: Optional[ArrayLike] = None, +): + axis = _util.normalize_axis_index(axis, a.ndim) + + if n < 0: + raise ValueError(f"order must be non-negative but got {n}") + + if n == 0: + # match numpy and return the input immediately + return a + + if prepend is not None: + shape = list(a.shape) + shape[axis] = prepend.shape[axis] if prepend.ndim > 0 else 1 + prepend = torch.broadcast_to(prepend, shape) + + if append is not None: + shape = list(a.shape) + shape[axis] = append.shape[axis] if append.ndim > 0 else 1 + append = torch.broadcast_to(append, shape) + + return torch.diff(a, n, axis=axis, prepend=prepend, append=append) + + +# ### math functions ### + + +def angle(z: ArrayLike, deg=False): + result = torch.angle(z) + if deg: + result = result * (180 / torch.pi) + return result + + +def sinc(x: ArrayLike): + return torch.sinc(x) + + +# NB: have to normalize *varargs manually +def gradient(f: ArrayLike, *varargs, axis=None, edge_order=1): + N = f.ndim # number of dimensions + + varargs = _util.ndarrays_to_tensors(varargs) + + if axis is None: + axes = tuple(range(N)) + else: + axes = _util.normalize_axis_tuple(axis, N) + + len_axes = len(axes) + n = len(varargs) + if n == 0: + # no spacing argument - use 1 in all axes + dx = [1.0] * len_axes + elif n == 1 and (_dtypes_impl.is_scalar(varargs[0]) or varargs[0].ndim == 0): + # single scalar or 0D tensor for all axes (np.ndim(varargs[0]) == 0) + dx = varargs * len_axes + elif n == len_axes: + # scalar or 1d array for each axis + dx = list(varargs) + for i, distances in enumerate(dx): + distances = torch.as_tensor(distances) + if distances.ndim == 0: + continue + elif distances.ndim != 1: + raise ValueError("distances must be either scalars or 1d") + if len(distances) != f.shape[axes[i]]: + raise ValueError( + "when 1d, distances must match " + "the length of the corresponding dimension" + ) + if not (distances.dtype.is_floating_point or distances.dtype.is_complex): + distances = distances.double() + + diffx = 
torch.diff(distances) + # if distances are constant reduce to the scalar case + # since it brings a consistent speedup + if (diffx == diffx[0]).all(): + diffx = diffx[0] + dx[i] = diffx + else: + raise TypeError("invalid number of arguments") + + if edge_order > 2: + raise ValueError("'edge_order' greater than 2 not supported") + + # use central differences on interior and one-sided differences on the + # endpoints. This preserves second order-accuracy over the full domain. + + outvals = [] + + # create slice objects --- initially all are [:, :, ..., :] + slice1 = [slice(None)] * N + slice2 = [slice(None)] * N + slice3 = [slice(None)] * N + slice4 = [slice(None)] * N + + otype = f.dtype + if _dtypes_impl.python_type_for_torch(otype) in (int, bool): + # Convert to floating point. + # First check if f is a numpy integer type; if so, convert f to float64 + # to avoid modular arithmetic when computing the changes in f. + f = f.double() + otype = torch.float64 + + for axis, ax_dx in zip(axes, dx): + if f.shape[axis] < edge_order + 1: + raise ValueError( + "Shape of array too small to calculate a numerical gradient, " + "at least (edge_order + 1) elements are required." + ) + # result allocation + out = torch.empty_like(f, dtype=otype) + + # spacing for the current axis (NB: np.ndim(ax_dx) == 0) + uniform_spacing = _dtypes_impl.is_scalar(ax_dx) or ax_dx.ndim == 0 + + # Numerical differentiation: 2nd order interior + slice1[axis] = slice(1, -1) + slice2[axis] = slice(None, -2) + slice3[axis] = slice(1, -1) + slice4[axis] = slice(2, None) + + if uniform_spacing: + out[tuple(slice1)] = (f[tuple(slice4)] - f[tuple(slice2)]) / (2.0 * ax_dx) + else: + dx1 = ax_dx[0:-1] + dx2 = ax_dx[1:] + a = -(dx2) / (dx1 * (dx1 + dx2)) + b = (dx2 - dx1) / (dx1 * dx2) + c = dx1 / (dx2 * (dx1 + dx2)) + # fix the shape for broadcasting + shape = [1] * N + shape[axis] = -1 + a = a.reshape(shape) + b = b.reshape(shape) + c = c.reshape(shape) + # 1D equivalent -- out[1:-1] = a * f[:-2] + b * f[1:-1] + c * f[2:] + out[tuple(slice1)] = ( + a * f[tuple(slice2)] + b * f[tuple(slice3)] + c * f[tuple(slice4)] + ) + + # Numerical differentiation: 1st order edges + if edge_order == 1: + slice1[axis] = 0 + slice2[axis] = 1 + slice3[axis] = 0 + dx_0 = ax_dx if uniform_spacing else ax_dx[0] + # 1D equivalent -- out[0] = (f[1] - f[0]) / (x[1] - x[0]) + out[tuple(slice1)] = (f[tuple(slice2)] - f[tuple(slice3)]) / dx_0 + + slice1[axis] = -1 + slice2[axis] = -1 + slice3[axis] = -2 + dx_n = ax_dx if uniform_spacing else ax_dx[-1] + # 1D equivalent -- out[-1] = (f[-1] - f[-2]) / (x[-1] - x[-2]) + out[tuple(slice1)] = (f[tuple(slice2)] - f[tuple(slice3)]) / dx_n + + # Numerical differentiation: 2nd order edges + else: + slice1[axis] = 0 + slice2[axis] = 0 + slice3[axis] = 1 + slice4[axis] = 2 + if uniform_spacing: + a = -1.5 / ax_dx + b = 2.0 / ax_dx + c = -0.5 / ax_dx + else: + dx1 = ax_dx[0] + dx2 = ax_dx[1] + a = -(2.0 * dx1 + dx2) / (dx1 * (dx1 + dx2)) + b = (dx1 + dx2) / (dx1 * dx2) + c = -dx1 / (dx2 * (dx1 + dx2)) + # 1D equivalent -- out[0] = a * f[0] + b * f[1] + c * f[2] + out[tuple(slice1)] = ( + a * f[tuple(slice2)] + b * f[tuple(slice3)] + c * f[tuple(slice4)] + ) + + slice1[axis] = -1 + slice2[axis] = -3 + slice3[axis] = -2 + slice4[axis] = -1 + if uniform_spacing: + a = 0.5 / ax_dx + b = -2.0 / ax_dx + c = 1.5 / ax_dx + else: + dx1 = ax_dx[-2] + dx2 = ax_dx[-1] + a = (dx2) / (dx1 * (dx1 + dx2)) + b = -(dx2 + dx1) / (dx1 * dx2) + c = (2.0 * dx2 + dx1) / (dx2 * (dx1 + dx2)) + # 1D equivalent -- out[-1] = a * f[-3] + b * f[-2] + 
c * f[-1] + out[tuple(slice1)] = ( + a * f[tuple(slice2)] + b * f[tuple(slice3)] + c * f[tuple(slice4)] + ) + + outvals.append(out) + + # reset the slice object in this dimension to ":" + slice1[axis] = slice(None) + slice2[axis] = slice(None) + slice3[axis] = slice(None) + slice4[axis] = slice(None) + + if len_axes == 1: + return outvals[0] + else: + return outvals + + +# ### Type/shape etc queries ### + + +def round(a: ArrayLike, decimals=0, out: Optional[OutArray] = None): + if a.is_floating_point(): + result = torch.round(a, decimals=decimals) + elif a.is_complex(): + # RuntimeError: "round_cpu" not implemented for 'ComplexFloat' + result = torch.complex( + torch.round(a.real, decimals=decimals), + torch.round(a.imag, decimals=decimals), + ) + else: + # RuntimeError: "round_cpu" not implemented for 'int' + result = a + return result + + +around = round +round_ = round + + +def real_if_close(a: ArrayLike, tol=100): + if not torch.is_complex(a): + return a + if tol > 1: + # Undocumented in numpy: if tol < 1, it's an absolute tolerance! + # Otherwise, tol > 1 is relative tolerance, in units of the dtype epsilon + # https://github.com/numpy/numpy/blob/v1.24.0/numpy/lib/type_check.py#L577 + tol = tol * torch.finfo(a.dtype).eps + + mask = torch.abs(a.imag) < tol + return a.real if mask.all() else a + + +def real(a: ArrayLike): + return torch.real(a) + + +def imag(a: ArrayLike): + if a.is_complex(): + return a.imag + return torch.zeros_like(a) + + +def iscomplex(x: ArrayLike): + if torch.is_complex(x): + return x.imag != 0 + return torch.zeros_like(x, dtype=torch.bool) + + +def isreal(x: ArrayLike): + if torch.is_complex(x): + return x.imag == 0 + return torch.ones_like(x, dtype=torch.bool) + + +def iscomplexobj(x: ArrayLike): + return torch.is_complex(x) + + +def isrealobj(x: ArrayLike): + return not torch.is_complex(x) + + +def isneginf(x: ArrayLike, out: Optional[OutArray] = None): + return torch.isneginf(x) + + +def isposinf(x: ArrayLike, out: Optional[OutArray] = None): + return torch.isposinf(x) + + +def i0(x: ArrayLike): + return torch.special.i0(x) + + +def isscalar(a): + # We need to use normalize_array_like, but we don't want to export it in funcs.py + from ._normalizations import normalize_array_like + + try: + t = normalize_array_like(a) + return t.numel() == 1 + except Exception: + return False + + +# ### Filter windows ### + + +def hamming(M): + dtype = _dtypes_impl.default_dtypes().float_dtype + return torch.hamming_window(M, periodic=False, dtype=dtype) + + +def hanning(M): + dtype = _dtypes_impl.default_dtypes().float_dtype + return torch.hann_window(M, periodic=False, dtype=dtype) + + +def kaiser(M, beta): + dtype = _dtypes_impl.default_dtypes().float_dtype + return torch.kaiser_window(M, beta=beta, periodic=False, dtype=dtype) + + +def blackman(M): + dtype = _dtypes_impl.default_dtypes().float_dtype + return torch.blackman_window(M, periodic=False, dtype=dtype) + + +def bartlett(M): + dtype = _dtypes_impl.default_dtypes().float_dtype + return torch.bartlett_window(M, periodic=False, dtype=dtype) + + +# ### Dtype routines ### + +# vendored from https://github.com/numpy/numpy/blob/v1.24.0/numpy/lib/type_check.py#L666 + + +array_type = [ + [torch.float16, torch.float32, torch.float64], + [None, torch.complex64, torch.complex128], +] +array_precision = { + torch.float16: 0, + torch.float32: 1, + torch.float64: 2, + torch.complex64: 1, + torch.complex128: 2, +} + + +def common_type(*tensors: ArrayLike): + is_complex = False + precision = 0 + for a in tensors: + t = a.dtype + if 
iscomplexobj(a): + is_complex = True + if not (t.is_floating_point or t.is_complex): + p = 2 # array_precision[_nx.double] + else: + p = array_precision.get(t, None) + if p is None: + raise TypeError("can't get common type for non-numeric array") + precision = builtins.max(precision, p) + if is_complex: + return array_type[1][precision] + else: + return array_type[0][precision] + + +# ### histograms ### + + +def histogram( + a: ArrayLike, + bins: ArrayLike = 10, + range=None, + normed=None, + weights: Optional[ArrayLike] = None, + density=None, +): + if normed is not None: + raise ValueError("normed argument is deprecated, use density= instead") + + if weights is not None and weights.dtype.is_complex: + raise NotImplementedError("complex weights histogram.") + + is_a_int = not (a.dtype.is_floating_point or a.dtype.is_complex) + is_w_int = weights is None or not weights.dtype.is_floating_point + if is_a_int: + a = a.double() + + if weights is not None: + weights = _util.cast_if_needed(weights, a.dtype) + + if isinstance(bins, torch.Tensor): + if bins.ndim == 0: + # bins was a single int + bins = operator.index(bins) + else: + bins = _util.cast_if_needed(bins, a.dtype) + + if range is None: + h, b = torch.histogram(a, bins, weight=weights, density=bool(density)) + else: + h, b = torch.histogram( + a, bins, range=range, weight=weights, density=bool(density) + ) + + if not density and is_w_int: + h = h.long() + if is_a_int: + b = b.long() + + return h, b + + +def histogram2d( + x, + y, + bins=10, + range: Optional[ArrayLike] = None, + normed=None, + weights: Optional[ArrayLike] = None, + density=None, +): + # vendored from https://github.com/numpy/numpy/blob/v1.24.0/numpy/lib/twodim_base.py#L655-L821 + if len(x) != len(y): + raise ValueError("x and y must have the same length.") + + try: + N = len(bins) + except TypeError: + N = 1 + + if N != 1 and N != 2: + bins = [bins, bins] + + h, e = histogramdd((x, y), bins, range, normed, weights, density) + + return h, e[0], e[1] + + +def histogramdd( + sample, + bins=10, + range: Optional[ArrayLike] = None, + normed=None, + weights: Optional[ArrayLike] = None, + density=None, +): + # have to normalize manually because `sample` interpretation differs + # for a list of lists and a 2D array + if normed is not None: + raise ValueError("normed argument is deprecated, use density= instead") + + from ._normalizations import normalize_array_like, normalize_seq_array_like + + if isinstance(sample, (list, tuple)): + sample = normalize_array_like(sample).T + else: + sample = normalize_array_like(sample) + + sample = torch.atleast_2d(sample) + + if not (sample.dtype.is_floating_point or sample.dtype.is_complex): + sample = sample.double() + + # bins is either an int, or a sequence of ints or a sequence of arrays + bins_is_array = not ( + isinstance(bins, int) or builtins.all(isinstance(b, int) for b in bins) + ) + if bins_is_array: + bins = normalize_seq_array_like(bins) + bins_dtypes = [b.dtype for b in bins] + bins = [_util.cast_if_needed(b, sample.dtype) for b in bins] + + if range is not None: + range = range.flatten().tolist() + + if weights is not None: + # range=... 
is required : interleave min and max values per dimension + mm = sample.aminmax(dim=0) + range = torch.cat(mm).reshape(2, -1).T.flatten() + range = tuple(range.tolist()) + weights = _util.cast_if_needed(weights, sample.dtype) + w_kwd = {"weight": weights} + else: + w_kwd = {} + + h, b = torch.histogramdd(sample, bins, range, density=bool(density), **w_kwd) + + if bins_is_array: + b = [_util.cast_if_needed(bb, dtyp) for bb, dtyp in zip(b, bins_dtypes)] + + return h, b + + +# ### odds and ends + + +def min_scalar_type(a: ArrayLike, /): + # https://github.com/numpy/numpy/blob/maintenance/1.24.x/numpy/core/src/multiarray/convert_datatype.c#L1288 + + from ._dtypes import DType + + if a.numel() > 1: + # numpy docs: "For non-scalar array a, returns the vector’s dtype unmodified." + return DType(a.dtype) + + if a.dtype == torch.bool: + dtype = torch.bool + + elif a.dtype.is_complex: + fi = torch.finfo(torch.float32) + fits_in_single = a.dtype == torch.complex64 or ( + fi.min <= a.real <= fi.max and fi.min <= a.imag <= fi.max + ) + dtype = torch.complex64 if fits_in_single else torch.complex128 + + elif a.dtype.is_floating_point: + for dt in [torch.float16, torch.float32, torch.float64]: + fi = torch.finfo(dt) + if fi.min <= a <= fi.max: + dtype = dt + break + else: + # must be integer + for dt in [torch.uint8, torch.int8, torch.int16, torch.int32, torch.int64]: + # Prefer unsigned int where possible, as numpy does. + ii = torch.iinfo(dt) + if ii.min <= a <= ii.max: + dtype = dt + break + + return DType(dtype) + + +def pad(array: ArrayLike, pad_width: ArrayLike, mode="constant", **kwargs): + if mode != "constant": + raise NotImplementedError + value = kwargs.get("constant_values", 0) + # `value` must be a python scalar for torch.nn.functional.pad + typ = _dtypes_impl.python_type_for_torch(array.dtype) + value = typ(value) + + pad_width = torch.broadcast_to(pad_width, (array.ndim, 2)) + pad_width = torch.flip(pad_width, (0,)).flatten() + + return torch.nn.functional.pad(array, tuple(pad_width), value=value) diff --git a/venv/lib/python3.10/site-packages/torch/_numpy/_getlimits.py b/venv/lib/python3.10/site-packages/torch/_numpy/_getlimits.py new file mode 100644 index 0000000000000000000000000000000000000000..b0c46094e8782f8309ba6cfa15cb59f93cf667a0 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/_numpy/_getlimits.py @@ -0,0 +1,15 @@ +# mypy: ignore-errors + +import torch + +from . import _dtypes + + +def finfo(dtyp): + torch_dtype = _dtypes.dtype(dtyp).torch_dtype + return torch.finfo(torch_dtype) + + +def iinfo(dtyp): + torch_dtype = _dtypes.dtype(dtyp).torch_dtype + return torch.iinfo(torch_dtype) diff --git a/venv/lib/python3.10/site-packages/torch/_numpy/_ndarray.py b/venv/lib/python3.10/site-packages/torch/_numpy/_ndarray.py new file mode 100644 index 0000000000000000000000000000000000000000..c93ba2005842ce257231e220d3d3ae51c2d7ec14 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/_numpy/_ndarray.py @@ -0,0 +1,591 @@ +# mypy: ignore-errors + +from __future__ import annotations + +import builtins +import math +import operator +from typing import Sequence + +import torch + +from . 
import _dtypes, _dtypes_impl, _funcs, _ufuncs, _util +from ._normalizations import ( + ArrayLike, + normalize_array_like, + normalizer, + NotImplementedType, +) + +newaxis = None + +FLAGS = [ + "C_CONTIGUOUS", + "F_CONTIGUOUS", + "OWNDATA", + "WRITEABLE", + "ALIGNED", + "WRITEBACKIFCOPY", + "FNC", + "FORC", + "BEHAVED", + "CARRAY", + "FARRAY", +] + +SHORTHAND_TO_FLAGS = { + "C": "C_CONTIGUOUS", + "F": "F_CONTIGUOUS", + "O": "OWNDATA", + "W": "WRITEABLE", + "A": "ALIGNED", + "X": "WRITEBACKIFCOPY", + "B": "BEHAVED", + "CA": "CARRAY", + "FA": "FARRAY", +} + + +class Flags: + def __init__(self, flag_to_value: dict): + assert all(k in FLAGS for k in flag_to_value.keys()) # sanity check + self._flag_to_value = flag_to_value + + def __getattr__(self, attr: str): + if attr.islower() and attr.upper() in FLAGS: + return self[attr.upper()] + else: + raise AttributeError(f"No flag attribute '{attr}'") + + def __getitem__(self, key): + if key in SHORTHAND_TO_FLAGS.keys(): + key = SHORTHAND_TO_FLAGS[key] + if key in FLAGS: + try: + return self._flag_to_value[key] + except KeyError as e: + raise NotImplementedError(f"{key=}") from e + else: + raise KeyError(f"No flag key '{key}'") + + def __setattr__(self, attr, value): + if attr.islower() and attr.upper() in FLAGS: + self[attr.upper()] = value + else: + super().__setattr__(attr, value) + + def __setitem__(self, key, value): + if key in FLAGS or key in SHORTHAND_TO_FLAGS.keys(): + raise NotImplementedError("Modifying flags is not implemented") + else: + raise KeyError(f"No flag key '{key}'") + + +def create_method(fn, name=None): + name = name or fn.__name__ + + def f(*args, **kwargs): + return fn(*args, **kwargs) + + f.__name__ = name + f.__qualname__ = f"ndarray.{name}" + return f + + +# Map ndarray.name_method -> np.name_func +# If name_func == None, it means that name_method == name_func +methods = { + "clip": None, + "nonzero": None, + "repeat": None, + "round": None, + "squeeze": None, + "swapaxes": None, + "ravel": None, + # linalg + "diagonal": None, + "dot": None, + "trace": None, + # sorting + "argsort": None, + "searchsorted": None, + # reductions + "argmax": None, + "argmin": None, + "any": None, + "all": None, + "max": None, + "min": None, + "ptp": None, + "sum": None, + "prod": None, + "mean": None, + "var": None, + "std": None, + # scans + "cumsum": None, + "cumprod": None, + # advanced indexing + "take": None, + "choose": None, +} + +dunder = { + "abs": "absolute", + "invert": None, + "pos": "positive", + "neg": "negative", + "gt": "greater", + "lt": "less", + "ge": "greater_equal", + "le": "less_equal", +} + +# dunder methods with right-looking and in-place variants +ri_dunder = { + "add": None, + "sub": "subtract", + "mul": "multiply", + "truediv": "divide", + "floordiv": "floor_divide", + "pow": "power", + "mod": "remainder", + "and": "bitwise_and", + "or": "bitwise_or", + "xor": "bitwise_xor", + "lshift": "left_shift", + "rshift": "right_shift", + "matmul": None, +} + + +def _upcast_int_indices(index): + if isinstance(index, torch.Tensor): + if index.dtype in (torch.int8, torch.int16, torch.int32, torch.uint8): + return index.to(torch.int64) + elif isinstance(index, tuple): + return tuple(_upcast_int_indices(i) for i in index) + return index + + +# Used to indicate that a parameter is unspecified (as opposed to explicitly +# `None`) +class _Unspecified: + pass + + +_Unspecified.unspecified = _Unspecified() + +############################################################### +# ndarray class # 
+############################################################### + + +class ndarray: + def __init__(self, t=None): + if t is None: + self.tensor = torch.Tensor() + elif isinstance(t, torch.Tensor): + self.tensor = t + else: + raise ValueError( + "ndarray constructor is not recommended; prefer" + "either array(...) or zeros/empty(...)" + ) + + # Register NumPy functions as methods + for method, name in methods.items(): + fn = getattr(_funcs, name or method) + vars()[method] = create_method(fn, method) + + # Regular methods but coming from ufuncs + conj = create_method(_ufuncs.conjugate, "conj") + conjugate = create_method(_ufuncs.conjugate) + + for method, name in dunder.items(): + fn = getattr(_ufuncs, name or method) + method = f"__{method}__" + vars()[method] = create_method(fn, method) + + for method, name in ri_dunder.items(): + fn = getattr(_ufuncs, name or method) + plain = f"__{method}__" + vars()[plain] = create_method(fn, plain) + rvar = f"__r{method}__" + vars()[rvar] = create_method(lambda self, other, fn=fn: fn(other, self), rvar) + ivar = f"__i{method}__" + vars()[ivar] = create_method( + lambda self, other, fn=fn: fn(self, other, out=self), ivar + ) + + # There's no __idivmod__ + __divmod__ = create_method(_ufuncs.divmod, "__divmod__") + __rdivmod__ = create_method( + lambda self, other: _ufuncs.divmod(other, self), "__rdivmod__" + ) + + # prevent loop variables leaking into the ndarray class namespace + del ivar, rvar, name, plain, fn, method + + @property + def shape(self): + return tuple(self.tensor.shape) + + @property + def size(self): + return self.tensor.numel() + + @property + def ndim(self): + return self.tensor.ndim + + @property + def dtype(self): + return _dtypes.dtype(self.tensor.dtype) + + @property + def strides(self): + elsize = self.tensor.element_size() + return tuple(stride * elsize for stride in self.tensor.stride()) + + @property + def itemsize(self): + return self.tensor.element_size() + + @property + def flags(self): + # Note contiguous in torch is assumed C-style + return Flags( + { + "C_CONTIGUOUS": self.tensor.is_contiguous(), + "F_CONTIGUOUS": self.T.tensor.is_contiguous(), + "OWNDATA": self.tensor._base is None, + "WRITEABLE": True, # pytorch does not have readonly tensors + } + ) + + @property + def data(self): + return self.tensor.data_ptr() + + @property + def nbytes(self): + return self.tensor.storage().nbytes() + + @property + def T(self): + return self.transpose() + + @property + def real(self): + return _funcs.real(self) + + @real.setter + def real(self, value): + self.tensor.real = asarray(value).tensor + + @property + def imag(self): + return _funcs.imag(self) + + @imag.setter + def imag(self, value): + self.tensor.imag = asarray(value).tensor + + # ctors + def astype(self, dtype, order="K", casting="unsafe", subok=True, copy=True): + if order != "K": + raise NotImplementedError(f"astype(..., order={order} is not implemented.") + if casting != "unsafe": + raise NotImplementedError( + f"astype(..., casting={casting} is not implemented." 
+ ) + if not subok: + raise NotImplementedError(f"astype(..., subok={subok} is not implemented.") + if not copy: + raise NotImplementedError(f"astype(..., copy={copy} is not implemented.") + torch_dtype = _dtypes.dtype(dtype).torch_dtype + t = self.tensor.to(torch_dtype) + return ndarray(t) + + @normalizer + def copy(self: ArrayLike, order: NotImplementedType = "C"): + return self.clone() + + @normalizer + def flatten(self: ArrayLike, order: NotImplementedType = "C"): + return torch.flatten(self) + + def resize(self, *new_shape, refcheck=False): + # NB: differs from np.resize: fills with zeros instead of making repeated copies of input. + if refcheck: + raise NotImplementedError( + f"resize(..., refcheck={refcheck} is not implemented." + ) + if new_shape in [(), (None,)]: + return + + # support both x.resize((2, 2)) and x.resize(2, 2) + if len(new_shape) == 1: + new_shape = new_shape[0] + if isinstance(new_shape, int): + new_shape = (new_shape,) + + if builtins.any(x < 0 for x in new_shape): + raise ValueError("all elements of `new_shape` must be non-negative") + + new_numel, old_numel = math.prod(new_shape), self.tensor.numel() + + self.tensor.resize_(new_shape) + + if new_numel >= old_numel: + # zero-fill new elements + assert self.tensor.is_contiguous() + b = self.tensor.flatten() # does not copy + b[old_numel:].zero_() + + def view(self, dtype=_Unspecified.unspecified, type=_Unspecified.unspecified): + if dtype is _Unspecified.unspecified: + dtype = self.dtype + if type is not _Unspecified.unspecified: + raise NotImplementedError(f"view(..., type={type} is not implemented.") + torch_dtype = _dtypes.dtype(dtype).torch_dtype + tview = self.tensor.view(torch_dtype) + return ndarray(tview) + + @normalizer + def fill(self, value: ArrayLike): + # Both Pytorch and NumPy accept 0D arrays/tensors and scalars, and + # error out on D > 0 arrays + self.tensor.fill_(value) + + def tolist(self): + return self.tensor.tolist() + + def __iter__(self): + return (ndarray(x) for x in self.tensor.__iter__()) + + def __str__(self): + return ( + str(self.tensor) + .replace("tensor", "torch.ndarray") + .replace("dtype=torch.", "dtype=") + ) + + __repr__ = create_method(__str__) + + def __eq__(self, other): + try: + return _ufuncs.equal(self, other) + except (RuntimeError, TypeError): + # Failed to convert other to array: definitely not equal. 
+ falsy = torch.full(self.shape, fill_value=False, dtype=bool) + return asarray(falsy) + + def __ne__(self, other): + return ~(self == other) + + def __index__(self): + try: + return operator.index(self.tensor.item()) + except Exception as exc: + raise TypeError( + "only integer scalar arrays can be converted to a scalar index" + ) from exc + + def __bool__(self): + return bool(self.tensor) + + def __int__(self): + return int(self.tensor) + + def __float__(self): + return float(self.tensor) + + def __complex__(self): + return complex(self.tensor) + + def is_integer(self): + try: + v = self.tensor.item() + result = int(v) == v + except Exception: + result = False + return result + + def __len__(self): + return self.tensor.shape[0] + + def __contains__(self, x): + return self.tensor.__contains__(x) + + def transpose(self, *axes): + # np.transpose(arr, axis=None) but arr.transpose(*axes) + return _funcs.transpose(self, axes) + + def reshape(self, *shape, order="C"): + # arr.reshape(shape) and arr.reshape(*shape) + return _funcs.reshape(self, shape, order=order) + + def sort(self, axis=-1, kind=None, order=None): + # ndarray.sort works in-place + _funcs.copyto(self, _funcs.sort(self, axis, kind, order)) + + def item(self, *args): + # Mimic NumPy's implementation with three special cases (no arguments, + # a flat index and a multi-index): + # https://github.com/numpy/numpy/blob/main/numpy/core/src/multiarray/methods.c#L702 + if args == (): + return self.tensor.item() + elif len(args) == 1: + # int argument + return self.ravel()[args[0]] + else: + return self.__getitem__(args) + + def __getitem__(self, index): + tensor = self.tensor + + def neg_step(i, s): + if not (isinstance(s, slice) and s.step is not None and s.step < 0): + return s + + nonlocal tensor + tensor = torch.flip(tensor, (i,)) + + # Account for the fact that a slice includes the start but not the end + assert isinstance(s.start, int) or s.start is None + assert isinstance(s.stop, int) or s.stop is None + start = s.stop + 1 if s.stop else None + stop = s.start + 1 if s.start else None + + return slice(start, stop, -s.step) + + if isinstance(index, Sequence): + index = type(index)(neg_step(i, s) for i, s in enumerate(index)) + else: + index = neg_step(0, index) + index = _util.ndarrays_to_tensors(index) + index = _upcast_int_indices(index) + return ndarray(tensor.__getitem__(index)) + + def __setitem__(self, index, value): + index = _util.ndarrays_to_tensors(index) + index = _upcast_int_indices(index) + + if not _dtypes_impl.is_scalar(value): + value = normalize_array_like(value) + value = _util.cast_if_needed(value, self.tensor.dtype) + + return self.tensor.__setitem__(index, value) + + take = _funcs.take + put = _funcs.put + + def __dlpack__(self, *, stream=None): + return self.tensor.__dlpack__(stream=stream) + + def __dlpack_device__(self): + return self.tensor.__dlpack_device__() + + +def _tolist(obj): + """Recursively convert tensors into lists.""" + a1 = [] + for elem in obj: + if isinstance(elem, (list, tuple)): + elem = _tolist(elem) + if isinstance(elem, ndarray): + a1.append(elem.tensor.tolist()) + else: + a1.append(elem) + return a1 + + +# This is the ideally the only place which talks to ndarray directly. +# The rest goes through asarray (preferred) or array. 
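# Reviewer sketch (assumes this torch checkout exposes `array`/`asarray` at the
# torch._numpy package top level): copy behaviour of the entry points defined next.
# `asarray` of an existing ndarray takes the no-copy happy path and should return the
# same wrapper object, while `array` copies by default and returns a new wrapper.
import torch._numpy as tnp

a = tnp.asarray([[1, 2], [3, 4]])
b = tnp.asarray(a)   # no conversion needed: same wrapper object
c = tnp.array(a)     # copy=True by default: new wrapper around copied data
print(b is a, c is a)  # expected: True False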
+ + +def array(obj, dtype=None, *, copy=True, order="K", subok=False, ndmin=0, like=None): + if subok is not False: + raise NotImplementedError("'subok' parameter is not supported.") + if like is not None: + raise NotImplementedError("'like' parameter is not supported.") + if order != "K": + raise NotImplementedError() + + # a happy path + if ( + isinstance(obj, ndarray) + and copy is False + and dtype is None + and ndmin <= obj.ndim + ): + return obj + + if isinstance(obj, (list, tuple)): + # FIXME and they have the same dtype, device, etc + if obj and all(isinstance(x, torch.Tensor) for x in obj): + # list of arrays: *under torch.Dynamo* these are FakeTensors + obj = torch.stack(obj) + else: + # XXX: remove tolist + # lists of ndarrays: [1, [2, 3], ndarray(4)] convert to lists of lists + obj = _tolist(obj) + + # is obj an ndarray already? + if isinstance(obj, ndarray): + obj = obj.tensor + + # is a specific dtype requested? + torch_dtype = None + if dtype is not None: + torch_dtype = _dtypes.dtype(dtype).torch_dtype + + tensor = _util._coerce_to_tensor(obj, torch_dtype, copy, ndmin) + return ndarray(tensor) + + +def asarray(a, dtype=None, order="K", *, like=None): + return array(a, dtype=dtype, order=order, like=like, copy=False, ndmin=0) + + +def ascontiguousarray(a, dtype=None, *, like=None): + arr = asarray(a, dtype=dtype, like=like) + if not arr.tensor.is_contiguous(): + arr.tensor = arr.tensor.contiguous() + return arr + + +def from_dlpack(x, /): + t = torch.from_dlpack(x) + return ndarray(t) + + +def _extract_dtype(entry): + try: + dty = _dtypes.dtype(entry) + except Exception: + dty = asarray(entry).dtype + return dty + + +def can_cast(from_, to, casting="safe"): + from_ = _extract_dtype(from_) + to_ = _extract_dtype(to) + + return _dtypes_impl.can_cast_impl(from_.torch_dtype, to_.torch_dtype, casting) + + +def result_type(*arrays_and_dtypes): + tensors = [] + for entry in arrays_and_dtypes: + try: + t = asarray(entry).tensor + except (RuntimeError, ValueError, TypeError): + dty = _dtypes.dtype(entry) + t = torch.empty(1, dtype=dty.torch_dtype) + tensors.append(t) + + torch_dtype = _dtypes_impl.result_type_impl(*tensors) + return _dtypes.dtype(torch_dtype) diff --git a/venv/lib/python3.10/site-packages/torch/_numpy/_normalizations.py b/venv/lib/python3.10/site-packages/torch/_numpy/_normalizations.py new file mode 100644 index 0000000000000000000000000000000000000000..0569d2d3e7dc28a7cca030688573ac0f6b981e89 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/_numpy/_normalizations.py @@ -0,0 +1,258 @@ +# mypy: ignore-errors + +""" "Normalize" arguments: convert array_likes to tensors, dtypes to torch dtypes and so on. +""" +from __future__ import annotations + +import functools +import inspect +import operator +import typing + +import torch + +from . import _dtypes, _dtypes_impl, _util + +ArrayLike = typing.TypeVar("ArrayLike") +Scalar = typing.Union[int, float, complex, bool] +ArrayLikeOrScalar = typing.Union[ArrayLike, Scalar] + +DTypeLike = typing.TypeVar("DTypeLike") +AxisLike = typing.TypeVar("AxisLike") +NDArray = typing.TypeVar("NDArray") +CastingModes = typing.TypeVar("CastingModes") +KeepDims = typing.TypeVar("KeepDims") + +# OutArray is to annotate the out= array argument. +# +# This one is special is several respects: +# First, It needs to be an NDArray, and we need to preserve the `result is out` +# semantics. Therefore, we cannot just extract the Tensor from the out array. 
+# So we never pass the out array to implementer functions and handle it in the +# `normalizer` below. +# Second, the out= argument can be either keyword or positional argument, and +# as a positional arg, it can be anywhere in the signature. +# To handle all this, we define a special `OutArray` annotation and dispatch on it. +# +OutArray = typing.TypeVar("OutArray") + +try: + from typing import NotImplementedType +except ImportError: + NotImplementedType = typing.TypeVar("NotImplementedType") + + +def normalize_array_like(x, parm=None): + from ._ndarray import asarray + + return asarray(x).tensor + + +def normalize_array_like_or_scalar(x, parm=None): + if _dtypes_impl.is_scalar_or_symbolic(x): + return x + return normalize_array_like(x, parm) + + +def normalize_optional_array_like_or_scalar(x, parm=None): + if x is None: + return None + return normalize_array_like_or_scalar(x, parm) + + +def normalize_optional_array_like(x, parm=None): + # This explicit normalizer is needed because otherwise normalize_array_like + # does not run for a parameter annotated as Optional[ArrayLike] + return None if x is None else normalize_array_like(x, parm) + + +def normalize_seq_array_like(x, parm=None): + return tuple(normalize_array_like(value) for value in x) + + +def normalize_dtype(dtype, parm=None): + # cf _decorators.dtype_to_torch + torch_dtype = None + if dtype is not None: + dtype = _dtypes.dtype(dtype) + torch_dtype = dtype.torch_dtype + return torch_dtype + + +def normalize_not_implemented(arg, parm): + if arg != parm.default: + raise NotImplementedError(f"'{parm.name}' parameter is not supported.") + + +def normalize_axis_like(arg, parm=None): + from ._ndarray import ndarray + + if isinstance(arg, ndarray): + arg = operator.index(arg) + return arg + + +def normalize_ndarray(arg, parm=None): + # check the arg is an ndarray, extract its tensor attribute + if arg is None: + return arg + + from ._ndarray import ndarray + + if not isinstance(arg, ndarray): + raise TypeError(f"'{parm.name}' must be an array") + return arg.tensor + + +def normalize_outarray(arg, parm=None): + # almost normalize_ndarray, only return the array, not its tensor + if arg is None: + return arg + from ._ndarray import ndarray + + # Dynamo can pass torch tensors as out arguments, + # wrap it in an ndarray before processing + if isinstance(arg, torch.Tensor): + arg = ndarray(arg) + + if not isinstance(arg, ndarray): + raise TypeError(f"'{parm.name}' must be an array") + return arg + + +def normalize_casting(arg, parm=None): + if arg not in ["no", "equiv", "safe", "same_kind", "unsafe"]: + raise ValueError( + f"casting must be one of 'no', 'equiv', 'safe', 'same_kind', or 'unsafe' (got '{arg}')" + ) + return arg + + +normalizers = { + "ArrayLike": normalize_array_like, + "ArrayLikeOrScalar": normalize_array_like_or_scalar, + "Optional[ArrayLike]": normalize_optional_array_like, + "Sequence[ArrayLike]": normalize_seq_array_like, + "Optional[ArrayLikeOrScalar]": normalize_optional_array_like_or_scalar, + "Optional[NDArray]": normalize_ndarray, + "Optional[OutArray]": normalize_outarray, + "NDArray": normalize_ndarray, + "Optional[DTypeLike]": normalize_dtype, + "AxisLike": normalize_axis_like, + "NotImplementedType": normalize_not_implemented, + "Optional[CastingModes]": normalize_casting, +} + + +def maybe_normalize(arg, parm): + """Normalize arg if a normalizer is registered.""" + normalizer = normalizers.get(parm.annotation, None) + return normalizer(arg, parm) if normalizer else arg + + +# ### Return value helpers ### + + 
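# Reviewer sketch (toy code, not the vendored implementation): the annotation-driven
# dispatch that `maybe_normalize` above performs, reduced to its essentials -- each
# parameter's string annotation is looked up in a registry and the matching normalizer
# converts the corresponding argument before the wrapped function runs.
import functools
import inspect

import torch

# Registry keyed by the string annotation, mirroring the `normalizers` dict above.
toy_normalizers = {"ArrayLike": lambda x, parm=None: torch.as_tensor(x)}


def toy_normalizer(func):
    @functools.wraps(func)
    def wrapped(*args, **kwds):
        params = list(inspect.signature(func).parameters.values())
        args = tuple(
            toy_normalizers.get(p.annotation, lambda x, parm=None: x)(a, p)
            for a, p in zip(args, params)
        )
        return func(*args, **kwds)

    return wrapped


@toy_normalizer
def scale(x: "ArrayLike", factor=2):
    return x * factor


print(scale([1, 2, 3]))  # tensor([2, 4, 6])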
+def maybe_copy_to(out, result, promote_scalar_result=False): + # NB: here out is either an ndarray or None + if out is None: + return result + elif isinstance(result, torch.Tensor): + if result.shape != out.shape: + can_fit = result.numel() == 1 and out.ndim == 0 + if promote_scalar_result and can_fit: + result = result.squeeze() + else: + raise ValueError( + f"Bad size of the out array: out.shape = {out.shape}" + f" while result.shape = {result.shape}." + ) + out.tensor.copy_(result) + return out + elif isinstance(result, (tuple, list)): + return type(result)( + maybe_copy_to(o, r, promote_scalar_result) for o, r in zip(out, result) + ) + else: + raise AssertionError() # We should never hit this path + + +def wrap_tensors(result): + from ._ndarray import ndarray + + if isinstance(result, torch.Tensor): + return ndarray(result) + elif isinstance(result, (tuple, list)): + result = type(result)(wrap_tensors(x) for x in result) + return result + + +def array_or_scalar(values, py_type=float, return_scalar=False): + if return_scalar: + return py_type(values.item()) + else: + from ._ndarray import ndarray + + return ndarray(values) + + +# ### The main decorator to normalize arguments / postprocess the output ### + + +def normalizer(_func=None, *, promote_scalar_result=False): + def normalizer_inner(func): + @functools.wraps(func) + def wrapped(*args, **kwds): + sig = inspect.signature(func) + params = sig.parameters + first_param = next(iter(params.values())) + + # NumPy's API does not have positional args before variadic positional args + if first_param.kind == inspect.Parameter.VAR_POSITIONAL: + args = [maybe_normalize(arg, first_param) for arg in args] + else: + # NB: extra unknown arguments: pass through, will raise in func(*args) below + args = ( + tuple( + maybe_normalize(arg, parm) + for arg, parm in zip(args, params.values()) + ) + + args[len(params.values()) :] + ) + + kwds = { + name: maybe_normalize(arg, params[name]) if name in params else arg + for name, arg in kwds.items() + } + + result = func(*args, **kwds) + + # keepdims + bound_args = None + if "keepdims" in params and params["keepdims"].annotation == "KeepDims": + # keepdims can be in any position so we need sig.bind + bound_args = sig.bind(*args, **kwds).arguments + if bound_args.get("keepdims", False): + # In this case the first arg is the initial tensor and + # the second arg is (optionally) the axis + tensor = args[0] + axis = bound_args.get("axis") + result = _util.apply_keepdims(result, axis, tensor.ndim) + + # out + if "out" in params: + # out can be in any position so we need sig.bind + if bound_args is None: + bound_args = sig.bind(*args, **kwds).arguments + out = bound_args.get("out") + result = maybe_copy_to(out, result, promote_scalar_result) + result = wrap_tensors(result) + + return result + + return wrapped + + if _func is None: + return normalizer_inner + else: + return normalizer_inner(_func) diff --git a/venv/lib/python3.10/site-packages/torch/_numpy/_reductions_impl.py b/venv/lib/python3.10/site-packages/torch/_numpy/_reductions_impl.py new file mode 100644 index 0000000000000000000000000000000000000000..cfd95800c36f154569e26153cb90f9863b41aa9d --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/_numpy/_reductions_impl.py @@ -0,0 +1,456 @@ +# mypy: ignore-errors + +""" Implementation of reduction operations, to be wrapped into arrays, dtypes etc +in the 'public' layer. + +Anything here only deals with torch objects, e.g. 
"dtype" is a torch.dtype instance etc +""" +from __future__ import annotations + +import functools +from typing import Optional + +import torch + +from . import _dtypes_impl, _util +from ._normalizations import ( + ArrayLike, + AxisLike, + DTypeLike, + KeepDims, + NotImplementedType, + OutArray, +) + + +def _deco_axis_expand(func): + """ + Generically handle axis arguments in reductions. + axis is *always* the 2nd arg in the function so no need to have a look at its signature + """ + + @functools.wraps(func) + def wrapped(a, axis=None, *args, **kwds): + if axis is not None: + axis = _util.normalize_axis_tuple(axis, a.ndim) + + if axis == (): + # So we insert a length-one axis and run the reduction along it. + # We cannot return a.clone() as this would sidestep the checks inside the function + newshape = _util.expand_shape(a.shape, axis=0) + a = a.reshape(newshape) + axis = (0,) + + return func(a, axis, *args, **kwds) + + return wrapped + + +def _atleast_float(dtype, other_dtype): + """Return a dtype that is real or complex floating-point. + + For inputs that are boolean or integer dtypes, this returns the default + float dtype; inputs that are complex get converted to the default complex + dtype; real floating-point dtypes (`float*`) get passed through unchanged + """ + if dtype is None: + dtype = other_dtype + if not (dtype.is_floating_point or dtype.is_complex): + return _dtypes_impl.default_dtypes().float_dtype + return dtype + + +@_deco_axis_expand +def count_nonzero(a: ArrayLike, axis: AxisLike = None, *, keepdims: KeepDims = False): + return a.count_nonzero(axis) + + +@_deco_axis_expand +def argmax( + a: ArrayLike, + axis: AxisLike = None, + out: Optional[OutArray] = None, + *, + keepdims: KeepDims = False, +): + if a.is_complex(): + raise NotImplementedError(f"argmax with dtype={a.dtype}.") + + axis = _util.allow_only_single_axis(axis) + + if a.dtype == torch.bool: + # RuntimeError: "argmax_cpu" not implemented for 'Bool' + a = a.to(torch.uint8) + + return torch.argmax(a, axis) + + +@_deco_axis_expand +def argmin( + a: ArrayLike, + axis: AxisLike = None, + out: Optional[OutArray] = None, + *, + keepdims: KeepDims = False, +): + if a.is_complex(): + raise NotImplementedError(f"argmin with dtype={a.dtype}.") + + axis = _util.allow_only_single_axis(axis) + + if a.dtype == torch.bool: + # RuntimeError: "argmin_cpu" not implemented for 'Bool' + a = a.to(torch.uint8) + + return torch.argmin(a, axis) + + +@_deco_axis_expand +def any( + a: ArrayLike, + axis: AxisLike = None, + out: Optional[OutArray] = None, + keepdims: KeepDims = False, + *, + where: NotImplementedType = None, +): + axis = _util.allow_only_single_axis(axis) + axis_kw = {} if axis is None else {"dim": axis} + return torch.any(a, **axis_kw) + + +@_deco_axis_expand +def all( + a: ArrayLike, + axis: AxisLike = None, + out: Optional[OutArray] = None, + keepdims: KeepDims = False, + *, + where: NotImplementedType = None, +): + axis = _util.allow_only_single_axis(axis) + axis_kw = {} if axis is None else {"dim": axis} + return torch.all(a, **axis_kw) + + +@_deco_axis_expand +def amax( + a: ArrayLike, + axis: AxisLike = None, + out: Optional[OutArray] = None, + keepdims: KeepDims = False, + initial: NotImplementedType = None, + where: NotImplementedType = None, +): + if a.is_complex(): + raise NotImplementedError(f"amax with dtype={a.dtype}") + + return a.amax(axis) + + +max = amax + + +@_deco_axis_expand +def amin( + a: ArrayLike, + axis: AxisLike = None, + out: Optional[OutArray] = None, + keepdims: KeepDims = False, + initial: 
NotImplementedType = None, + where: NotImplementedType = None, +): + if a.is_complex(): + raise NotImplementedError(f"amin with dtype={a.dtype}") + + return a.amin(axis) + + +min = amin + + +@_deco_axis_expand +def ptp( + a: ArrayLike, + axis: AxisLike = None, + out: Optional[OutArray] = None, + keepdims: KeepDims = False, +): + return a.amax(axis) - a.amin(axis) + + +@_deco_axis_expand +def sum( + a: ArrayLike, + axis: AxisLike = None, + dtype: Optional[DTypeLike] = None, + out: Optional[OutArray] = None, + keepdims: KeepDims = False, + initial: NotImplementedType = None, + where: NotImplementedType = None, +): + assert dtype is None or isinstance(dtype, torch.dtype) + + if dtype == torch.bool: + dtype = _dtypes_impl.default_dtypes().int_dtype + + axis_kw = {} if axis is None else {"dim": axis} + return a.sum(dtype=dtype, **axis_kw) + + +@_deco_axis_expand +def prod( + a: ArrayLike, + axis: AxisLike = None, + dtype: Optional[DTypeLike] = None, + out: Optional[OutArray] = None, + keepdims: KeepDims = False, + initial: NotImplementedType = None, + where: NotImplementedType = None, +): + axis = _util.allow_only_single_axis(axis) + + if dtype == torch.bool: + dtype = _dtypes_impl.default_dtypes().int_dtype + + axis_kw = {} if axis is None else {"dim": axis} + return a.prod(dtype=dtype, **axis_kw) + + +product = prod + + +@_deco_axis_expand +def mean( + a: ArrayLike, + axis: AxisLike = None, + dtype: Optional[DTypeLike] = None, + out: Optional[OutArray] = None, + keepdims: KeepDims = False, + *, + where: NotImplementedType = None, +): + dtype = _atleast_float(dtype, a.dtype) + + axis_kw = {} if axis is None else {"dim": axis} + result = a.mean(dtype=dtype, **axis_kw) + + return result + + +@_deco_axis_expand +def std( + a: ArrayLike, + axis: AxisLike = None, + dtype: Optional[DTypeLike] = None, + out: Optional[OutArray] = None, + ddof=0, + keepdims: KeepDims = False, + *, + where: NotImplementedType = None, +): + in_dtype = dtype + dtype = _atleast_float(dtype, a.dtype) + tensor = _util.cast_if_needed(a, dtype) + result = tensor.std(dim=axis, correction=ddof) + return _util.cast_if_needed(result, in_dtype) + + +@_deco_axis_expand +def var( + a: ArrayLike, + axis: AxisLike = None, + dtype: Optional[DTypeLike] = None, + out: Optional[OutArray] = None, + ddof=0, + keepdims: KeepDims = False, + *, + where: NotImplementedType = None, +): + in_dtype = dtype + dtype = _atleast_float(dtype, a.dtype) + tensor = _util.cast_if_needed(a, dtype) + result = tensor.var(dim=axis, correction=ddof) + return _util.cast_if_needed(result, in_dtype) + + +# cumsum / cumprod are almost reductions: +# 1. no keepdims +# 2. 
axis=None flattens + + +def cumsum( + a: ArrayLike, + axis: AxisLike = None, + dtype: Optional[DTypeLike] = None, + out: Optional[OutArray] = None, +): + if dtype == torch.bool: + dtype = _dtypes_impl.default_dtypes().int_dtype + if dtype is None: + dtype = a.dtype + + (a,), axis = _util.axis_none_flatten(a, axis=axis) + axis = _util.normalize_axis_index(axis, a.ndim) + + return a.cumsum(axis=axis, dtype=dtype) + + +def cumprod( + a: ArrayLike, + axis: AxisLike = None, + dtype: Optional[DTypeLike] = None, + out: Optional[OutArray] = None, +): + if dtype == torch.bool: + dtype = _dtypes_impl.default_dtypes().int_dtype + if dtype is None: + dtype = a.dtype + + (a,), axis = _util.axis_none_flatten(a, axis=axis) + axis = _util.normalize_axis_index(axis, a.ndim) + + return a.cumprod(axis=axis, dtype=dtype) + + +cumproduct = cumprod + + +def average( + a: ArrayLike, + axis=None, + weights: ArrayLike = None, + returned=False, + *, + keepdims=False, +): + if weights is None: + result = mean(a, axis=axis) + wsum = torch.as_tensor(a.numel() / result.numel(), dtype=result.dtype) + else: + if not a.dtype.is_floating_point: + a = a.double() + + # axis & weights + if a.shape != weights.shape: + if axis is None: + raise TypeError( + "Axis must be specified when shapes of a and weights differ." + ) + if weights.ndim != 1: + raise TypeError( + "1D weights expected when shapes of a and weights differ." + ) + if weights.shape[0] != a.shape[axis]: + raise ValueError( + "Length of weights not compatible with specified axis." + ) + + # setup weight to broadcast along axis + weights = torch.broadcast_to(weights, (a.ndim - 1) * (1,) + weights.shape) + weights = weights.swapaxes(-1, axis) + + # do the work + result_dtype = _dtypes_impl.result_type_impl(a, weights) + numerator = sum(a * weights, axis, dtype=result_dtype) + wsum = sum(weights, axis, dtype=result_dtype) + result = numerator / wsum + + # We process keepdims manually because the decorator does not deal with variadic returns + if keepdims: + result = _util.apply_keepdims(result, axis, a.ndim) + + if returned: + if wsum.shape != result.shape: + wsum = torch.broadcast_to(wsum, result.shape).clone() + return result, wsum + else: + return result + + +# Not using deco_axis_expand as it assumes that axis is the second arg +def quantile( + a: ArrayLike, + q: ArrayLike, + axis: AxisLike = None, + out: Optional[OutArray] = None, + overwrite_input=False, + method="linear", + keepdims: KeepDims = False, + *, + interpolation: NotImplementedType = None, +): + if overwrite_input: + # raise NotImplementedError("overwrite_input in quantile not implemented.") + # NumPy documents that `overwrite_input` MAY modify inputs: + # https://numpy.org/doc/stable/reference/generated/numpy.percentile.html#numpy-percentile + # Here we choose to work out-of-place because why not. + pass + + if not a.dtype.is_floating_point: + dtype = _dtypes_impl.default_dtypes().float_dtype + a = a.to(dtype) + + # edge case: torch.quantile only supports float32 and float64 + if a.dtype == torch.float16: + a = a.to(torch.float32) + + if axis is None: + a = a.flatten() + q = q.flatten() + axis = (0,) + else: + axis = _util.normalize_axis_tuple(axis, a.ndim) + + # FIXME(Mario) Doesn't np.quantile accept a tuple? + # torch.quantile does accept a number. If we don't want to implement the tuple behaviour + # (it's deffo low prio) change `normalize_axis_tuple` into a normalize_axis index above. 
+ axis = _util.allow_only_single_axis(axis) + + q = _util.cast_if_needed(q, a.dtype) + + return torch.quantile(a, q, axis=axis, interpolation=method) + + +def percentile( + a: ArrayLike, + q: ArrayLike, + axis: AxisLike = None, + out: Optional[OutArray] = None, + overwrite_input=False, + method="linear", + keepdims: KeepDims = False, + *, + interpolation: NotImplementedType = None, +): + # np.percentile(float_tensor, 30) : q.dtype is int64 => q / 100.0 is float32 + if _dtypes_impl.python_type_for_torch(q.dtype) == int: + q = q.to(_dtypes_impl.default_dtypes().float_dtype) + qq = q / 100.0 + + return quantile( + a, + qq, + axis=axis, + overwrite_input=overwrite_input, + method=method, + keepdims=keepdims, + interpolation=interpolation, + ) + + +def median( + a: ArrayLike, + axis=None, + out: Optional[OutArray] = None, + overwrite_input=False, + keepdims: KeepDims = False, +): + return quantile( + a, + torch.as_tensor(0.5), + axis=axis, + overwrite_input=overwrite_input, + out=out, + keepdims=keepdims, + ) diff --git a/venv/lib/python3.10/site-packages/torch/_numpy/_ufuncs.py b/venv/lib/python3.10/site-packages/torch/_numpy/_ufuncs.py new file mode 100644 index 0000000000000000000000000000000000000000..0543aad2f7a6ba551d8ce7483e1dc8e0474ac57e --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/_numpy/_ufuncs.py @@ -0,0 +1,334 @@ +# mypy: ignore-errors + +from __future__ import annotations + +from typing import Optional + +import torch + +from . import _binary_ufuncs_impl, _dtypes_impl, _unary_ufuncs_impl, _util +from ._normalizations import ( + ArrayLike, + ArrayLikeOrScalar, + CastingModes, + DTypeLike, + normalizer, + NotImplementedType, + OutArray, +) + + +def _ufunc_postprocess(result, out, casting): + if out is not None: + result = _util.typecast_tensor(result, out.dtype.torch_dtype, casting) + result = torch.broadcast_to(result, out.shape) + return result + + +# ############# Binary ufuncs ###################### + +_binary = [ + name + for name in dir(_binary_ufuncs_impl) + if not name.startswith("_") and name not in ["torch", "matmul", "divmod", "ldexp"] +] + + +NEP50_FUNCS = ( + "add", + "subtract", + "multiply", + "floor_divide", + "true_divide", + "divide", + "remainder", + "bitwise_and", + "bitwise_or", + "bitwise_xor", + "bitwise_left_shift", + "bitwise_right_shift", + "hypot", + "arctan2", + "logaddexp", + "logaddexp2", + "heaviside", + "copysign", + "fmax", + "minimum", + "fmin", + "maximum", + "fmod", + "gcd", + "lcm", + "pow", +) + + +def deco_binary_ufunc(torch_func): + """Common infra for binary ufuncs. + + Normalize arguments, sort out type casting, broadcasting and delegate to + the pytorch functions for the actual work. 
+ """ + + @normalizer + def wrapped( + x1: ArrayLikeOrScalar, + x2: ArrayLikeOrScalar, + /, + out: Optional[OutArray] = None, + *, + where: NotImplementedType = True, + casting: Optional[CastingModes] = "same_kind", + order: NotImplementedType = "K", + dtype: Optional[DTypeLike] = None, + subok: NotImplementedType = False, + signature: NotImplementedType = None, + extobj: NotImplementedType = None, + ): + if dtype is not None: + + def cast(x, dtype): + if isinstance(x, torch.Tensor): + return _util.typecast_tensor(x, dtype, casting) + else: + return torch.as_tensor(x, dtype=dtype) + + x1 = cast(x1, dtype) + x2 = cast(x2, dtype) + elif isinstance(x1, torch.Tensor) and isinstance(x2, torch.Tensor): + dtype = _dtypes_impl.result_type_impl(x1, x2) + x1, x2 = _util.typecast_tensors((x1, x2), dtype, casting) + else: + x1, x2 = _dtypes_impl.nep50_to_tensors( + x1, x2, torch_func.__name__ in NEP50_FUNCS, torch_func.__name__ + ) + + result = torch_func(x1, x2) + + return _ufunc_postprocess(result, out, casting) + + wrapped.__qualname__ = torch_func.__name__ + wrapped.__name__ = torch_func.__name__ + + return wrapped + + +# matmul's signature is _slightly_ different from other ufuncs: +# - no where=... +# - additional axis=..., axes=... +# - no NEP50 scalars in or out +@normalizer +def matmul( + x1: ArrayLike, + x2: ArrayLike, + /, + out: Optional[OutArray] = None, + *, + casting: Optional[CastingModes] = "same_kind", + order: NotImplementedType = "K", + dtype: Optional[DTypeLike] = None, + subok: NotImplementedType = False, + signature: NotImplementedType = None, + extobj: NotImplementedType = None, + axes: NotImplementedType = None, + axis: NotImplementedType = None, +): + if dtype is None: + dtype = _dtypes_impl.result_type_impl(x1, x2) + x1, x2 = _util.typecast_tensors((x1, x2), dtype, casting) + + result = _binary_ufuncs_impl.matmul(x1, x2) + + result = _ufunc_postprocess(result, out, casting) + return result + + +# ldexp casting is special : the dtype of the result == dtype of the 1st arg +@normalizer +def ldexp( + x1: ArrayLikeOrScalar, + x2: ArrayLikeOrScalar, + /, + out: Optional[OutArray] = None, + *, + where: NotImplementedType = True, + casting: Optional[CastingModes] = "same_kind", + order: NotImplementedType = "K", + dtype: Optional[DTypeLike] = None, + subok: NotImplementedType = False, + signature: NotImplementedType = None, + extobj: NotImplementedType = None, +): + if dtype is not None: + if isinstance(x1, torch.Tensor): + x1 = _util.typecast_tensor(x1, dtype, casting) + else: + x1 = torch.as_tensor(x1, dtype=dtype) + else: + if not isinstance(x1, torch.Tensor): + x1 = torch.as_tensor(x1) + x1 = _util.cast_int_to_float(x1) + + x2 = torch.as_tensor(x2) + # the second arg must be integer + if _dtypes_impl._category(x2.dtype) != 1: + raise ValueError("ldexp 2nd arg must be integer") + + result = _binary_ufuncs_impl.ldexp(x1, x2) + + if x1.dtype == torch.float16: + # torch.ldexp(f16, int) -> f32, undo it + result = result.to(torch.float16) + + return _ufunc_postprocess(result, out, casting) + + +# nin=2, nout=2 +@normalizer +def divmod( + x1: ArrayLike, + x2: ArrayLike, + out1: Optional[OutArray] = None, + out2: Optional[OutArray] = None, + /, + out: tuple[Optional[OutArray], Optional[OutArray]] = (None, None), + *, + where: NotImplementedType = True, + casting: Optional[CastingModes] = "same_kind", + order: NotImplementedType = "K", + dtype: Optional[DTypeLike] = None, + subok: NotImplementedType = False, + signature: NotImplementedType = None, + extobj: NotImplementedType = None, +): 
+ # make sure we either have no out arrays at all, or there is either + # out1, out2, or out=tuple, but not both + num_outs = sum(x is not None for x in [out1, out2]) + if num_outs == 1: + raise ValueError("both out1 and out2 need to be provided") + elif num_outs == 2: + o1, o2 = out + if o1 is not None or o2 is not None: + raise TypeError( + "cannot specify 'out' as both a positional and keyword argument" + ) + else: + out1, out2 = out + + if dtype is None: + dtype = _dtypes_impl.result_type_impl(x1, x2) + x1, x2 = _util.typecast_tensors((x1, x2), dtype, casting) + + quot, rem = _binary_ufuncs_impl.divmod(x1, x2) + + quot = _ufunc_postprocess(quot, out1, casting) + rem = _ufunc_postprocess(rem, out2, casting) + return quot, rem + + +# +# Attach ufuncs to this module, for a further export to the public namespace in __init__.py +# +for name in _binary: + ufunc = getattr(_binary_ufuncs_impl, name) + vars()[name] = deco_binary_ufunc(ufunc) + + +def modf(x, /, *args, **kwds): + quot, rem = divmod(x, 1, *args, **kwds) + return rem, quot + + +_binary = _binary + ["divmod", "modf", "matmul", "ldexp"] + + +# ############# Unary ufuncs ###################### + + +_unary = [ + name + for name in dir(_unary_ufuncs_impl) + if not name.startswith("_") and name != "torch" +] + + +# these are ufunc(int) -> float +_fp_unary = [ + "arccos", + "arccosh", + "arcsin", + "arcsinh", + "arctan", + "arctanh", + "cbrt", + "cos", + "cosh", + "deg2rad", + "degrees", + "exp", + "exp2", + "expm1", + "log", + "log10", + "log1p", + "log2", + "rad2deg", + "radians", + "reciprocal", + "sin", + "sinh", + "sqrt", + "square", + "tan", + "tanh", + "trunc", +] + + +def deco_unary_ufunc(torch_func): + """Common infra for unary ufuncs. + + Normalize arguments, sort out type casting, broadcasting and delegate to + the pytorch functions for the actual work. + """ + + @normalizer + def wrapped( + x: ArrayLike, + /, + out: Optional[OutArray] = None, + *, + where=True, + casting: Optional[CastingModes] = "same_kind", + order="K", + dtype: Optional[DTypeLike] = None, + subok: NotImplementedType = False, + signature=None, + extobj=None, + ): + if dtype is not None: + x = _util.typecast_tensor(x, dtype, casting) + + if torch_func.__name__ in _fp_unary: + x = _util.cast_int_to_float(x) + + result = torch_func(x) + result = _ufunc_postprocess(result, out, casting) + return result + + wrapped.__qualname__ = torch_func.__name__ + wrapped.__name__ = torch_func.__name__ + + return wrapped + + +# +# Attach ufuncs to this module, for a further export to the public namespace in __init__.py +# +for name in _unary: + ufunc = getattr(_unary_ufuncs_impl, name) + vars()[name] = deco_unary_ufunc(ufunc) + + +__all__ = _binary + _unary # noqa: PLE0605 diff --git a/venv/lib/python3.10/site-packages/torch/_numpy/_unary_ufuncs_impl.py b/venv/lib/python3.10/site-packages/torch/_numpy/_unary_ufuncs_impl.py new file mode 100644 index 0000000000000000000000000000000000000000..ce4a30cc40e7132e24812041d438c60b8f07ca8e --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/_numpy/_unary_ufuncs_impl.py @@ -0,0 +1,73 @@ +# mypy: ignore-errors + +"""Export torch work functions for unary ufuncs, rename/tweak to match numpy. +This listing is further exported to public symbols in the `_numpy/_ufuncs.py` module. 
+""" + +import torch + +from torch import ( # noqa: F401 + absolute as fabs, # noqa: F401 + arccos, # noqa: F401 + arccosh, # noqa: F401 + arcsin, # noqa: F401 + arcsinh, # noqa: F401 + arctan, # noqa: F401 + arctanh, # noqa: F401 + bitwise_not, # noqa: F401 + bitwise_not as invert, # noqa: F401 + ceil, # noqa: F401 + conj_physical as conjugate, # noqa: F401 + cos, # noqa: F401 + cosh, # noqa: F401 + deg2rad, # noqa: F401 + deg2rad as radians, # noqa: F401 + exp, # noqa: F401 + exp2, # noqa: F401 + expm1, # noqa: F401 + floor, # noqa: F401 + isfinite, # noqa: F401 + isinf, # noqa: F401 + isnan, # noqa: F401 + log, # noqa: F401 + log10, # noqa: F401 + log1p, # noqa: F401 + log2, # noqa: F401 + logical_not, # noqa: F401 + negative, # noqa: F401 + rad2deg, # noqa: F401 + rad2deg as degrees, # noqa: F401 + reciprocal, # noqa: F401 + round as fix, # noqa: F401 + round as rint, # noqa: F401 + sign, # noqa: F401 + signbit, # noqa: F401 + sin, # noqa: F401 + sinh, # noqa: F401 + sqrt, # noqa: F401 + square, # noqa: F401 + tan, # noqa: F401 + tanh, # noqa: F401 + trunc, # noqa: F401 +) + + +# special cases: torch does not export these names +def cbrt(x): + return torch.pow(x, 1 / 3) + + +def positive(x): + return +x + + +def absolute(x): + # work around torch.absolute not impl for bools + if x.dtype == torch.bool: + return x + return torch.absolute(x) + + +# TODO set __name__ and __qualname__ +abs = absolute +conj = conjugate diff --git a/venv/lib/python3.10/site-packages/torch/_numpy/_util.py b/venv/lib/python3.10/site-packages/torch/_numpy/_util.py new file mode 100644 index 0000000000000000000000000000000000000000..ff219d930731c4daeec8829de8718bb1af16b2f3 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/_numpy/_util.py @@ -0,0 +1,261 @@ +# mypy: ignore-errors + +"""Assorted utilities, which do not need anything other then torch and stdlib. +""" + +import operator + +import torch + +from . import _dtypes_impl + + +# https://github.com/numpy/numpy/blob/v1.23.0/numpy/distutils/misc_util.py#L497-L504 +def is_sequence(seq): + if isinstance(seq, str): + return False + try: + len(seq) + except Exception: + return False + return True + + +class AxisError(ValueError, IndexError): + pass + + +class UFuncTypeError(TypeError, RuntimeError): + pass + + +def cast_if_needed(tensor, dtype): + # NB: no casting if dtype=None + if dtype is not None and tensor.dtype != dtype: + tensor = tensor.to(dtype) + return tensor + + +def cast_int_to_float(x): + # cast integers and bools to the default float dtype + if _dtypes_impl._category(x.dtype) < 2: + x = x.to(_dtypes_impl.default_dtypes().float_dtype) + return x + + +# a replica of the version in ./numpy/numpy/core/src/multiarray/common.h +def normalize_axis_index(ax, ndim, argname=None): + if not (-ndim <= ax < ndim): + raise AxisError(f"axis {ax} is out of bounds for array of dimension {ndim}") + if ax < 0: + ax += ndim + return ax + + +# from https://github.com/numpy/numpy/blob/main/numpy/core/numeric.py#L1378 +def normalize_axis_tuple(axis, ndim, argname=None, allow_duplicate=False): + """ + Normalizes an axis argument into a tuple of non-negative integer axes. + + This handles shorthands such as ``1`` and converts them to ``(1,)``, + as well as performing the handling of negative indices covered by + `normalize_axis_index`. + + By default, this forbids axes from being specified multiple times. + Used internally by multi-axis-checking logic. + + Parameters + ---------- + axis : int, iterable of int + The un-normalized index or indices of the axis. 
+ ndim : int + The number of dimensions of the array that `axis` should be normalized + against. + argname : str, optional + A prefix to put before the error message, typically the name of the + argument. + allow_duplicate : bool, optional + If False, the default, disallow an axis from being specified twice. + + Returns + ------- + normalized_axes : tuple of int + The normalized axis index, such that `0 <= normalized_axis < ndim` + """ + # Optimization to speed-up the most common cases. + if type(axis) not in (tuple, list): + try: + axis = [operator.index(axis)] + except TypeError: + pass + # Going via an iterator directly is slower than via list comprehension. + axis = tuple([normalize_axis_index(ax, ndim, argname) for ax in axis]) + if not allow_duplicate and len(set(axis)) != len(axis): + if argname: + raise ValueError(f"repeated axis in `{argname}` argument") + else: + raise ValueError("repeated axis") + return axis + + +def allow_only_single_axis(axis): + if axis is None: + return axis + if len(axis) != 1: + raise NotImplementedError("does not handle tuple axis") + return axis[0] + + +def expand_shape(arr_shape, axis): + # taken from numpy 1.23.x, expand_dims function + if type(axis) not in (list, tuple): + axis = (axis,) + out_ndim = len(axis) + len(arr_shape) + axis = normalize_axis_tuple(axis, out_ndim) + shape_it = iter(arr_shape) + shape = [1 if ax in axis else next(shape_it) for ax in range(out_ndim)] + return shape + + +def apply_keepdims(tensor, axis, ndim): + if axis is None: + # tensor was a scalar + shape = (1,) * ndim + tensor = tensor.expand(shape).contiguous() + else: + shape = expand_shape(tensor.shape, axis) + tensor = tensor.reshape(shape) + return tensor + + +def axis_none_flatten(*tensors, axis=None): + """Flatten the arrays if axis is None.""" + if axis is None: + tensors = tuple(ar.flatten() for ar in tensors) + return tensors, 0 + else: + return tensors, axis + + +def typecast_tensor(t, target_dtype, casting): + """Dtype-cast tensor to target_dtype. + + Parameters + ---------- + t : torch.Tensor + The tensor to cast + target_dtype : torch dtype object + The array dtype to cast all tensors to + casting : str + The casting mode, see `np.can_cast` + + Returns + ------- + `torch.Tensor` of the `target_dtype` dtype + + Raises + ------ + ValueError + if the argument cannot be cast according to the `casting` rule + + """ + can_cast = _dtypes_impl.can_cast_impl + + if not can_cast(t.dtype, target_dtype, casting=casting): + raise TypeError( + f"Cannot cast array data from {t.dtype} to" + f" {target_dtype} according to the rule '{casting}'" + ) + return cast_if_needed(t, target_dtype) + + +def typecast_tensors(tensors, target_dtype, casting): + return tuple(typecast_tensor(t, target_dtype, casting) for t in tensors) + + +def _try_convert_to_tensor(obj): + try: + tensor = torch.as_tensor(obj) + except Exception as e: + mesg = f"failed to convert {obj} to ndarray. \nInternal error is: {str(e)}." + raise NotImplementedError(mesg) # noqa: TRY200 + return tensor + + +def _coerce_to_tensor(obj, dtype=None, copy=False, ndmin=0): + """The core logic of the array(...) function. + + Parameters + ---------- + obj : tensor_like + The thing to coerce + dtype : torch.dtype object or None + Coerce to this torch dtype + copy : bool + Copy or not + ndmin : int + The results as least this many dimensions + is_weak : bool + Whether obj is a weakly typed python scalar. + + Returns + ------- + tensor : torch.Tensor + a tensor object with requested dtype, ndim and copy semantics. 
+ + Notes + ----- + This is almost a "tensor_like" coersion function. Does not handle wrapper + ndarrays (those should be handled in the ndarray-aware layer prior to + invoking this function). + """ + if isinstance(obj, torch.Tensor): + tensor = obj + else: + # tensor.dtype is the pytorch default, typically float32. If obj's elements + # are not exactly representable in float32, we've lost precision: + # >>> torch.as_tensor(1e12).item() - 1e12 + # -4096.0 + default_dtype = torch.get_default_dtype() + torch.set_default_dtype(_dtypes_impl.get_default_dtype_for(torch.float32)) + try: + tensor = _try_convert_to_tensor(obj) + finally: + torch.set_default_dtype(default_dtype) + + # type cast if requested + tensor = cast_if_needed(tensor, dtype) + + # adjust ndim if needed + ndim_extra = ndmin - tensor.ndim + if ndim_extra > 0: + tensor = tensor.view((1,) * ndim_extra + tensor.shape) + + # copy if requested + if copy: + tensor = tensor.clone() + + return tensor + + +def ndarrays_to_tensors(*inputs): + """Convert all ndarrays from `inputs` to tensors. (other things are intact)""" + from ._ndarray import ndarray + + if len(inputs) == 0: + return ValueError() + elif len(inputs) == 1: + input_ = inputs[0] + if isinstance(input_, ndarray): + return input_.tensor + elif isinstance(input_, tuple): + result = [] + for sub_input in input_: + sub_result = ndarrays_to_tensors(sub_input) + result.append(sub_result) + return tuple(result) + else: + return input_ + else: + assert isinstance(inputs, tuple) # sanity check + return ndarrays_to_tensors(inputs) diff --git a/venv/lib/python3.10/site-packages/torch/_numpy/fft.py b/venv/lib/python3.10/site-packages/torch/_numpy/fft.py new file mode 100644 index 0000000000000000000000000000000000000000..b7d2f8365dbbd96c30c047d743dda806f12cb211 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/_numpy/fft.py @@ -0,0 +1,130 @@ +# mypy: ignore-errors + +from __future__ import annotations + +import functools + +import torch + +from . 
import _dtypes_impl, _util +from ._normalizations import ArrayLike, normalizer + + +def upcast(func): + """NumPy fft casts inputs to 64 bit and *returns 64-bit results*.""" + + @functools.wraps(func) + def wrapped(tensor, *args, **kwds): + target_dtype = ( + _dtypes_impl.default_dtypes().complex_dtype + if tensor.is_complex() + else _dtypes_impl.default_dtypes().float_dtype + ) + tensor = _util.cast_if_needed(tensor, target_dtype) + return func(tensor, *args, **kwds) + + return wrapped + + +@normalizer +@upcast +def fft(a: ArrayLike, n=None, axis=-1, norm=None): + return torch.fft.fft(a, n, dim=axis, norm=norm) + + +@normalizer +@upcast +def ifft(a: ArrayLike, n=None, axis=-1, norm=None): + return torch.fft.ifft(a, n, dim=axis, norm=norm) + + +@normalizer +@upcast +def rfft(a: ArrayLike, n=None, axis=-1, norm=None): + return torch.fft.rfft(a, n, dim=axis, norm=norm) + + +@normalizer +@upcast +def irfft(a: ArrayLike, n=None, axis=-1, norm=None): + return torch.fft.irfft(a, n, dim=axis, norm=norm) + + +@normalizer +@upcast +def fftn(a: ArrayLike, s=None, axes=None, norm=None): + return torch.fft.fftn(a, s, dim=axes, norm=norm) + + +@normalizer +@upcast +def ifftn(a: ArrayLike, s=None, axes=None, norm=None): + return torch.fft.ifftn(a, s, dim=axes, norm=norm) + + +@normalizer +@upcast +def rfftn(a: ArrayLike, s=None, axes=None, norm=None): + return torch.fft.rfftn(a, s, dim=axes, norm=norm) + + +@normalizer +@upcast +def irfftn(a: ArrayLike, s=None, axes=None, norm=None): + return torch.fft.irfftn(a, s, dim=axes, norm=norm) + + +@normalizer +@upcast +def fft2(a: ArrayLike, s=None, axes=(-2, -1), norm=None): + return torch.fft.fft2(a, s, dim=axes, norm=norm) + + +@normalizer +@upcast +def ifft2(a: ArrayLike, s=None, axes=(-2, -1), norm=None): + return torch.fft.ifft2(a, s, dim=axes, norm=norm) + + +@normalizer +@upcast +def rfft2(a: ArrayLike, s=None, axes=(-2, -1), norm=None): + return torch.fft.rfft2(a, s, dim=axes, norm=norm) + + +@normalizer +@upcast +def irfft2(a: ArrayLike, s=None, axes=(-2, -1), norm=None): + return torch.fft.irfft2(a, s, dim=axes, norm=norm) + + +@normalizer +@upcast +def hfft(a: ArrayLike, n=None, axis=-1, norm=None): + return torch.fft.hfft(a, n, dim=axis, norm=norm) + + +@normalizer +@upcast +def ihfft(a: ArrayLike, n=None, axis=-1, norm=None): + return torch.fft.ihfft(a, n, dim=axis, norm=norm) + + +@normalizer +def fftfreq(n, d=1.0): + return torch.fft.fftfreq(n, d) + + +@normalizer +def rfftfreq(n, d=1.0): + return torch.fft.rfftfreq(n, d) + + +@normalizer +def fftshift(x: ArrayLike, axes=None): + return torch.fft.fftshift(x, axes) + + +@normalizer +def ifftshift(x: ArrayLike, axes=None): + return torch.fft.ifftshift(x, axes) diff --git a/venv/lib/python3.10/site-packages/torch/_numpy/linalg.py b/venv/lib/python3.10/site-packages/torch/_numpy/linalg.py new file mode 100644 index 0000000000000000000000000000000000000000..2232419db1b2e405ced654ff01bfe5269d132774 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/_numpy/linalg.py @@ -0,0 +1,239 @@ +# mypy: ignore-errors + +from __future__ import annotations + +import functools +import math +from typing import Sequence + +import torch + +from . 
import _dtypes_impl, _util +from ._normalizations import ArrayLike, KeepDims, normalizer + + +class LinAlgError(Exception): + pass + + +def _atleast_float_1(a): + if not (a.dtype.is_floating_point or a.dtype.is_complex): + a = a.to(_dtypes_impl.default_dtypes().float_dtype) + return a + + +def _atleast_float_2(a, b): + dtyp = _dtypes_impl.result_type_impl(a, b) + if not (dtyp.is_floating_point or dtyp.is_complex): + dtyp = _dtypes_impl.default_dtypes().float_dtype + + a = _util.cast_if_needed(a, dtyp) + b = _util.cast_if_needed(b, dtyp) + return a, b + + +def linalg_errors(func): + @functools.wraps(func) + def wrapped(*args, **kwds): + try: + return func(*args, **kwds) + except torch._C._LinAlgError as e: + raise LinAlgError(*e.args) # noqa: TRY200 + + return wrapped + + +# ### Matrix and vector products ### + + +@normalizer +@linalg_errors +def matrix_power(a: ArrayLike, n): + a = _atleast_float_1(a) + return torch.linalg.matrix_power(a, n) + + +@normalizer +@linalg_errors +def multi_dot(inputs: Sequence[ArrayLike], *, out=None): + return torch.linalg.multi_dot(inputs) + + +# ### Solving equations and inverting matrices ### + + +@normalizer +@linalg_errors +def solve(a: ArrayLike, b: ArrayLike): + a, b = _atleast_float_2(a, b) + return torch.linalg.solve(a, b) + + +@normalizer +@linalg_errors +def lstsq(a: ArrayLike, b: ArrayLike, rcond=None): + a, b = _atleast_float_2(a, b) + # NumPy is using gelsd: https://github.com/numpy/numpy/blob/v1.24.0/numpy/linalg/umath_linalg.cpp#L3991 + # on CUDA, only `gels` is available though, so use it instead + driver = "gels" if a.is_cuda or b.is_cuda else "gelsd" + return torch.linalg.lstsq(a, b, rcond=rcond, driver=driver) + + +@normalizer +@linalg_errors +def inv(a: ArrayLike): + a = _atleast_float_1(a) + result = torch.linalg.inv(a) + return result + + +@normalizer +@linalg_errors +def pinv(a: ArrayLike, rcond=1e-15, hermitian=False): + a = _atleast_float_1(a) + return torch.linalg.pinv(a, rtol=rcond, hermitian=hermitian) + + +@normalizer +@linalg_errors +def tensorsolve(a: ArrayLike, b: ArrayLike, axes=None): + a, b = _atleast_float_2(a, b) + return torch.linalg.tensorsolve(a, b, dims=axes) + + +@normalizer +@linalg_errors +def tensorinv(a: ArrayLike, ind=2): + a = _atleast_float_1(a) + return torch.linalg.tensorinv(a, ind=ind) + + +# ### Norms and other numbers ### + + +@normalizer +@linalg_errors +def det(a: ArrayLike): + a = _atleast_float_1(a) + return torch.linalg.det(a) + + +@normalizer +@linalg_errors +def slogdet(a: ArrayLike): + a = _atleast_float_1(a) + return torch.linalg.slogdet(a) + + +@normalizer +@linalg_errors +def cond(x: ArrayLike, p=None): + x = _atleast_float_1(x) + + # check if empty + # cf: https://github.com/numpy/numpy/blob/v1.24.0/numpy/linalg/linalg.py#L1744 + if x.numel() == 0 and math.prod(x.shape[-2:]) == 0: + raise LinAlgError("cond is not defined on empty arrays") + + result = torch.linalg.cond(x, p=p) + + # Convert nans to infs (numpy does it in a data-dependent way, depending on + # whether the input array has nans or not) + # XXX: NumPy does this: https://github.com/numpy/numpy/blob/v1.24.0/numpy/linalg/linalg.py#L1744 + return torch.where(torch.isnan(result), float("inf"), result) + + +@normalizer +@linalg_errors +def matrix_rank(a: ArrayLike, tol=None, hermitian=False): + a = _atleast_float_1(a) + + if a.ndim < 2: + return int((a != 0).any()) + + if tol is None: + # follow https://github.com/numpy/numpy/blob/v1.24.0/numpy/linalg/linalg.py#L1885 + atol = 0 + rtol = max(a.shape[-2:]) * torch.finfo(a.dtype).eps + 
else: + atol, rtol = tol, 0 + return torch.linalg.matrix_rank(a, atol=atol, rtol=rtol, hermitian=hermitian) + + +@normalizer +@linalg_errors +def norm(x: ArrayLike, ord=None, axis=None, keepdims: KeepDims = False): + x = _atleast_float_1(x) + return torch.linalg.norm(x, ord=ord, dim=axis) + + +# ### Decompositions ### + + +@normalizer +@linalg_errors +def cholesky(a: ArrayLike): + a = _atleast_float_1(a) + return torch.linalg.cholesky(a) + + +@normalizer +@linalg_errors +def qr(a: ArrayLike, mode="reduced"): + a = _atleast_float_1(a) + result = torch.linalg.qr(a, mode=mode) + if mode == "r": + # match NumPy + result = result.R + return result + + +@normalizer +@linalg_errors +def svd(a: ArrayLike, full_matrices=True, compute_uv=True, hermitian=False): + a = _atleast_float_1(a) + if not compute_uv: + return torch.linalg.svdvals(a) + + # NB: ignore the hermitian= argument (no pytorch equivalent) + result = torch.linalg.svd(a, full_matrices=full_matrices) + return result + + +# ### Eigenvalues and eigenvectors ### + + +@normalizer +@linalg_errors +def eig(a: ArrayLike): + a = _atleast_float_1(a) + w, vt = torch.linalg.eig(a) + + if not a.is_complex() and w.is_complex() and (w.imag == 0).all(): + w = w.real + vt = vt.real + return w, vt + + +@normalizer +@linalg_errors +def eigh(a: ArrayLike, UPLO="L"): + a = _atleast_float_1(a) + return torch.linalg.eigh(a, UPLO=UPLO) + + +@normalizer +@linalg_errors +def eigvals(a: ArrayLike): + a = _atleast_float_1(a) + result = torch.linalg.eigvals(a) + if not a.is_complex() and result.is_complex() and (result.imag == 0).all(): + result = result.real + return result + + +@normalizer +@linalg_errors +def eigvalsh(a: ArrayLike, UPLO="L"): + a = _atleast_float_1(a) + return torch.linalg.eigvalsh(a, UPLO=UPLO) diff --git a/venv/lib/python3.10/site-packages/torch/_numpy/random.py b/venv/lib/python3.10/site-packages/torch/_numpy/random.py new file mode 100644 index 0000000000000000000000000000000000000000..b10a4c667c8c6376dfef7663c2b18d91e6582d99 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/_numpy/random.py @@ -0,0 +1,191 @@ +# mypy: ignore-errors + +"""Wrapper to mimic (parts of) np.random API surface. + +NumPy has strict guarantees on reproducibility etc; here we don't give any. + +Q: default dtype is float64 in numpy + +""" +from __future__ import annotations + +import functools +from math import sqrt +from typing import Optional + +import torch + +from . 
import _dtypes_impl, _util +from ._normalizations import array_or_scalar, ArrayLike, normalizer + + +__all__ = [ + "seed", + "random_sample", + "sample", + "random", + "rand", + "randn", + "normal", + "choice", + "randint", + "shuffle", + "uniform", +] + + +def use_numpy_random(): + # local import to avoid ref cycles + import torch._dynamo.config as config + + return config.use_numpy_random_stream + + +def deco_stream(func): + @functools.wraps(func) + def inner(*args, **kwds): + if not use_numpy_random(): + return func(*args, **kwds) + else: + import numpy + + from ._ndarray import ndarray + + f = getattr(numpy.random, func.__name__) + + # numpy funcs accept numpy ndarrays, unwrap + args = tuple( + arg.tensor.numpy() if isinstance(arg, ndarray) else arg for arg in args + ) + kwds = { + key: val.tensor.numpy() if isinstance(val, ndarray) else val + for key, val in kwds.items() + } + + value = f(*args, **kwds) + + # `value` can be either numpy.ndarray or python scalar (or None) + if isinstance(value, numpy.ndarray): + value = ndarray(torch.as_tensor(value)) + + return value + + return inner + + +@deco_stream +def seed(seed=None): + if seed is not None: + torch.random.manual_seed(seed) + + +@deco_stream +def random_sample(size=None): + if size is None: + size = () + dtype = _dtypes_impl.default_dtypes().float_dtype + values = torch.empty(size, dtype=dtype).uniform_() + return array_or_scalar(values, return_scalar=size == ()) + + +def rand(*size): + if size == (): + size = None + return random_sample(size) + + +sample = random_sample +random = random_sample + + +@deco_stream +def uniform(low=0.0, high=1.0, size=None): + if size is None: + size = () + dtype = _dtypes_impl.default_dtypes().float_dtype + values = torch.empty(size, dtype=dtype).uniform_(low, high) + return array_or_scalar(values, return_scalar=size == ()) + + +@deco_stream +def randn(*size): + dtype = _dtypes_impl.default_dtypes().float_dtype + values = torch.randn(size, dtype=dtype) + return array_or_scalar(values, return_scalar=size == ()) + + +@deco_stream +def normal(loc=0.0, scale=1.0, size=None): + if size is None: + size = () + dtype = _dtypes_impl.default_dtypes().float_dtype + values = torch.empty(size, dtype=dtype).normal_(loc, scale) + return array_or_scalar(values, return_scalar=size == ()) + + +@deco_stream +def shuffle(x): + # no @normalizer because we do not cast e.g. 
lists to tensors + from ._ndarray import ndarray + + if isinstance(x, torch.Tensor): + tensor = x + elif isinstance(x, ndarray): + tensor = x.tensor + else: + raise NotImplementedError("We do not random.shuffle lists in-place") + + perm = torch.randperm(tensor.shape[0]) + xp = tensor[perm] + tensor.copy_(xp) + + +@deco_stream +def randint(low, high=None, size=None): + if size is None: + size = () + if not isinstance(size, (tuple, list)): + size = (size,) + if high is None: + low, high = 0, low + values = torch.randint(low, high, size=size) + return array_or_scalar(values, int, return_scalar=size == ()) + + +@deco_stream +@normalizer +def choice(a: ArrayLike, size=None, replace=True, p: Optional[ArrayLike] = None): + # https://stackoverflow.com/questions/59461811/random-choice-with-pytorch + if a.numel() == 1: + a = torch.arange(a) + + # TODO: check a.dtype is integer -- cf np.random.choice(3.4) which raises + + # number of draws + if size is None: + num_el = 1 + elif _util.is_sequence(size): + num_el = 1 + for el in size: + num_el *= el + else: + num_el = size + + # prepare the probabilities + if p is None: + p = torch.ones_like(a) / a.shape[0] + + # cf https://github.com/numpy/numpy/blob/main/numpy/random/mtrand.pyx#L973 + atol = sqrt(torch.finfo(p.dtype).eps) + if abs(p.sum() - 1.0) > atol: + raise ValueError("probabilities do not sum to 1.") + + # actually sample + indices = torch.multinomial(p, num_el, replacement=replace) + + if _util.is_sequence(size): + indices = indices.reshape(size) + + samples = a[indices] + + return samples diff --git a/venv/lib/python3.10/site-packages/torch/nn/backends/__init__.py b/venv/lib/python3.10/site-packages/torch/nn/backends/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/venv/lib/python3.10/site-packages/torch/nn/backends/__pycache__/__init__.cpython-310.pyc b/venv/lib/python3.10/site-packages/torch/nn/backends/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..d11e14a3e5886b0e17986c072e8c6d001d1d33b2 Binary files /dev/null and b/venv/lib/python3.10/site-packages/torch/nn/backends/__pycache__/__init__.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/torch/nn/backends/__pycache__/thnn.cpython-310.pyc b/venv/lib/python3.10/site-packages/torch/nn/backends/__pycache__/thnn.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..56c55464d36ae07ddc8d7cf1ea303d210ef26d85 Binary files /dev/null and b/venv/lib/python3.10/site-packages/torch/nn/backends/__pycache__/thnn.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/torch/nn/backends/thnn.py b/venv/lib/python3.10/site-packages/torch/nn/backends/thnn.py new file mode 100644 index 0000000000000000000000000000000000000000..5250b4bff1674880c97be7b36ca81d6cd6b665a4 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/nn/backends/thnn.py @@ -0,0 +1,4 @@ +# this is for historical pickle deserialization, it is not used otherwise + +def _get_thnn_function_backend(): + pass diff --git a/venv/lib/python3.10/site-packages/torch/nn/parallel/__init__.py b/venv/lib/python3.10/site-packages/torch/nn/parallel/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..d0708296d47a49f47f6eae0acc89bb556d9fe4ca --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/nn/parallel/__init__.py @@ -0,0 +1,14 @@ +from .parallel_apply import parallel_apply +from .replicate import replicate 
+from .data_parallel import DataParallel, data_parallel +from .scatter_gather import gather, scatter +from .distributed import DistributedDataParallel + +__all__ = ['replicate', 'scatter', 'parallel_apply', 'gather', 'data_parallel', + 'DataParallel', 'DistributedDataParallel'] + +def DistributedDataParallelCPU(*args, **kwargs): + import warnings + warnings.warn("torch.nn.parallel.DistributedDataParallelCPU is deprecated, " + "please use torch.nn.parallel.DistributedDataParallel instead.") + return DistributedDataParallel(*args, **kwargs) diff --git a/venv/lib/python3.10/site-packages/torch/nn/parallel/__pycache__/__init__.cpython-310.pyc b/venv/lib/python3.10/site-packages/torch/nn/parallel/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..132ccd0ed160a937d63173cce4bb2c24b7adb777 Binary files /dev/null and b/venv/lib/python3.10/site-packages/torch/nn/parallel/__pycache__/__init__.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/torch/nn/parallel/__pycache__/_functions.cpython-310.pyc b/venv/lib/python3.10/site-packages/torch/nn/parallel/__pycache__/_functions.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..0e010474648149e8716d4094ce5a2edc34e981de Binary files /dev/null and b/venv/lib/python3.10/site-packages/torch/nn/parallel/__pycache__/_functions.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/torch/nn/parallel/__pycache__/comm.cpython-310.pyc b/venv/lib/python3.10/site-packages/torch/nn/parallel/__pycache__/comm.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..ae84d9d4fbde48e2eee897cfbe7321565435e72c Binary files /dev/null and b/venv/lib/python3.10/site-packages/torch/nn/parallel/__pycache__/comm.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/torch/nn/parallel/__pycache__/data_parallel.cpython-310.pyc b/venv/lib/python3.10/site-packages/torch/nn/parallel/__pycache__/data_parallel.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..3687f1d6c920e0da85fb3c9fb83ee46ac7ff7e28 Binary files /dev/null and b/venv/lib/python3.10/site-packages/torch/nn/parallel/__pycache__/data_parallel.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/torch/nn/parallel/__pycache__/distributed.cpython-310.pyc b/venv/lib/python3.10/site-packages/torch/nn/parallel/__pycache__/distributed.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..834c3180284aa8ba922802ed60ef6d31b5443857 Binary files /dev/null and b/venv/lib/python3.10/site-packages/torch/nn/parallel/__pycache__/distributed.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/torch/nn/parallel/__pycache__/parallel_apply.cpython-310.pyc b/venv/lib/python3.10/site-packages/torch/nn/parallel/__pycache__/parallel_apply.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..8bddd078ea94d96c50059006e2904a44cf99a75a Binary files /dev/null and b/venv/lib/python3.10/site-packages/torch/nn/parallel/__pycache__/parallel_apply.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/torch/nn/parallel/__pycache__/replicate.cpython-310.pyc b/venv/lib/python3.10/site-packages/torch/nn/parallel/__pycache__/replicate.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..44bdc5a9849d3b189c360060dba729838251f39e Binary files /dev/null and 
b/venv/lib/python3.10/site-packages/torch/nn/parallel/__pycache__/replicate.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/torch/nn/parallel/__pycache__/scatter_gather.cpython-310.pyc b/venv/lib/python3.10/site-packages/torch/nn/parallel/__pycache__/scatter_gather.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..ca89039c607e5b6f2bbc3222f2979e36d53e2f4c Binary files /dev/null and b/venv/lib/python3.10/site-packages/torch/nn/parallel/__pycache__/scatter_gather.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/torch/nn/parallel/_functions.py b/venv/lib/python3.10/site-packages/torch/nn/parallel/_functions.py new file mode 100644 index 0000000000000000000000000000000000000000..d987ed2bc427462bf517665bda8e1889d5a58c90 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/nn/parallel/_functions.py @@ -0,0 +1,126 @@ +import warnings + +import torch +from . import comm +from torch.autograd import Function +from torch._utils import _get_device_index +from typing import List, Optional + + +class Broadcast(Function): + + @staticmethod + def forward(ctx, target_gpus, *inputs): + assert all(i.device.type != 'cpu' for i in inputs), ( + 'Broadcast function not implemented for CPU tensors' + ) + target_gpus = [_get_device_index(x, True) for x in target_gpus] + ctx.target_gpus = target_gpus + if len(inputs) == 0: + return tuple() + ctx.num_inputs = len(inputs) + ctx.input_device = inputs[0].get_device() + outputs = comm.broadcast_coalesced(inputs, ctx.target_gpus) + non_differentiables = [] + for idx, input_requires_grad in enumerate(ctx.needs_input_grad[1:]): + if not input_requires_grad: + for output in outputs: + non_differentiables.append(output[idx]) + ctx.mark_non_differentiable(*non_differentiables) + return tuple([t for tensors in outputs for t in tensors]) + + @staticmethod + def backward(ctx, *grad_outputs): + return (None,) + ReduceAddCoalesced.apply(ctx.input_device, ctx.num_inputs, *grad_outputs) + + +class ReduceAddCoalesced(Function): + + @staticmethod + def forward(ctx, destination, num_inputs, *grads): + ctx.target_gpus = [grads[i].get_device() for i in range(0, len(grads), num_inputs)] + + grads_ = [grads[i:i + num_inputs] + for i in range(0, len(grads), num_inputs)] + return comm.reduce_add_coalesced(grads_, destination) + + @staticmethod + def backward(ctx, *grad_outputs): + return (None, None,) + Broadcast.apply(ctx.target_gpus, *grad_outputs) + + +class Gather(Function): + + @staticmethod + def forward(ctx, target_device, dim, *inputs): + assert all(i.device.type != 'cpu' for i in inputs), ( + 'Gather function not implemented for CPU tensors' + ) + if (target_device == 'cpu'): + ctx.target_device = 'cpu' + else: + target_device = _get_device_index(target_device, True) + ctx.target_device = target_device + ctx.dim = dim + ctx.input_gpus = tuple(i.get_device() for i in inputs) + if all(t.dim() == 0 for t in inputs) and dim == 0: + inputs = tuple(t.view(1) for t in inputs) + warnings.warn('Was asked to gather along dimension 0, but all ' + 'input tensors were scalars; will instead unsqueeze ' + 'and return a vector.') + ctx.unsqueezed_scalar = True + else: + ctx.unsqueezed_scalar = False + ctx.input_sizes = tuple(i.size(ctx.dim) for i in inputs) + return comm.gather(inputs, ctx.dim, ctx.target_device) + + @staticmethod + def backward(ctx, grad_output): + scattered_grads = Scatter.apply(ctx.input_gpus, ctx.input_sizes, ctx.dim, grad_output) + if ctx.unsqueezed_scalar: + scattered_grads = tuple(g[0] for g 
in scattered_grads) + return (None, None) + scattered_grads + + +class Scatter(Function): + + @staticmethod + def forward(ctx, target_gpus, chunk_sizes, dim, input): + target_gpus = [_get_device_index(x, True) for x in target_gpus] + ctx.dim = dim + ctx.input_device = input.get_device() if input.device.type != "cpu" else -1 + streams = None + if torch.cuda.is_available() and ctx.input_device == -1: + # Perform CPU to GPU copies in a background stream + streams = [_get_stream(torch.device("cuda", device)) for device in target_gpus] + outputs = comm.scatter(input, target_gpus, chunk_sizes, ctx.dim, streams) + # Synchronize with the copy stream + if streams is not None: + for i, output in enumerate(outputs): + with torch.cuda.device(target_gpus[i]): + main_stream = torch.cuda.current_stream() + main_stream.wait_stream(streams[i]) + output.record_stream(main_stream) + return outputs + + @staticmethod + def backward(ctx, *grad_output): + return None, None, None, Gather.apply(ctx.input_device, ctx.dim, *grad_output) + + +# background streams used for copying +_streams: Optional[List[Optional[torch.Stream]]] = None + +def _get_stream(device: torch.device): + """Get a background stream for copying between CPU and target device.""" + global _streams + if device.type == "cpu": + return None + device_mod = getattr(torch, device.type, None) + if device_mod is None: + return None + if _streams is None: + _streams = [None] * device_mod.device_count() + if _streams[device.index] is None: + _streams[device.index] = device_mod.Stream(device.index) + return _streams[device.index] diff --git a/venv/lib/python3.10/site-packages/torch/nn/parallel/comm.py b/venv/lib/python3.10/site-packages/torch/nn/parallel/comm.py new file mode 100644 index 0000000000000000000000000000000000000000..764775587d6859665fb13e2140c588ed5d91dc1c --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/nn/parallel/comm.py @@ -0,0 +1,236 @@ +import warnings +import torch +from torch.cuda import nccl +from torch._utils import _take_tensors, _flatten_dense_tensors, \ + _unflatten_dense_tensors, _reorder_tensors_as, _get_device_index, _handle_complex +from typing import List + +def broadcast(tensor, devices=None, *, out=None): + r"""Broadcasts a tensor to specified GPU devices. + + Args: + tensor (Tensor): tensor to broadcast. Can be on CPU or GPU. + devices (Iterable[torch.device, str or int], optional): an iterable of + GPU devices, among which to broadcast. + out (Sequence[Tensor], optional, keyword-only): the GPU tensors to + store output results. + + .. note:: + Exactly one of :attr:`devices` and :attr:`out` must be specified. + + Returns: + - If :attr:`devices` is specified, + a tuple containing copies of :attr:`tensor`, placed on + :attr:`devices`. + - If :attr:`out` is specified, + a tuple containing :attr:`out` tensors, each containing a copy of + :attr:`tensor`. + """ + tensor = _handle_complex(tensor) + if not ((devices is None) ^ (out is None)): + raise RuntimeError( + f"Exactly one of 'devices' and 'out' must be specified, but got devices={devices} and out={out}") + if devices is not None: + devices = [_get_device_index(d) for d in devices] + return torch._C._broadcast(tensor, devices) + else: + return torch._C._broadcast_out(tensor, out) + + +def broadcast_coalesced(tensors, devices, buffer_size=10485760): + """Broadcast a sequence of tensors to the specified GPUs. + + Small tensors are first coalesced into a buffer to reduce the number of synchronizations. + + Args: + tensors (sequence): tensors to broadcast. 
Must be on the same device, + either CPU or GPU. + devices (Iterable[torch.device, str or int]): an iterable of GPU + devices, among which to broadcast. + buffer_size (int): maximum size of the buffer used for coalescing + + Returns: + A tuple containing copies of :attr:`tensor`, placed on :attr:`devices`. + """ + devices = [_get_device_index(d) for d in devices] + tensors = [_handle_complex(t) for t in tensors] + return torch._C._broadcast_coalesced(tensors, devices, buffer_size) + + +def reduce_add(inputs, destination=None): + """Sum tensors from multiple GPUs. + + All inputs should have matching shapes, dtype, and layout. The output tensor + will be of the same shape, dtype, and layout. + + Args: + inputs (Iterable[Tensor]): an iterable of tensors to add. + destination (int, optional): a device on which the output will be + placed (default: current device). + + Returns: + A tensor containing an elementwise sum of all inputs, placed on the + :attr:`destination` device. + """ + destination = _get_device_index(destination, optional=True) + input_size = inputs[0].size() + root_index = None # index of input tensor that already is on the correct device + for i, inp in enumerate(inputs): + assert inp.device.type != "cpu", "reduce_add expects all inputs to be on GPUs" + if inp.get_device() == destination: + root_index = i + if inp.size() != input_size: + got = 'x'.join(str(x) for x in inp.size()) + expected = 'x'.join(str(x) for x in input_size) + raise ValueError(f"input {i} has invalid size: got {got}, but expected {expected}") + if root_index is None: + raise RuntimeError("reduce_add expects destination to be on the same GPU with one of the tensors") + + if len(inputs) == 1: + return inputs[0] + + if nccl.is_available(inputs): + result = torch.empty_like(inputs[root_index]) + nccl.reduce(inputs, output=result, root=root_index) + else: + destination_device = torch.device(inputs[root_index].device.type, destination) + nonroot = [t for i, t in enumerate(inputs) if i != root_index] + # make a new tensor w/o clone + result = inputs[root_index] + nonroot[0].to(device=destination_device, non_blocking=True) + for other in nonroot[1:]: + result.add_(other.to(device=destination_device, non_blocking=True)) + return result + + +def reduce_add_coalesced(inputs, destination=None, buffer_size=10485760): + """Sum tensors from multiple GPUs. + + Small tensors are first coalesced into a buffer to reduce the number + of synchronizations. + + Args: + inputs (Iterable[Iterable[Tensor]]): iterable of iterables that + contain tensors from a single device. + destination (int, optional): a device on which the output will be + placed (default: current device). + buffer_size (int): maximum size of the buffer used for coalescing + + Returns: + A tuple of tensors containing an elementwise sum of each group of + inputs, placed on the ``destination`` device. + """ + # TODO: When `len(inputs) == 1` and all inputs are on `destination`, just + # return `inputs`. 
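+    # Reduction strategy: sparse tensor groups are reduced one group at a time
+    # (their sizes can differ across devices), while dense tensors are coalesced
+    # into flat buffers of at most `buffer_size` bytes via _take_tensors and
+    # reduced one flat buffer at a time.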
+ dense_tensors: List[List] = [[] for _ in inputs] # shape (num_gpus, num_tensors) + output = [] + ref_order = [] + # process sparse ones first since they may have different sizes on different gpus + for tensor_at_gpus in zip(*inputs): + if all(t.is_sparse for t in tensor_at_gpus): + result = reduce_add(tensor_at_gpus, destination) # this will be sparse too + output.append(result) + ref_order.append(tensor_at_gpus[0]) + else: + for coll, t in zip(dense_tensors, tensor_at_gpus): + coll.append(t.to_dense() if t.is_sparse else t) + ref_order.append(dense_tensors[0][-1]) + itrs = [_take_tensors(tensors, buffer_size) for tensors in dense_tensors] + # now the dense ones, which have consistent sizes + for chunks in zip(*itrs): + flat_tensors = [_flatten_dense_tensors(chunk) for chunk in chunks] # (num_gpus,) + flat_result = reduce_add(flat_tensors, destination) + for t in _unflatten_dense_tensors(flat_result, chunks[0]): + # The unflattened tensors do not share storage, and we don't expose + # base flat tensor anyways, so give them different version counters. + # See NOTE [ Version Counter in comm.*_coalesced ] + output.append(t.data) + return tuple(_reorder_tensors_as(output, ref_order)) + + +def scatter(tensor, devices=None, chunk_sizes=None, dim=0, streams=None, *, out=None): + """Scatters tensor across multiple GPUs. + + Args: + tensor (Tensor): tensor to scatter. Can be on CPU or GPU. + devices (Iterable[torch.device, str or int], optional): an iterable of + GPU devices, among which to scatter. + chunk_sizes (Iterable[int], optional): sizes of chunks to be placed on + each device. It should match :attr:`devices` in length and sums to + ``tensor.size(dim)``. If not specified, :attr:`tensor` will be divided + into equal chunks. + dim (int, optional): A dimension along which to chunk :attr:`tensor`. + Default: ``0``. + streams (Iterable[torch.cuda.Stream], optional): an iterable of Streams, among + which to execute the scatter. If not specified, the default stream will + be utilized. + out (Sequence[Tensor], optional, keyword-only): the GPU tensors to + store output results. Sizes of these tensors must match that of + :attr:`tensor`, except for :attr:`dim`, where the total size must + sum to ``tensor.size(dim)``. + + .. note:: + Exactly one of :attr:`devices` and :attr:`out` must be specified. When + :attr:`out` is specified, :attr:`chunk_sizes` must not be specified and + will be inferred from sizes of :attr:`out`. + + Returns: + - If :attr:`devices` is specified, + a tuple containing chunks of :attr:`tensor`, placed on + :attr:`devices`. + - If :attr:`out` is specified, + a tuple containing :attr:`out` tensors, each containing a chunk of + :attr:`tensor`. + """ + tensor = _handle_complex(tensor) + if out is None: + devices = [_get_device_index(d) for d in devices] + return tuple(torch._C._scatter(tensor, devices, chunk_sizes, dim, streams)) + else: + if devices is not None: + raise RuntimeError( + f"'devices' must not be specified when 'out' is specified, but got devices={devices}") + if chunk_sizes is not None: + raise RuntimeError( + f"'chunk_sizes' must not be specified when 'out' is specified, but got chunk_sizes={chunk_sizes}") + return tuple(torch._C._scatter_out(tensor, out, dim, streams)) + + +def gather(tensors, dim=0, destination=None, *, out=None): + r"""Gathers tensors from multiple GPU devices. + + Args: + tensors (Iterable[Tensor]): an iterable of tensors to gather. + Tensor sizes in all dimensions other than :attr:`dim` have to match. 
+ dim (int, optional): a dimension along which the tensors will be + concatenated. Default: ``0``. + destination (torch.device, str, or int, optional): the output device. + Can be CPU or CUDA. Default: the current CUDA device. + out (Tensor, optional, keyword-only): the tensor to store gather result. + Its sizes must match those of :attr:`tensors`, except for :attr:`dim`, + where the size must equal ``sum(tensor.size(dim) for tensor in tensors)``. + Can be on CPU or CUDA. + + .. note:: + :attr:`destination` must not be specified when :attr:`out` is specified. + + Returns: + - If :attr:`destination` is specified, + a tensor located on :attr:`destination` device, that is a result of + concatenating :attr:`tensors` along :attr:`dim`. + - If :attr:`out` is specified, + the :attr:`out` tensor, now containing results of concatenating + :attr:`tensors` along :attr:`dim`. + """ + tensors = [_handle_complex(t) for t in tensors] + if out is None: + if destination == -1: + warnings.warn( + 'Using -1 to represent CPU tensor is deprecated. Please use a ' + 'device object or string instead, e.g., "cpu".') + destination = _get_device_index(destination, allow_cpu=True, optional=True) + return torch._C._gather(tensors, dim, destination) + else: + if destination is not None: + raise RuntimeError( + f"'destination' must not be specified when 'out' is specified, but got destination={destination}") + return torch._C._gather_out(tensors, out, dim) diff --git a/venv/lib/python3.10/site-packages/torch/nn/parallel/data_parallel.py b/venv/lib/python3.10/site-packages/torch/nn/parallel/data_parallel.py new file mode 100644 index 0000000000000000000000000000000000000000..4471cee6f379fec8850e37163c11cde6838338fd --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/nn/parallel/data_parallel.py @@ -0,0 +1,269 @@ +import operator +import torch +import warnings +from itertools import chain +from typing import Any, Dict, Generic, List, Optional, Sequence, Tuple, TypeVar, Union +from ..modules import Module +from .scatter_gather import scatter_kwargs, gather +from .replicate import replicate +from .parallel_apply import parallel_apply +from torch._utils import ( + _get_all_device_indices, + _get_available_device_type, + _get_device_index, + _get_devices_properties +) + +__all__ = ['DataParallel', 'data_parallel'] + +def _check_balance(device_ids: Sequence[Union[int, torch.device]]) -> None: + imbalance_warn = """ + There is an imbalance between your GPUs. You may want to exclude GPU {} which + has less than 75% of the memory or cores of GPU {}. You can do so by setting + the device_ids argument to DataParallel, or by setting the CUDA_VISIBLE_DEVICES + environment variable.""" + device_ids = [_get_device_index(x, True) for x in device_ids] + dev_props = _get_devices_properties(device_ids) + + def warn_imbalance(get_prop): + values = [get_prop(props) for props in dev_props] + min_pos, min_val = min(enumerate(values), key=operator.itemgetter(1)) + max_pos, max_val = max(enumerate(values), key=operator.itemgetter(1)) + if min_val / max_val < 0.75: + warnings.warn(imbalance_warn.format(device_ids[min_pos], device_ids[max_pos])) + return True + return False + + if warn_imbalance(lambda props: props.total_memory): + return + if warn_imbalance(lambda props: props.multi_processor_count): + return + + +T = TypeVar("T", bound=Module) + + +class DataParallel(Module, Generic[T]): + r"""Implements data parallelism at the module level. 
+ + This container parallelizes the application of the given :attr:`module` by + splitting the input across the specified devices by chunking in the batch + dimension (other objects will be copied once per device). In the forward + pass, the module is replicated on each device, and each replica handles a + portion of the input. During the backwards pass, gradients from each replica + are summed into the original module. + + The batch size should be larger than the number of GPUs used. + + .. warning:: + It is recommended to use :class:`~torch.nn.parallel.DistributedDataParallel`, + instead of this class, to do multi-GPU training, even if there is only a single + node. See: :ref:`cuda-nn-ddp-instead` and :ref:`ddp`. + + Arbitrary positional and keyword inputs are allowed to be passed into + DataParallel but some types are specially handled. tensors will be + **scattered** on dim specified (default 0). tuple, list and dict types will + be shallow copied. The other types will be shared among different threads + and can be corrupted if written to in the model's forward pass. + + The parallelized :attr:`module` must have its parameters and buffers on + ``device_ids[0]`` before running this :class:`~torch.nn.DataParallel` + module. + + .. warning:: + In each forward, :attr:`module` is **replicated** on each device, so any + updates to the running module in ``forward`` will be lost. For example, + if :attr:`module` has a counter attribute that is incremented in each + ``forward``, it will always stay at the initial value because the update + is done on the replicas which are destroyed after ``forward``. However, + :class:`~torch.nn.DataParallel` guarantees that the replica on + ``device[0]`` will have its parameters and buffers sharing storage with + the base parallelized :attr:`module`. So **in-place** updates to the + parameters or buffers on ``device[0]`` will be recorded. E.g., + :class:`~torch.nn.BatchNorm2d` and :func:`~torch.nn.utils.spectral_norm` + rely on this behavior to update the buffers. + + .. warning:: + Forward and backward hooks defined on :attr:`module` and its submodules + will be invoked ``len(device_ids)`` times, each with inputs located on + a particular device. Particularly, the hooks are only guaranteed to be + executed in correct order with respect to operations on corresponding + devices. For example, it is not guaranteed that hooks set via + :meth:`~torch.nn.Module.register_forward_pre_hook` be executed before + `all` ``len(device_ids)`` :meth:`~torch.nn.Module.forward` calls, but + that each such hook be executed before the corresponding + :meth:`~torch.nn.Module.forward` call of that device. + + .. warning:: + When :attr:`module` returns a scalar (i.e., 0-dimensional tensor) in + :func:`forward`, this wrapper will return a vector of length equal to + number of devices used in data parallelism, containing the result from + each device. + + .. note:: + There is a subtlety in using the + ``pack sequence -> recurrent network -> unpack sequence`` pattern in a + :class:`~torch.nn.Module` wrapped in :class:`~torch.nn.DataParallel`. + See :ref:`pack-rnn-unpack-with-data-parallelism` section in FAQ for + details. 
+ + + Args: + module (Module): module to be parallelized + device_ids (list of int or torch.device): CUDA devices (default: all devices) + output_device (int or torch.device): device location of output (default: device_ids[0]) + + Attributes: + module (Module): the module to be parallelized + + Example:: + + >>> # xdoctest: +SKIP + >>> net = torch.nn.DataParallel(model, device_ids=[0, 1, 2]) + >>> output = net(input_var) # input_var can be on any device, including CPU + """ + + # TODO: update notes/cuda.rst when this class handles 8+ GPUs well + + def __init__( + self, + module: T, + device_ids: Optional[Sequence[Union[int, torch.device]]] = None, + output_device: Optional[Union[int, torch.device]] = None, + dim: int = 0, + ) -> None: + super().__init__() + torch._C._log_api_usage_once("torch.nn.parallel.DataParallel") + device_type = _get_available_device_type() + if device_type is None: + self.module = module + self.device_ids = [] + return + + if device_ids is None: + device_ids = _get_all_device_indices() + + if device_ids is None: + raise RuntimeError("no available devices were found") + + if output_device is None: + output_device = device_ids[0] + + self.dim = dim + self.module = module + self.device_ids = [_get_device_index(x, True) for x in device_ids] + self.output_device = _get_device_index(output_device, True) + self.src_device_obj = torch.device(device_type, self.device_ids[0]) + + if device_type == "cuda": + _check_balance(self.device_ids) + + if len(self.device_ids) == 1: + self.module.to(self.src_device_obj) + + def forward(self, *inputs: Any, **kwargs: Any) -> Any: + with torch.autograd.profiler.record_function("DataParallel.forward"): + if not self.device_ids: + return self.module(*inputs, **kwargs) + + for t in chain(self.module.parameters(), self.module.buffers()): + if t.device != self.src_device_obj: + raise RuntimeError("module must have its parameters and buffers " + f"on device {self.src_device_obj} (device_ids[0]) but found one of " + f"them on device: {t.device}") + + inputs, module_kwargs = self.scatter(inputs, kwargs, self.device_ids) + # for forward function without any inputs, empty list and dict will be created + # so the module can be executed on one device which is the first one in device_ids + if not inputs and not module_kwargs: + inputs = ((),) + module_kwargs = ({},) + + if len(self.device_ids) == 1: + return self.module(*inputs[0], **module_kwargs[0]) + replicas = self.replicate(self.module, self.device_ids[:len(inputs)]) + outputs = self.parallel_apply(replicas, inputs, module_kwargs) + return self.gather(outputs, self.output_device) + + def replicate(self, module: T, device_ids: Sequence[Union[int, torch.device]]) -> List[T]: + return replicate(module, device_ids, not torch.is_grad_enabled()) + + def scatter( + self, + inputs: Tuple[Any, ...], + kwargs: Optional[Dict[str, Any]], + device_ids: Sequence[Union[int, torch.device]], + ) -> Any: + return scatter_kwargs(inputs, kwargs, device_ids, dim=self.dim) + + def parallel_apply(self, replicas: Sequence[T], inputs: Sequence[Any], kwargs: Any) -> List[Any]: + return parallel_apply(replicas, inputs, kwargs, self.device_ids[:len(replicas)]) + + def gather(self, outputs: Any, output_device: Union[int, torch.device]) -> Any: + return gather(outputs, output_device, dim=self.dim) + + +def data_parallel( + module: Module, + inputs: Any, + device_ids: Optional[Sequence[Union[int, torch.device]]] = None, + output_device: Optional[Union[int, torch.device]] = None, + dim: int = 0, + module_kwargs: Optional[Any] = 
None, +) -> torch.Tensor: + r"""Evaluate module(input) in parallel across the GPUs given in device_ids. + + This is the functional version of the DataParallel module. + + Args: + module (Module): the module to evaluate in parallel + inputs (Tensor): inputs to the module + device_ids (list of int or torch.device): GPU ids on which to replicate module + output_device (list of int or torch.device): GPU location of the output Use -1 to indicate the CPU. + (default: device_ids[0]) + Returns: + a Tensor containing the result of module(input) located on + output_device + """ + if not isinstance(inputs, tuple): + inputs = (inputs,) if inputs is not None else () + + device_type = _get_available_device_type() + + if device_type is None: + raise RuntimeError("device type could not be determined") + + if device_ids is None: + device_ids = _get_all_device_indices() + + if device_ids is None: + raise RuntimeError("no available devices were found") + + if output_device is None: + output_device = device_ids[0] + + device_ids = [_get_device_index(x, True) for x in device_ids] + output_device = _get_device_index(output_device, True) + src_device_obj = torch.device(device_type, device_ids[0]) + + for t in chain(module.parameters(), module.buffers()): + if t.device != src_device_obj: + raise RuntimeError("module must have its parameters and buffers " + f"on device {src_device_obj} (device_ids[0]) but found one of " + f"them on device: {t.device}") + + inputs, module_kwargs = scatter_kwargs(inputs, module_kwargs, device_ids, dim) + # for module without any inputs, empty list and dict will be created + # so the module can be executed on one device which is the first one in device_ids + if not inputs and not module_kwargs: + inputs = ((),) + module_kwargs = ({},) + + assert module_kwargs is not None + + if len(device_ids) == 1: + return module(*inputs[0], **module_kwargs[0]) + used_device_ids = device_ids[:len(inputs)] + replicas = replicate(module, used_device_ids) + outputs = parallel_apply(replicas, inputs, module_kwargs, used_device_ids) + return gather(outputs, output_device, dim) diff --git a/venv/lib/python3.10/site-packages/torch/nn/parallel/distributed.py b/venv/lib/python3.10/site-packages/torch/nn/parallel/distributed.py new file mode 100644 index 0000000000000000000000000000000000000000..e968a99cf85ddd09f4d6cf1bc2ad76e69f1b552b --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/nn/parallel/distributed.py @@ -0,0 +1,2350 @@ +import copy +import functools +import inspect +import itertools +import logging +import os +import sys +import warnings +import weakref +from collections import defaultdict, deque +from contextlib import contextmanager +from dataclasses import dataclass, fields, is_dataclass +from enum import auto, Enum +from typing import Any, Callable, List, Optional, Tuple, Type + +import torch +import torch.distributed as dist +from torch.autograd import Function, Variable +from torch.distributed.algorithms.join import Join, Joinable, JoinHook +from torch.utils._pytree import tree_flatten, tree_unflatten +from torch.utils.hooks import RemovableHandle + +RPC_AVAILABLE = False +if dist.is_available(): + from torch.distributed.distributed_c10d import ( + _get_default_group, + _rank_not_in_group, + ReduceOp, + ) + from torch.distributed.utils import ( + _alloc_storage, + _cast_forward_inputs, + _free_storage, + _sync_module_states, + _to_kwargs, + _verify_param_shape_across_processes, + ) +if torch.distributed.rpc.is_available(): + RPC_AVAILABLE = True + from torch.distributed.rpc import 
RRef + +from torch._utils import _get_device_index + +from ..modules import Module +from .scatter_gather import gather, scatter_kwargs # noqa: F401 + +__all__ = ["DistributedDataParallel"] + +logger = logging.getLogger(__name__) + + +@dataclass +class _MixedPrecision: + """ + This configures DDP-native mixed precision training. + + Attributes: + param_dtype (torch.dtype): This specifies the dtype for model + parameters, inputs (when ``cast_forward_inputs`` is set to + ``True``), and therefore the dtype for computation. + However, outside the forward and backward passes, parameters are in + full precision. Model checkpointing always happens in full + precision. + reduce_dtype (torch.dtype): This specifies the dtype for gradient + reduction, which is permitted to differ from ``param_dtype``. + buffer_dtype (torch.dtype): This specifies the dtype for buffers. + + .. note:: This API is experimental and subject to change. + + .. note:: Only floating point tensors are cast to their specified dtypes. + + .. note:: ``state_dict`` checkpoints parameters and buffers in full + precision. + + .. note:: Each low precision dtype must be specified explicitly. For + example, ``_MixedPrecision(reduce_dtype=torch.float16)`` only specifies + the reduction dtype to be low precision, and DDP will not cast + parameters or buffers. + + .. note:: If a ``reduce_dtype`` is not specified, then gradient reduction + happens in ``param_dtype`` if specified or the original parameter dtype + otherwise. For example, ``_MixedPrecision(param_dtype=torch.float16)`` + would result in communication occurring in fp16. + """ + + param_dtype: Optional[torch.dtype] = None + reduce_dtype: Optional[torch.dtype] = None + buffer_dtype: Optional[torch.dtype] = None + # TODO (rohan-varma): keep_low_precision_grads: bool = False + # TODO (rohan-varma): APIs to allow users to run batchnorm and layernorm + # in full precision. For DDP, this can be implemented by not performing the + # parameter cast for BN and LN units. + + +def _cast_buffers(mixed_precision_config, root_module): + """Casts buffers to the given ``buffer_dtype``.""" + for buf in root_module.buffers(): + if hasattr(buf, "_ddp_ignored") and buf._ddp_ignored: + continue + + buf.data = buf.to(dtype=mixed_precision_config.buffer_dtype) + + +def _setup_mixed_precision_params(mixed_precision_config, root_module): + """Create and free storage for the mixed precision parameters.""" + for param in root_module.parameters(): + # Do not setup mixed precision for DDP ignored parameters. + if hasattr(param, "_ddp_ignored") and param._ddp_ignored: + continue + + if not hasattr(param, "_mp_param"): + param._mp_param = torch.zeros_like( + param, + device=param.device, + dtype=mixed_precision_config.param_dtype, + requires_grad=param.requires_grad, + ) + _free_storage(param._mp_param) + # _fp_param will point to the full precision param so it can be switched + # back to at the end of forward / backward. + param._fp_param = param.data + + +def _tree_flatten_with_rref(output): + output_is_rref = RPC_AVAILABLE and isinstance(output, RRef) + if output_is_rref: + output_tensor_list, treespec = tree_flatten(output.local_value()) + else: + output_tensor_list, treespec = tree_flatten(output) + # Need to return flattened tensors, spec to re-pack them, as well + # as if the return type was actually an RRef to reconstruct. 
+ return output_tensor_list, treespec, output_is_rref + + +def _tree_unflatten_with_rref(output, treespec, output_is_rref): + output = tree_unflatten(output, treespec) + if output_is_rref: + output = RRef(output) + return output + + +def _find_tensors(obj): + r"""Recursively find all tensors contained in the specified object.""" + if RPC_AVAILABLE and isinstance(obj, RRef): + # If the current node is the owner of the RRef, unwrap it and try to + # find Tensors. + # TODO: Expand to remote RRefs. + if obj.is_owner(): + return _find_tensors(obj.local_value()) + if isinstance(obj, torch.Tensor): + return [obj] + if isinstance(obj, (list, tuple)): + return itertools.chain.from_iterable(map(_find_tensors, obj)) + if isinstance(obj, dict): + return itertools.chain.from_iterable(map(_find_tensors, obj.values())) + if is_dataclass(obj): + return itertools.chain.from_iterable( + map(_find_tensors, (getattr(obj, f.name) for f in fields(obj))) + ) + + return [] + + +def _dump_DDP_relevant_env_vars(): + relevant_env_vars = [ + "RANK", + "LOCAL_RANK", + "WORLD_SIZE", + "MASTER_PORT", + "MASTER_ADDR", + "CUDA_VISIBLE_DEVICES", + "GLOO_SOCKET_IFNAME", + "GLOO_DEVICE_TRANSPORT", + "NCCL_SOCKET_IFNAME", + "TORCH_NCCL_BLOCKING_WAIT", + "NCCL_DEBUG", + "NCCL_DEBUG_SUBSYS", + "NCCL_IB_DISABLE", + # More NCCL env vars: + "NCCL_P2P_DISABLE", + "NCCL_P2P_LEVEL", + "NCCL_SHM_DISABLE", + "NCCL_SOCKET_NTHREADS", + "NCCL_NSOCKS_PERTHREAD", + "NCCL_BUFFSIZE", + "NCCL_NTHREADS", + "NCCL_RINGS", + "NCCL_MAX_NCHANNELS", + "NCCL_MIN_NCHANNELS", + "NCCL_CHECKS_DISABLE", + "NCCL_CHECK_POINTERS", + "NCCL_LAUNCH_MODE", + "NCCL_IB_HCA", + "NCCL_IB_TIMEOUT", + "NCCL_IB_RETRY_CNT", + "NCCL_IB_GID_INDEX", + "NCCL_IB_SL", + "NCCL_IB_TC", + "NCCL_IB_AR_THRESHOLD", + "NCCL_IB_CUDA_SUPPORT", + "NCCL_NET_GDR_LEVEL", + "NCCL_NET_GDR_READ", + "NCCL_SINGLE_RING_THRESHOLD", + "NCCL_LL_THRESHOLD", + "NCCL_TREE_THRESHOLD", + "NCCL_ALGO", + "NCCL_PROTO", + "NCCL_IGNORE_CPU_AFFINITY", + "NCCL_DEBUG_FILE", + "NCCL_COLLNET_ENABLE", + "NCCL_TOPO_FILE", + "NCCL_TOPO_DUMP_FILE", + "TORCH_NCCL_ASYNC_ERROR_HANDLING", + ] + formatted_output = "" + for var in relevant_env_vars: + value = os.environ[var] if var in os.environ else "N/A" + formatted_output += f"env:{var}={value}\n" + print(formatted_output) + + +class _BufferCommHookLocation(Enum): + PRE_FORWARD = auto() + POST_FORWARD = auto() + + +@dataclass +class _BufferCommHook: + buffer_comm_hook: Callable + buffer_comm_hook_state: Any + buffer_comm_hook_location: _BufferCommHookLocation + + +# Add a DDPSink to run various functions when backwards starts, such as +# queueing call back of out-most backward/graph task, +# this helps call back is fired after all gradients' calculation +# is completed. +class _DDPSink(Function): + @staticmethod + def forward(ctx, ddp_weakref, *inputs): + # set_materialize_grads(False) will ensure that None gradients stay as + # None and are not filled with zeros. + ctx.set_materialize_grads(False) + ctx.ddp_weakref = ddp_weakref + ret = tuple( + inp.clone() if isinstance(inp, torch.Tensor) else inp for inp in inputs + ) + return ret + + @staticmethod + def backward(ctx, *grad_outputs): + # Enqueue delay allreduce for static graph training on the first + # iteration. 
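+        # The callback queued below (``reducer._delay_all_reduce``) runs after
+        # the outermost backward graph task completes, so all gradients have
+        # been accumulated before the delayed allreduce fires.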
+ ddp_weakref = ctx.ddp_weakref() + reducer = ddp_weakref.reducer + static_graph = ddp_weakref.static_graph + delay_ar_enqueued = ( + static_graph and ddp_weakref._static_graph_delay_allreduce_enqueued + ) + if static_graph and not delay_ar_enqueued: + Variable._execution_engine.queue_callback( # type: ignore[call-arg,misc] + reducer._delay_all_reduce + ) + ddp_weakref._static_graph_delay_allreduce_enqueued = True + + return (None, *grad_outputs) + + +class _DDPJoinHook(JoinHook): + def __init__(self, ddp, divide_by_initial_world_size): + """Set config variables for internal usage.""" + assert isinstance(ddp, DistributedDataParallel), ( + "DDP join hook requires passing in a DistributedDataParallel " + "instance as the state" + ) + assert ddp.logger is not None + ddp.logger._set_uneven_input_join() + self.ddp = ddp + self.ddp._divide_by_initial_world_size = divide_by_initial_world_size + super().__init__() + + def main_hook(self): + """Shadow the DDP collective communication operations in the forward and backward passes.""" + ddp = self.ddp + # Buckets are rebuilt only once during a training period + ddp.reducer._rebuild_buckets() + + # Schedule a broadcast if we are syncing module buffers in the + # forward pass + # TODO: make DDP uneven inputs context manager support buffer + # comm hook (https://github.com/pytorch/pytorch/issues/65436) + ddp._check_and_sync_module_buffers() + + # Check if need to sync in the backward pass + should_sync_backwards = ddp._check_global_requires_backward_grad_sync( + is_joined_rank=True + ) + # Forward parameter sync is disabled in the next iteration if we + # are skipping gradient sync this iteration, so set + # `require_forward_param_sync` accordingly + ddp.require_forward_param_sync = should_sync_backwards + if not should_sync_backwards: + return + + # Schedule one allreduce per gradient bucket to match the backward + # pass allreduce + ddp._match_all_reduce_for_bwd_pass() + + # Check if we need to allreduce locally unused parameters + if ddp.find_unused_parameters: + ddp._match_unused_params_allreduce() + + # Rebuilt parameters are pushed only once during a training period + ddp.reducer._push_all_rebuilt_params() + + def post_hook(self, is_last_joiner: bool): + """Sync the final model to ensure that the model is the same across all processes.""" + self.ddp._sync_final_model(is_last_joiner) + + +class DistributedDataParallel(Module, Joinable): + r"""Implement distributed data parallelism based on ``torch.distributed`` at module level. + + This container provides data parallelism by synchronizing gradients + across each model replica. The devices to synchronize across are + specified by the input ``process_group``, which is the entire world + by default. Note that ``DistributedDataParallel`` does not chunk or + otherwise shard the input across participating GPUs; the user is + responsible for defining how to do so, for example through the use + of a :class:`DistributedSampler`. + + See also: :ref:`distributed-basics` and :ref:`cuda-nn-ddp-instead`. + The same constraints on input as in :class:`torch.nn.DataParallel` apply. + + Creation of this class requires that ``torch.distributed`` to be already + initialized, by calling :func:`torch.distributed.init_process_group`. + + ``DistributedDataParallel`` is proven to be significantly faster than + :class:`torch.nn.DataParallel` for single-node multi-GPU data + parallel training. 
+ + To use ``DistributedDataParallel`` on a host with N GPUs, you should spawn + up ``N`` processes, ensuring that each process exclusively works on a single + GPU from 0 to N-1. This can be done by either setting + ``CUDA_VISIBLE_DEVICES`` for every process or by calling: + + >>> # xdoctest: +SKIP("undefined variables") + >>> torch.cuda.set_device(i) + + where i is from 0 to N-1. In each process, you should refer the following + to construct this module: + + >>> # xdoctest: +SKIP("undefined variables") + >>> torch.distributed.init_process_group( + >>> backend='nccl', world_size=N, init_method='...' + >>> ) + >>> model = DistributedDataParallel(model, device_ids=[i], output_device=i) + + In order to spawn up multiple processes per node, you can use either + ``torch.distributed.launch`` or ``torch.multiprocessing.spawn``. + + .. note:: + Please refer to `PyTorch Distributed Overview `__ + for a brief introduction to all features related to distributed training. + + .. note:: + ``DistributedDataParallel`` can be used in conjunction with + :class:`torch.distributed.optim.ZeroRedundancyOptimizer` to reduce + per-rank optimizer states memory footprint. Please refer to + `ZeroRedundancyOptimizer recipe `__ + for more details. + + .. note:: ``nccl`` backend is currently the fastest and highly recommended + backend when using GPUs. This applies to both single-node and + multi-node distributed training. + + .. note:: This module also supports mixed-precision distributed training. + This means that your model can have different types of parameters such + as mixed types of ``fp16`` and ``fp32``, the gradient reduction on these + mixed types of parameters will just work fine. + + .. note:: If you use ``torch.save`` on one process to checkpoint the module, + and ``torch.load`` on some other processes to recover it, make sure that + ``map_location`` is configured properly for every process. Without + ``map_location``, ``torch.load`` would recover the module to devices + where the module was saved from. + + .. note:: When a model is trained on ``M`` nodes with ``batch=N``, the + gradient will be ``M`` times smaller when compared to the same model + trained on a single node with ``batch=M*N`` if the loss is summed (NOT + averaged as usual) across instances in a batch (because the gradients + between different nodes are averaged). You should take this into + consideration when you want to obtain a mathematically equivalent + training process compared to the local training counterpart. But in most + cases, you can just treat a DistributedDataParallel wrapped model, a + DataParallel wrapped model and an ordinary model on a single GPU as the + same (E.g. using the same learning rate for equivalent batch size). + + .. note:: + Parameters are never broadcast between processes. The module performs + an all-reduce step on gradients and assumes that they will be modified + by the optimizer in all processes in the same way. Buffers + (e.g. BatchNorm stats) are broadcast from the module in process of rank + 0, to all other replicas in the system in every iteration. + + .. note:: + If you are using DistributedDataParallel in conjunction with the + :ref:`distributed-rpc-framework`, you should always use + :meth:`torch.distributed.autograd.backward` to compute gradients and + :class:`torch.distributed.optim.DistributedOptimizer` for optimizing + parameters. 
+ + Example:: + + >>> # xdoctest: +SKIP("undefined variables") + >>> import torch.distributed.autograd as dist_autograd + >>> from torch.nn.parallel import DistributedDataParallel as DDP + >>> import torch + >>> from torch import optim + >>> from torch.distributed.optim import DistributedOptimizer + >>> import torch.distributed.rpc as rpc + >>> from torch.distributed.rpc import RRef + >>> + >>> t1 = torch.rand((3, 3), requires_grad=True) + >>> t2 = torch.rand((3, 3), requires_grad=True) + >>> rref = rpc.remote("worker1", torch.add, args=(t1, t2)) + >>> ddp_model = DDP(my_model) + >>> + >>> # Setup optimizer + >>> optimizer_params = [rref] + >>> for param in ddp_model.parameters(): + >>> optimizer_params.append(RRef(param)) + >>> + >>> dist_optim = DistributedOptimizer( + >>> optim.SGD, + >>> optimizer_params, + >>> lr=0.05, + >>> ) + >>> + >>> with dist_autograd.context() as context_id: + >>> pred = ddp_model(rref.to_here()) + >>> loss = loss_func(pred, target) + >>> dist_autograd.backward(context_id, [loss]) + >>> dist_optim.step(context_id) + + .. note:: + DistributedDataParallel currently offers limited support for gradient + checkpointing with :meth:`torch.utils.checkpoint`. + If the checkpoint is done with use_reentrant=False (recommended), DDP + will work as expected without any limitations. + If, however, the checkpoint is done with use_reentrant=True (the default), + DDP will work as expected when there are no unused parameters in the model + and each layer is checkpointed at most once (make sure you are not passing + `find_unused_parameters=True` to DDP). We currently do not support the + case where a layer is checkpointed multiple times, or when there unused + parameters in the checkpointed model. + + .. note:: + To let a non-DDP model load a state dict from a DDP model, + :meth:`~torch.nn.modules.utils.consume_prefix_in_state_dict_if_present` + needs to be applied to strip the prefix "module." in the DDP state dict before loading. + + .. warning:: + Constructor, forward method, and differentiation of the output (or a + function of the output of this module) are distributed synchronization + points. Take that into account in case different processes might be + executing different code. + + .. warning:: + This module assumes all parameters are registered in the model by the + time it is created. No parameters should be added nor removed later. + Same applies to buffers. + + .. warning:: + This module assumes all parameters are registered in the model of each + distributed processes are in the same order. The module itself will + conduct gradient ``allreduce`` following the reverse order of the + registered parameters of the model. In other words, it is users' + responsibility to ensure that each distributed process has the exact + same model and thus the exact same parameter registration order. + + .. warning:: + This module allows parameters with non-rowmajor-contiguous strides. + For example, your model may contain some parameters whose + :class:`torch.memory_format` is ``torch.contiguous_format`` + and others whose format is ``torch.channels_last``. However, + corresponding parameters in different processes must have the + same strides. + + .. warning:: + This module doesn't work with :func:`torch.autograd.grad` (i.e. it will + only work if gradients are to be accumulated in ``.grad`` attributes of + parameters). + + .. 
warning:: + If you plan on using this module with a ``nccl`` backend or a ``gloo`` + backend (that uses Infiniband), together with a DataLoader that uses + multiple workers, please change the multiprocessing start method to + ``forkserver`` (Python 3 only) or ``spawn``. Unfortunately + Gloo (that uses Infiniband) and NCCL2 are not fork safe, and you will + likely experience deadlocks if you don't change this setting. + + .. warning:: + You should never try to change your model's parameters after wrapping + up your model with ``DistributedDataParallel``. Because, when + wrapping up your model with ``DistributedDataParallel``, the constructor + of ``DistributedDataParallel`` will register the additional gradient + reduction functions on all the parameters of the model itself at the + time of construction. If you change the model's parameters afterwards, + gradient reduction functions no longer match the correct set of + parameters. + + .. warning:: + Using ``DistributedDataParallel`` in conjunction with the + :ref:`distributed-rpc-framework` is experimental and subject to change. + + Args: + module (Module): module to be parallelized + device_ids (list of int or torch.device): CUDA devices. + 1) For single-device modules, ``device_ids`` can + contain exactly one device id, which represents the only + CUDA device where the input module corresponding to this process resides. + Alternatively, ``device_ids`` can also be ``None``. + 2) For multi-device modules and CPU modules, + ``device_ids`` must be ``None``. + + When ``device_ids`` is ``None`` for both cases, + both the input data for the forward pass and the actual module + must be placed on the correct device. + (default: ``None``) + output_device (int or torch.device): Device location of output for + single-device CUDA modules. For multi-device modules and + CPU modules, it must be ``None``, and the module itself + dictates the output location. (default: ``device_ids[0]`` + for single-device modules) + broadcast_buffers (bool): Flag that enables syncing (broadcasting) + buffers of the module at beginning of the ``forward`` + function. (default: ``True``) + process_group: The process group to be used for distributed data + all-reduction. If ``None``, the default process group, which + is created by :func:`torch.distributed.init_process_group`, + will be used. (default: ``None``) + bucket_cap_mb: ``DistributedDataParallel`` will bucket parameters into + multiple buckets so that gradient reduction of each + bucket can potentially overlap with backward computation. + :attr:`bucket_cap_mb` controls the bucket size in + MegaBytes (MB). (default: 25) + find_unused_parameters (bool): Traverse the autograd graph from all + tensors contained in the return value of the + wrapped module's ``forward`` function. Parameters + that don't receive gradients as part of this + graph are preemptively marked as being ready to + be reduced. In addition, parameters that may have + been used in the wrapped module's ``forward`` + function but were not part of loss computation and + thus would also not receive gradients are + preemptively marked as ready to be reduced. + (default: ``False``) + check_reduction: This argument is deprecated. + gradient_as_bucket_view (bool): When set to ``True``, gradients will be views + pointing to different offsets of ``allreduce`` communication + buckets. This can reduce peak memory usage, where the + saved memory size will be equal to the total gradients + size. 
Moreover, it avoids the overhead of copying between + gradients and ``allreduce`` communication buckets. When + gradients are views, ``detach_()`` cannot be called on the + gradients. If hitting such errors, please fix it by + referring to the :meth:`~torch.optim.Optimizer.zero_grad` + function in ``torch/optim/optimizer.py`` as a solution. + Note that gradients will be views after first iteration, so + the peak memory saving should be checked after first iteration. + static_graph (bool): When set to ``True``, DDP knows the trained graph is + static. Static graph means 1) The set of used and unused + parameters will not change during the whole training loop; in + this case, it does not matter whether users set + ``find_unused_parameters = True`` or not. 2) How the graph is trained + will not change during the whole training loop (meaning there is + no control flow depending on iterations). + When static_graph is set to be ``True``, DDP will support cases that + can not be supported in the past: + 1) Reentrant backwards. + 2) Activation checkpointing multiple times. + 3) Activation checkpointing when model has unused parameters. + 4) There are model parameters that are outside of forward function. + 5) Potentially improve performance when there are unused parameters, + as DDP will not search graph in each iteration to detect unused + parameters when static_graph is set to be ``True``. + To check whether you can set static_graph to be ``True``, one way is to + check ddp logging data at the end of your previous model training, + if ``ddp_logging_data.get("can_set_static_graph") == True``, mostly you + can set ``static_graph = True`` as well. + + Example:: + >>> # xdoctest: +SKIP("undefined variables") + >>> model_DDP = torch.nn.parallel.DistributedDataParallel(model) + >>> # Training loop + >>> ... + >>> ddp_logging_data = model_DDP._get_ddp_logging_data() + >>> static_graph = ddp_logging_data.get("can_set_static_graph") + delay_all_reduce_named_params (list of tuple of str and torch.nn.Parameter): a list + of named parameters whose all reduce will be delayed when the gradient of + the parameter specified in ``param_to_hook_all_reduce`` is ready. Other + arguments of DDP do not apply to named params specified in this argument + as these named params will be ignored by DDP reducer. + param_to_hook_all_reduce (torch.nn.Parameter): a parameter to hook delayed all reduce + of parameters specified in ``delay_all_reduce_named_params``. + + + Attributes: + module (Module): the module to be parallelized. 
+ + Example:: + + >>> # xdoctest: +SKIP("undefined variables") + >>> torch.distributed.init_process_group(backend='nccl', world_size=4, init_method='...') + >>> net = torch.nn.parallel.DistributedDataParallel(model) + """ + + # used to track whether the given thread is inside ddp forward for torchdynamo purposes + _active_ddp_module: Optional["DistributedDataParallel"] = None + + def __init__( + self, + module, + device_ids=None, + output_device=None, + dim=0, + broadcast_buffers=True, + process_group=None, + bucket_cap_mb=25, + find_unused_parameters=False, + check_reduction=False, + gradient_as_bucket_view=False, + static_graph=False, + delay_all_reduce_named_params=None, + param_to_hook_all_reduce=None, + mixed_precision: Optional[_MixedPrecision] = None, + device_mesh=None, + ): + super().__init__() + Joinable.__init__(self) + self.logger = None + if bool(delay_all_reduce_named_params is not None) != bool( + param_to_hook_all_reduce is not None + ): + self._log_and_throw( + ValueError, + "delay_all_reduce_named_params and param_to_hook_all_reduce " + "need to be set at the same time.", + ) + + self._delay_all_reduce_params = [] + if hasattr(module, "_ddp_params_and_buffers_to_ignore"): + self.parameters_to_ignore = set(module._ddp_params_and_buffers_to_ignore) + else: + self.parameters_to_ignore = set() + if delay_all_reduce_named_params is not None: + for name, param in delay_all_reduce_named_params: + self.parameters_to_ignore.add(name) + self._delay_all_reduce_params.append(param) + + self._module_parameters = [ + p + for n, p in module.named_parameters() + if n not in self.parameters_to_ignore + ] + if not any(p.requires_grad for p in self._module_parameters): + if len(self._delay_all_reduce_params): + logger.info("Delay the AllReduce of all parameters.") + else: + self._log_and_throw( + RuntimeError, + "DistributedDataParallel is not needed when a module " + "doesn't have any parameter that requires a gradient.", + ) + + if device_ids is not None and len(device_ids) > 1: + self._log_and_throw( + ValueError, + "device_ids can only be None or contain a single element.", + ) + + self.is_multi_device_module = ( + len({p.device for p in self._module_parameters}) > 1 + ) + distinct_device_types = { + p.device.type for p in self._module_parameters if p.device is not None + } + if len(distinct_device_types) != 1: + self._log_and_throw( + ValueError, + "DistributedDataParallel's input module must be on " + f"the same type of devices, but input module parameters locate in {distinct_device_types}.", + ) + + self.device_type = next(iter(distinct_device_types)) + + if ( + device_ids is None + or len(device_ids) == 0 # For backward compatibility. + or self.device_type == "cpu" + or self.is_multi_device_module + ): + if device_ids or output_device: + self._log_and_throw( + ValueError, + "DistributedDataParallel device_ids and output_device arguments " + "only work with single-device/multiple-device GPU modules or CPU modules, " + "but got device_ids {}, output_device {}, and module parameters {}.".format( + device_ids, + output_device, + {p.device for p in self._module_parameters}, + ), + ) + + self.device_ids = None + self.output_device = None + else: + self.device_ids = [_get_device_index(x, True) for x in device_ids] + + if output_device is None: + output_device = device_ids[0] + + self.output_device = _get_device_index(output_device, True) + + if process_group and device_mesh is not None: + raise RuntimeError( + "Cannot specify both process_group and device_mesh arguments." 
+ ) + elif process_group is None and device_mesh is None: + self.process_group = _get_default_group() + elif device_mesh is None: + self.process_group = process_group + else: + if device_mesh.ndim != 1: + raise RuntimeError( + f"Only 1D device mesh is supported, but got {device_mesh}." + ) + self.device_mesh = device_mesh + self.process_group = device_mesh.get_group(mesh_dim=0) + + self.static_graph = False + self.dim = dim + self.module = module + self.device = next(iter(self._module_parameters)).device + self.broadcast_buffers = broadcast_buffers + self.find_unused_parameters = find_unused_parameters + self.require_backward_grad_sync = True + self.require_forward_param_sync = True + self.gradient_as_bucket_view = gradient_as_bucket_view + self.mixed_precision = mixed_precision + if self.mixed_precision is not None: + logger.warning("Received mixed precision config %s", self.mixed_precision) + + if check_reduction: + # This argument is no longer used since the reducer + # will ensure reduction completes even if some parameters + # do not receive gradients. + warnings.warn( + "The `check_reduction` argument in `DistributedDataParallel` " + "module is deprecated. Please avoid using it." + ) + + # Check that a module does not have Uninitialized parameters + for param in self._module_parameters: + if isinstance(param, torch.nn.parameter.UninitializedParameter): + self._log_and_throw( + RuntimeError, + "Modules with uninitialized parameters can't be used with `DistributedDataParallel`. " + "Run a dummy forward pass to correctly initialize the modules", + ) + # used for intra-node param sync and inter-node sync as well + self.broadcast_bucket_size = int(250 * 1024 * 1024) + + # reduction bucket size + self.bucket_bytes_cap = int(bucket_cap_mb * 1024 * 1024) + # Whether to perform input tensor CPU to GPU copies on a side-stream + self.use_side_stream_for_tensor_copies = ( + os.environ.get("PYTORCH_DDP_USE_SIDE_STREAM", "1") == "1" + ) + + # Initialize gradient buffers and register all reduce hook + self._delay_grad_buffer = None + self._delay_grad_views: List[torch.Tensor] = [] + self._delay_all_reduce_all_params = False + if len(self._delay_all_reduce_params) != 0: + self._register_delay_all_reduce_hook( + bucket_cap_mb=bucket_cap_mb, + param_to_hook_all_reduce=param_to_hook_all_reduce, + device_ids=device_ids, + ) + if self._delay_all_reduce_all_params: + return + + # Build parameters for reducer. + parameters, expect_sparse_gradient = self._build_params_for_reducer() + # Verify model equivalence. + _verify_param_shape_across_processes(self.process_group, parameters) + # Sync params and buffers. Ensures all DDP models start off at the same value. + _sync_module_states( + module=self.module, + process_group=self.process_group, + broadcast_bucket_size=self.broadcast_bucket_size, + src=0, + params_and_buffers_to_ignore=self.parameters_to_ignore, + broadcast_buffers=self.broadcast_buffers, + ) + # In debug mode, build a mapping of parameter index -> parameter. + param_to_name_mapping = self._build_debug_param_to_name_mapping(parameters) + + # Builds reducer. + self._ddp_init_helper( + parameters, + expect_sparse_gradient, + param_to_name_mapping, + static_graph, + ) + self._comm_hooks: List[Tuple[Callable, object]] = [] + + if self.mixed_precision is not None: + _setup_mixed_precision_params(self.mixed_precision, self.module) + _cast_buffers(self.mixed_precision, self.module) + # Stream used for async low precision copies. 
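+            # Low precision copies into ``param._mp_param`` are issued on this
+            # side stream; ``_root_copy_hook`` records one CUDA event per
+            # submodule and ``_module_wait_for_copy_hook`` waits on it before
+            # that submodule's forward runs.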
+ self._mp_stream = torch.cuda.Stream() + self._submodule_to_event = defaultdict(deque) # type: ignore[var-annotated] + # Add forward pre-hook to root module to kick off copies to lower + # precision. + self.module.register_forward_pre_hook( + self._root_copy_hook, prepend=False, with_kwargs=True + ) + # Add forward pre hook to all submodules to wait for copy events + # before running computation. + for module in self.module.modules(): + module.register_forward_pre_hook( + self._module_wait_for_copy_hook, + prepend=False, + with_kwargs=True, + ) + # Set up callbacks in backward to upcast and use full precision + # params. TODO (rohan-varma): Make this compose with general + # comm hooks and apply_optimizer_in_backward. Importing inline to + # avoid circular import issue. + from torch.distributed.algorithms.ddp_comm_hooks.mixed_precision_hooks import ( + _AllreduceUpcastHookState, + _reducer_allreduce_and_upcast_hook, + ) + + upcast_hook_state = _AllreduceUpcastHookState( + ddp_weakref=weakref.ref(self), + upcast_stream=torch.cuda.Stream(), + ) + self.register_comm_hook( + upcast_hook_state, + _reducer_allreduce_and_upcast_hook, + ) + # Inform reducer of reduced precision param dtype for correctness + # of type checks between gradient and bucket. + self.reducer._set_mixed_precision_param_dtype( # type: ignore[attr-defined] + self.mixed_precision.param_dtype + ) + + self._has_rebuilt_buckets = False + + if static_graph: + self._set_static_graph() + + self._lazy_init_ran = False + + # Register the AccumulateGrad post hooks if optimize_ddp is + # True. The hooks will be deregistered if compiled_autograd is not + # enabled. + self._accum_grad_hooks: List[RemovableHandle] = [] + optimize_ddp = torch._dynamo.config._get_optimize_ddp_mode() + self._use_python_reducer = optimize_ddp in ( + "python_reducer", + "python_reducer_without_compiled_forward", + ) + self._force_to_disable_cpp_reducer = ( + optimize_ddp == "python_reducer_without_compiled_forward" + ) + if self._use_python_reducer: + self._register_accum_grad_hook() + + def _register_accum_grad_hook(self): + import torch.distributed._functional_collectives as fcol + + def compiled_accum_grad_hook( + param, + *, + param_index: int, + ): + if not self.require_backward_grad_sync: + return + + if param.grad is None: + return + + if self._comm_hooks: + for hook, state in self._comm_hooks: + hook(state, (param.grad, param)) + else: + gradient = param.grad / self.process_group.size() + gradient = fcol.all_reduce(gradient, "sum", self.process_group) + param.grad.copy_(gradient) + + for index, param in enumerate(self._module_parameters): + self._accum_grad_hooks.append( + param.register_post_accumulate_grad_hook( + functools.partial( + compiled_accum_grad_hook, + param_index=index, + ) + ) + ) + + def _delayed_all_reduce_hook(self, grad): + world_size = dist.get_world_size(self.process_group) + + self._delay_grad_buffer.div_(world_size) # type: ignore[union-attr] + _ = dist.all_reduce( + self._delay_grad_buffer, group=self.process_group, async_op=True + ) + return grad + + def _register_delay_all_reduce_hook( + self, + bucket_cap_mb, + param_to_hook_all_reduce, + device_ids, + ): + # 1. Create gradient buffer + device = torch.device("cpu") if device_ids is None else device_ids[0] + self._delay_grad_buffer = torch.zeros( + sum([p.numel() for p in self._delay_all_reduce_params]), + device=device, + ) + + # 2. 
Broadcast the parameters + detached_params = [p.detach() for p in self._delay_all_reduce_params] + dist._broadcast_coalesced(self.process_group, detached_params, bucket_cap_mb, 0) + + # 3. Hook all reduce to the specified parameter + param_to_hook_all_reduce.register_hook(self._delayed_all_reduce_hook) + + # 4. Build tensor views for gradients + offset = 0 + for param in self._delay_all_reduce_params: + grad_view = self._delay_grad_buffer[offset : (offset + param.numel())].view( + param.shape + ) + self._delay_grad_views.append(grad_view) + offset = offset + param.numel() + + # 5. Check whether the all reduce of all params requiring grad is delayed. + for module_name, module in self.module.named_modules(): + for param_name, param in module.named_parameters(recurse=False): + if param.requires_grad: + full_name = f"{module_name}.{param_name}" + if full_name not in self.parameters_to_ignore: + # There is at least a param whose all reduce will not be delayed. + # In this case, we should not set self._delay_all_reduce_all_params + # to True. + return + self._delay_all_reduce_all_params = True + + def _setup_in_backward_optimizers(self): + # Check if user has used apply_optim_in_backward to overlap optimizer + # step + DDP backward. Current constraints: + # 1. Only allreduce is supported at the moment, no custom communication. + # 2. For DDP-managed parameters that have their optimizer run in + # backward, their gradients are set to ``None``. If your use case + # requires DDP parameters grad not to be set to ``None`` after their + # in-backward optimizer runs, please ping + # https://github.com/pytorch/pytorch/issues/90052. + # NOTE: we use self._module_parameters instead of .parameters() since + # the former excludes ignored (non-DDP managed) parameters. + if any(hasattr(p, "_in_backward_optimizers") for p in self._module_parameters): + torch._C._log_api_usage_once("ddp.optimizer_in_backward") + # Remove hooks that apply_optim_in_backward had registered because + # DDP customizes how optimizer is overlapped with backward due to + # the allreduce. + param_to_handle_map = ( + dist.optim.apply_optimizer_in_backward.param_to_optim_hook_handle_map + ) + for p in self._module_parameters: + for handle in param_to_handle_map.get(p, []): + handle.remove() + + # Need a weakref to DDP instance to run all_reduce (from reducer) + # and get managed DDP parameters. + ddp_weakref = weakref.ref(self) + # Note: importing in function, otherwise this will cause a circular + # import. + from torch.distributed.algorithms.ddp_comm_hooks.optimizer_overlap_hooks import ( + _apply_optim_in_backward_hook, + ) + + self.register_comm_hook( + ddp_weakref, + _apply_optim_in_backward_hook( + gradient_is_bucket_view=self.gradient_as_bucket_view + ), + ) + + self.reducer._set_optimizer_in_backward() # type: ignore[attr-defined] + + def _fire_reducer_autograd_hook(self, idx, *unused): + """ + Fire the reducer's autograd hook to allreduce params in a Reducer bucket. + + Note that this is only used during mixed precision training as the + Reducer's hooks installed during construction time would not be called + as we're working in the low precision parameter setting. + """ + self.reducer._autograd_hook(idx) # type: ignore[attr-defined] + + def _root_copy_hook(self, *args: Any, **kwargs: Any) -> None: + """ + For DDP mixed precision, put low precision copies on separate stream and create events to wait for them. 
+ + When training with DDP mixed precision, this root pre-forward hook kicks + off low precision copies on a separate stream and creates respective + events to wait for them. + """ + # Clear out previous iteration submodule to event. This is because we + # may have populated some events for modules that didn't end up being + # used. + self._submodule_to_event = defaultdict(deque) # type: ignore[var-annotated] + with torch.cuda.stream(self._mp_stream): + for submodule in self.module.modules(): + for param in submodule.parameters(recurse=False): + # Do not cast DDP ignored parameters. + if hasattr(param, "_ddp_ignored") and param._ddp_ignored: + continue + _alloc_storage(param._mp_param, param.size()) + # copy() implicitly casts to low precision + with torch.no_grad(): + param._mp_param.copy_(param.data) + # TODO: when zero_grad(set_to_none=False) or in grad + # accumulation case, accumulated grads can be in fp32 + # which can cause errors when running DDP backwards due + # to mismatched incoming and accumulated gradient types. + # So we manually cast the accumulated grad down for now, + # in the future we may shift to FSDP style gradient + # accumulation management where the accumulated gradient + # is saved and .grad field is set to None, bypassing + # this issue. + if param.grad is not None: + param.grad.data = param.grad.to( + self.mixed_precision.param_dtype # type: ignore[union-attr] + ) + param.data = param._mp_param + copy_event = torch.cuda.Event() + copy_event.record() + self._submodule_to_event[submodule].append(copy_event) + + def _module_wait_for_copy_hook( + self, + module, + *args: Any, + **kwargs: Any, + ) -> None: + """Before carrying out computation, wait on the appropriate event to ensure low precision copies have finished.""" + try: + event = self._submodule_to_event[module].popleft() + except IndexError: + # copy event has already been waited on + return + + event.wait(stream=torch.cuda.current_stream()) + for p in module.parameters(recurse=False): + # Don't register hooks if param does not require grad + if not p.requires_grad or (hasattr(p, "_ddp_ignored") and p._ddp_ignored): + continue + # We need to register autograd hook here instead of DDP's ctor + # since we're working with the low precision param. Register them + # via obtaining the gradient accumulator. + tmp = p.expand_as(p) + grad_acc = tmp.grad_fn.next_functions[0][0] + + hook = grad_acc.register_hook( + functools.partial(self._fire_reducer_autograd_hook, p._idx) + ) + p._ddp_mp_hook_state = (grad_acc, hook) + + def _log_and_throw(self, err_type, err_msg): + if self.logger is not None: + self.logger.set_error_and_log(f"{str(err_type)}: {err_msg}") + raise err_type(err_msg) + + def _ddp_init_helper( + self, + parameters, + expect_sparse_gradient, + param_to_name_mapping, + static_graph, + ): + """ + DDP init helper function to manage parameters, grad hooks, logging, and SyncBatchNorm. + + Initialization helper function that does the following: + (1) bucketing the parameters for reductions + (2) resetting the bucketing states + (3) registering the grad hooks + (4) Logging construction-time DDP logging data + (5) passing a handle of DDP to SyncBatchNorm Layer + """ + # Notice, the parameters order is not in the order in which they are used, + # especially in models with control flow. + # + # Alongside parameters are not presented in the real execution order, + # if a certain model happens to also + # 1) have other collectives comm ops in its backward graph. 
+ # 2) have unused parameter in subset ranks of the whole world. + # bucketing could insert ALL-REDUCE comm op too early on the rank with unused parameter, + # matching up with other collectives comm ops on other ranks unexpectedly. + # + # In order to handle this corner case, when the parameters are not in the real execution order, + # we don't do bucketing, thus only one ALL-REDUCE is inserted after all the gradients + # of the whole graph are computed. + # + # Notice, here we only disable bucketing for the first iteration. + # After the first iteration, it's OK to rebuild buckets, + # because "bucket rebuild" bucketizes parameters based on its real execution order in backward graph. + + # Can remove this branching once #73732 is landed. + if static_graph is True or self.find_unused_parameters is False: + bucket_size_limits = [sys.maxsize] + else: + bucket_size_limits = [ + dist._DEFAULT_FIRST_BUCKET_BYTES, + self.bucket_bytes_cap, + ] + ( + bucket_indices, + per_bucket_size_limits, + ) = dist._compute_bucket_assignment_by_size( + parameters, + bucket_size_limits, + expect_sparse_gradient, + ) + + # Remember index for parameters if we are in mixed precision, as we + # need to pass in index to Reducer's autograd hook via python. + if self.mixed_precision is not None: + for i, p in enumerate(parameters): + p._idx = i + + # Note: reverse list of buckets because we want to approximate the + # order in which their gradients are produced, and assume they + # are used in the forward pass in the order they are defined. + self.reducer = dist.Reducer( + parameters, + list(reversed(bucket_indices)), + list(reversed(per_bucket_size_limits)), + self.process_group, + expect_sparse_gradient, + # The bucket size limit is specified in the constructor. + # Additionally, we allow for a single small bucket for parameters + # that are defined first, such that their gradients don't spill into + # a much larger bucket, adding unnecessary latency after gradient + # computation finishes. Experiments showed 1MB is a reasonable value. + self.bucket_bytes_cap, + self.find_unused_parameters, + self.gradient_as_bucket_view, + param_to_name_mapping, + # User can set dist._DEFAULT_FIRST_BUCKET_BYTES to tune DDP first + # bucket. + dist._DEFAULT_FIRST_BUCKET_BYTES, + ) + + self.logger = dist.Logger(self.reducer) + # Set as a weak reference to avoid reference cycle between + # logger and reducer. + self.reducer.set_logger(self.logger) + + has_sync_bn = False + for submodule in self.module.modules(): + if isinstance(submodule, torch.nn.SyncBatchNorm): + has_sync_bn = True + break + + # Set logging data that can be got during construction time. 
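+        # (module class name, device ids, output device, broadcast_buffers,
+        # whether a SyncBatchNorm layer is present, and static_graph)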
+ self.logger.set_construction_data_and_log( + self.module.__class__.__name__, + [] if self.device_ids is None else self.device_ids, + -1 if self.output_device is None else self.output_device, + self.broadcast_buffers, + has_sync_bn, + static_graph, + ) + + # passing a handle to torch.nn.SyncBatchNorm layer + self._passing_sync_batchnorm_handle(self.module) + + def __getstate__(self): + self._check_default_group() + attrs = copy.copy(self.__dict__) + del attrs["process_group"] + del attrs["reducer"] + del attrs["logger"] + return attrs + + def __setstate__(self, state): + # If serializable, then the process group should be the default one + self.process_group = _get_default_group() + super().__setstate__(state) + self.__dict__.setdefault("require_forward_param_sync", True) + self.__dict__.setdefault("require_backward_grad_sync", True) + parameters, expect_sparse_gradient = self._build_params_for_reducer() + # In debug mode, build a mapping of parameter index -> parameter. + param_to_name_mapping = self._build_debug_param_to_name_mapping(parameters) + # Builds reducer. + self._ddp_init_helper( + parameters, + expect_sparse_gradient, + param_to_name_mapping, + self.static_graph, + ) + if self.static_graph: + self.reducer._set_static_graph() + assert self.logger is not None + self.logger._set_static_graph() + + def _build_params_for_reducer(self): + # Build tuple of (module, parameter) for all parameters that require grads. + modules_and_parameters = [ + (module, parameter) + for module_name, module in self.module.named_modules() + for parameter in [ + param + # Note that we access module.named_parameters instead of + # parameters(module). parameters(module) is only needed in the + # single-process multi device case, where it accesses replicated + # parameters through _former_parameters. + for param_name, param in module.named_parameters(recurse=False) + if param.requires_grad + and f"{module_name}.{param_name}" not in self.parameters_to_ignore + ] + ] + + # Deduplicate any parameters that might be shared across child modules. + memo = set() + modules_and_parameters = [ + # "p not in memo" is the deduplication check. + # "not memo.add(p)" is always True, and it's only there to cause "add(p)" if needed. + (m, p) + for m, p in modules_and_parameters + if p not in memo and not memo.add(p) # type: ignore[func-returns-value] + ] + + # Build list of parameters. + parameters = [parameter for _, parameter in modules_and_parameters] + + # Checks if a module will produce a sparse gradient. + def produces_sparse_gradient(module): + if isinstance(module, (torch.nn.Embedding, torch.nn.EmbeddingBag)): + return module.sparse + return False + + # Build list of booleans indicating whether or not to expect sparse + # gradients for the corresponding parameters. + expect_sparse_gradient = [ + produces_sparse_gradient(module) for module, _ in modules_and_parameters + ] + + self._assign_modules_buffers() + + return parameters, expect_sparse_gradient + + def _assign_modules_buffers(self): + """ + Assign self.module.named_buffers to self.modules_buffers. + + Assigns module buffers to self.modules_buffers which are then used to + broadcast across ranks when broadcast_buffers=True. Note that this + must be called every time buffers need to be synced because buffers can + be reassigned by user module, + see https://github.com/pytorch/pytorch/issues/63916. + """ + # Collect buffers for modules, filtering out buffers that should be ignored. 
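+        # Buffers whose fully qualified names are in ``self.parameters_to_ignore``
+        # (populated from ``module._ddp_params_and_buffers_to_ignore``) are
+        # excluded here, so they are never broadcast.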
+ named_module_buffers = [ + (buffer, buffer_name) + for buffer_name, buffer in self.module.named_buffers() + if buffer_name not in self.parameters_to_ignore + ] + self.modules_buffers = [ + buffer for (buffer, buffer_name) in named_module_buffers + ] + # Dict[str, tensor] representing module buffers not ignored by DDP. + self.named_module_buffers = { + buffer_name: buffer for (buffer, buffer_name) in named_module_buffers + } + + def _build_debug_param_to_name_mapping(self, parameters): + param_to_param_index = {parameters[i]: i for i in range(len(parameters))} + param_set = set(parameters) + param_index_to_param_fqn = {} + for module_name, module in self.module.named_modules(): + for param_name, param in module.named_parameters(recurse=False): + fqn = f"{module_name}.{param_name}" + # Bypass ignored parameters since those are not reduced by DDP + # to begin with. + if fqn not in self.parameters_to_ignore and param.requires_grad: + if param not in param_set: + self._log_and_throw( + ValueError, + f"Param with name {fqn} found in module parameters, but not DDP parameters." + " This indicates a bug in DDP, please report an issue to PyTorch.", + ) + param_index = param_to_param_index[param] + param_index_to_param_fqn[param_index] = fqn + + # Ensure we covered all parameters + if len(param_set) != len(param_index_to_param_fqn): + self._log_and_throw( + ValueError, + ( + "Expected param to name mapping to cover all parameters, but" + f" got conflicting lengths: {len(param_set)} vs " + f"{len(param_index_to_param_fqn)}. This indicates a bug in DDP" + ", please report an issue to PyTorch." + ), + ) + + return param_index_to_param_fqn + + def _get_parameters(self, m, recurse=True): + """Return a generator of module parameters.""" + + def model_parameters(m): + ps = ( + m._former_parameters.values() + if hasattr(m, "_former_parameters") + else m.parameters(recurse=False) + ) + yield from ps + + for mod in m.modules() if recurse else [m]: + yield from model_parameters(mod) + + def _check_default_group(self): + pickle_not_supported = False + try: + if self.process_group != _get_default_group(): + pickle_not_supported = True + except RuntimeError: + pickle_not_supported = True + + if pickle_not_supported: + self._log_and_throw( + RuntimeError, + "DDP Pickling/Unpickling are only supported " + "when using DDP with the default process " + "group. That is, when you have called " + "init_process_group and have not passed " + "process_group argument to DDP constructor", + ) + + @contextmanager + def no_sync(self): + r""" + Context manager to disable gradient synchronizations across DDP processes. + + Within this context, gradients will be accumulated on module + variables, which will later be synchronized in the first + forward-backward pass exiting the context. + + Example:: + + >>> # xdoctest: +SKIP("undefined variables") + >>> ddp = torch.nn.parallel.DistributedDataParallel(model, pg) + >>> with ddp.no_sync(): + >>> for input in inputs: + >>> ddp(input).backward() # no synchronization, accumulate grads + >>> ddp(another_input).backward() # synchronize grads + + .. warning:: + The forward pass should be included inside the context manager, or + else gradients will still be synchronized. 
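+
+        .. note::
+            ``require_backward_grad_sync`` is read during the forward pass to
+            decide whether the reducer prepares for gradient reduction, which
+            is why the forward call must be issued inside this context.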
+ """ + old_require_backward_grad_sync = self.require_backward_grad_sync + self.require_backward_grad_sync = False + try: + yield + finally: + self.require_backward_grad_sync = old_require_backward_grad_sync + + @classmethod + def _get_active_ddp_module(cls): + """`TorchDynamo` requires DDP's status and module for cooperative optimization.""" + return cls._active_ddp_module + + # note, this ctxmgr function is marked 'skip' in torchdynamo, so dynamo only kicks in + # for the 'module_to_run' underneath + # see torch._dynamo/eval_frame.py TorchPatcher.patch for more details + @contextmanager + @torch._disable_dynamo(recursive=False) + def _inside_ddp_forward(self): + DistributedDataParallel._active_ddp_module = self + try: + yield + finally: + DistributedDataParallel._active_ddp_module = None + + def _run_ddp_forward(self, *inputs, **kwargs): + if self._use_python_reducer: + return self.module(*inputs, **kwargs) # type: ignore[index] + else: + with self._inside_ddp_forward(): + return self.module(*inputs, **kwargs) # type: ignore[index] + + def _clear_grad_buffer(self): + # Making param.grad points to the grad buffers before backward is based on the + # assumption that the grad accumulation is done in place in autograd engine, + # for some edge cases, if the grad accumulation in autograd engine is not in + # place, then the param.grad and grad buffers are detached. + if self._delay_grad_buffer is not None: + # We batch zero_grad for all params by resetting the whole grad + # buffer when the grad of all params is set to None. + all_param_grad_none = all( + param.grad is None for param in self._delay_all_reduce_params + ) + + for index, param in enumerate(self._delay_all_reduce_params): + if param.grad is None: + param.grad = self._delay_grad_views[index] + if not all_param_grad_none: + param.grad.zero_() + + if all_param_grad_none: + self._delay_grad_buffer.zero_() + + def _lazy_init(self): + # Initialization for DDP that occurs after construction, but lazily + # before the first forward pass. + self._setup_in_backward_optimizers() + self._lazy_init_ran = True + + def _should_disable_cpp_reducer(self) -> bool: + return self._use_python_reducer and ( + torch._utils.is_compiling() or self._force_to_disable_cpp_reducer + ) + + def _pre_forward(self, *inputs, **kwargs): + if self._should_disable_cpp_reducer(): + return inputs, kwargs + + # Disable the python reducer if compiled_autograd is not enabled. + if self._accum_grad_hooks: + for index, h in enumerate(self._accum_grad_hooks): + h.remove() + self._accum_grad_hooks.clear() + + if not self._lazy_init_ran and not torch._utils.is_compiling(): + self._lazy_init() + + if self._delay_all_reduce_all_params: + return inputs, kwargs + + if torch.is_grad_enabled() and self.require_backward_grad_sync: + assert self.logger is not None + self.logger.set_runtime_stats_and_log() + self.reducer.prepare_for_forward() + + # Notify the join context that this process has not joined, if + # needed + work = Join.notify_join_context(self) + if work: + self.reducer._set_forward_pass_work_handle( + work, self._divide_by_initial_world_size # type: ignore[arg-type] + ) + + # Calling _rebuild_buckets before forward computation, + # It may allocate new buckets before deallocating old buckets + # inside _rebuild_buckets. To save peak memory usage, + # call _rebuild_buckets before the peak memory usage increases + # during forward computation. + # This should be called only once during whole training period. 
+ if torch.is_grad_enabled() and self.reducer._rebuild_buckets(): + logger.info("Reducer buckets have been rebuilt in this iteration.") + self._has_rebuilt_buckets = True + + # sync params according to location (before/after forward) user + # specified as part of hook, if hook was specified. + if self._check_sync_bufs_pre_fwd(): + self._sync_buffers() + + if self._join_config.enable: + # Notify joined ranks whether they should sync in backwards pass or not. + self._check_global_requires_backward_grad_sync(is_joined_rank=False) + + if self.device_ids: + moved_inputs, moved_kwargs = _to_kwargs( + inputs, + kwargs, + torch.device(self.device_type, self.device_ids[0]), + self.use_side_stream_for_tensor_copies, + ) + args, kwargs = moved_inputs[0], moved_kwargs[0] + # Cast inputs to reduced precision if needed. + if self.mixed_precision is not None: + args, kwargs = _cast_forward_inputs( + self.mixed_precision.param_dtype, + *args, + **kwargs, + ) + return args, kwargs + else: + # Cast inputs to reduced precision if needed. + # TODO (rohan-varma) test this codepath. + if self.mixed_precision is not None: + inputs, kwargs = _cast_forward_inputs( + self.mixed_precision.param_dtype, + *inputs, + **kwargs, + ) + return inputs, kwargs + + def _post_forward(self, output): + if self._should_disable_cpp_reducer(): + return output + + if self._delay_all_reduce_all_params: + self._clear_grad_buffer() + return output + + # sync params according to location (before/after forward) user + # specified as part of hook, if hook was specified. + if self._check_sync_bufs_post_fwd(): + self._sync_buffers() + + if torch.is_grad_enabled() and self.require_backward_grad_sync: + self.require_forward_param_sync = True + # We'll return the output object verbatim since it is a freeform + # object. We need to find any tensors in this object, though, + # because we need to figure out which parameters were used during + # this forward pass, to ensure we short circuit reduction for any + # unused parameters. Only if `find_unused_parameters` is set. + if self.find_unused_parameters and not self.static_graph: + # Do not need to populate this for static graph. + self.reducer.prepare_for_backward(list(_find_tensors(output))) + else: + self.reducer.prepare_for_backward([]) + else: + self.require_forward_param_sync = False + + # TODO: DDPSink is currently enabled for unused parameter detection and + # static graph training for first iteration. + if (self.find_unused_parameters and not self.static_graph) or ( + self.static_graph and not self._static_graph_delay_allreduce_enqueued + ): + ( + output_tensor_list, + treespec, + output_is_rref, + ) = _tree_flatten_with_rref(output) + output_placeholders = [None for _ in range(len(output_tensor_list))] + # Do not touch tensors that have no grad_fn, which can cause issues + # such as https://github.com/pytorch/pytorch/issues/60733 + for i, output in enumerate(output_tensor_list): + if torch.is_tensor(output) and output.grad_fn is None: + output_placeholders[i] = output + + # When find_unused_parameters=True, makes tensors which require grad + # run through the DDPSink backward pass. When not all outputs are + # used in loss, this makes those corresponding tensors receive + # undefined gradient which the reducer then handles to ensure + # param.grad field is not touched and we don't error out. 
+ passthrough_tensor_list = _DDPSink.apply( + weakref.ref(self), + *output_tensor_list, + ) + for i in range(len(output_placeholders)): + if output_placeholders[i] is None: + output_placeholders[i] = passthrough_tensor_list[i] + + # Reconstruct output data structure. + output = _tree_unflatten_with_rref( + output_placeholders, treespec, output_is_rref + ) + + # At the end of the forward pass, reset the grad buffer and grad views + self._clear_grad_buffer() + return output + + def forward(self, *inputs, **kwargs): + with torch.autograd.profiler.record_function("DistributedDataParallel.forward"): + inputs, kwargs = self._pre_forward(*inputs, **kwargs) + output = ( + self.module.forward(*inputs, **kwargs) + if self._delay_all_reduce_all_params + else self._run_ddp_forward(*inputs, **kwargs) + ) + return self._post_forward(output) + + def scatter(self, inputs, kwargs, device_ids): + return scatter_kwargs(inputs, kwargs, device_ids, dim=self.dim) + + def to_kwargs(self, inputs, kwargs, device_id): + # Kept for BC + return _to_kwargs( + inputs, + kwargs, + torch.device(self.device_type, device_id), + self.use_side_stream_for_tensor_copies, + ) + + def gather(self, outputs, output_device): + return gather(outputs, output_device, dim=self.dim) + + def train(self, mode=True): + super().train(mode) + return self + + # When running in join mode, schedules an allreduce to notify joined ranks + # of whether backwards pass synchronization will run this iteration or not. + def _check_global_requires_backward_grad_sync(self, is_joined_rank): + if not is_joined_rank and self.require_backward_grad_sync: + requires_sync_tensor = torch.ones(1, device=self.device) + else: + requires_sync_tensor = torch.zeros(1, device=self.device) + + work = dist.all_reduce( + requires_sync_tensor, group=self.process_group, async_op=True + ) + + # (kwen2501) This if condition is a plain translation of previous + # behavior, i.e. in the `is_joined_rank=False` case, `work.wait()` + # is not called and it doesn't care about the result. I am guessing + # that it just wants to fire a matching all-reduce and does not want + # the main stream to wait. + if is_joined_rank: + work.wait() + should_sync_backwards = requires_sync_tensor.item() != 0 + return should_sync_backwards + else: + return None # Return value is not/should not be used. + + # When running in join mode, checks and performs sync of module buffers if + # the models have buffers that should be synchronized in the forward pass. + def _check_and_sync_module_buffers(self): + if self._check_sync_bufs_pre_fwd(): + authoritative_rank = self._find_common_rank(self._distributed_rank, False) + self._sync_module_buffers(authoritative_rank) + + # When running in join model, agrees upon a common rank and broadcast model + # parameters to all other ranks. + def _sync_final_model(self, is_last_joiner): + # Agree upon the process that will be the authoritative model copy. + # The current rank is a candidate for being the authoritative copy if + # is_last_joiner=True. We break ties via picking the larger rank. + self._authoritative_rank = self._find_common_rank( + self._distributed_rank, is_last_joiner + ) + _sync_module_states( + module=self.module, + process_group=self.process_group, + broadcast_bucket_size=self.broadcast_bucket_size, + src=self._authoritative_rank, + params_and_buffers_to_ignore=self.parameters_to_ignore, + broadcast_buffers=self.broadcast_buffers, + ) + + # Schedule comm ops to match those scheduled in the reducer's backward + # pass. 
+ def _match_all_reduce_for_bwd_pass(self): + comm_work = [] + # Schedule comm in the same order as Reducer schedules them, i.e. + # the order of the buckets. Retrieving the bucket order from the reducer + # ensures that we keep the same order in join mode, such as when bucket + # order is rebuilt dynamically. + + # Returns grad_buckets in order, but real tensors are substituted with + # zero tensors of the same shape. + grad_buckets = self.reducer._get_zeros_like_grad_buckets() + for grad_bucket in grad_buckets: + # Joined processes contribute zero gradient. In the case that + # divide_by_initial_world_size=True, we divide grads by the static + # world size, if not, the dividing factor is reduced by the number + # of joined processes. + work = self.reducer._run_comm_hook(grad_bucket) + comm_work.append(work) + for work in comm_work: + work.wait() + + # Allreduces the used parameter mapping across ranks. + def _match_unused_params_allreduce(self): + locally_used_param_map = self.reducer._get_local_used_map() + self.process_group.allreduce(locally_used_param_map) + + def join( + self, + divide_by_initial_world_size: bool = True, + enable: bool = True, + throw_on_early_termination: bool = False, + ): + r""" + Context manager for training with uneven inputs across processes in DDP. + + This context manager will keep track of already-joined DDP processes, + and "shadow" the forward and backward passes by inserting collective + communication operations to match with the ones created by non-joined + DDP processes. This will ensure each collective call has a corresponding + call by already-joined DDP processes, preventing hangs or errors that + would otherwise happen when training with uneven inputs across + processes. Alternatively, if the flag ``throw_on_early_termination`` is + specified to be ``True``, all trainers will throw an error once one rank + runs out of inputs, allowing these errors to be caught and handled + according to application logic. + + Once all DDP processes have joined, the context manager will broadcast + the model corresponding to the last joined process to all processes to + ensure the model is the same across all processes + (which is guaranteed by DDP). + + To use this to enable training with uneven inputs across processes, + simply wrap this context manager around your training loop. No further + modifications to the model or data loading is required. + + .. warning:: + If the model or training loop this context manager is wrapped around + has additional distributed collective operations, such as + ``SyncBatchNorm`` in the model's forward pass, then the flag + ``throw_on_early_termination`` must be enabled. This is because this + context manager is not aware of non-DDP collective communication. + This flag will cause all ranks to throw when any one rank + exhausts inputs, allowing these errors to be caught and recovered + from across all ranks. + + Args: + divide_by_initial_world_size (bool): If ``True``, will divide + gradients by the initial ``world_size`` DDP training was launched + with. If ``False``, will compute the effective world size + (number of ranks that have not depleted their inputs yet) and + divide gradients by that during allreduce. Set + ``divide_by_initial_world_size=True`` to ensure every input + sample including the uneven inputs have equal weight in terms of + how much they contribute to the global gradient. This is + achieved by always dividing the gradient by the initial + ``world_size`` even when we encounter uneven inputs. 
If you set + this to ``False``, we divide the gradient by the remaining + number of nodes. This ensures parity with training on a smaller + ``world_size`` although it also means the uneven inputs would + contribute more towards the global gradient. Typically, you + would want to set this to ``True`` for cases where the last few + inputs of your training job are uneven. In extreme cases, where + there is a large discrepancy in the number of inputs, setting + this to ``False`` might provide better results. + enable (bool): Whether to enable uneven input detection or not. Pass + in ``enable=False`` to disable in cases where you know that + inputs are even across participating processes. Default is + ``True``. + throw_on_early_termination (bool): Whether to throw an error + or continue training when at least one rank has exhausted + inputs. If ``True``, will throw upon the first rank reaching end + of data. If ``False``, will continue training with a smaller + effective world size until all ranks are joined. Note that if + this flag is specified, then the flag + ``divide_by_initial_world_size`` would be ignored. Default + is ``False``. + + + Example:: + + >>> # xdoctest: +SKIP("Distributed") + >>> import torch + >>> import torch.distributed as dist + >>> import os + >>> import torch.multiprocessing as mp + >>> import torch.nn as nn + >>> # On each spawned worker + >>> def worker(rank): + >>> dist.init_process_group("nccl", rank=rank, world_size=2) + >>> torch.cuda.set_device(rank) + >>> model = nn.Linear(1, 1, bias=False).to(rank) + >>> model = torch.nn.parallel.DistributedDataParallel( + >>> model, device_ids=[rank], output_device=rank + >>> ) + >>> # Rank 1 gets one more input than rank 0. + >>> inputs = [torch.tensor([1]).float() for _ in range(10 + rank)] + >>> with model.join(): + >>> for _ in range(5): + >>> for inp in inputs: + >>> loss = model(inp).sum() + >>> loss.backward() + >>> # Without the join() API, the below synchronization will hang + >>> # blocking for rank 1's allreduce to complete. + >>> torch.cuda.synchronize(device=rank) + """ + return Join( + [self], + enable, + throw_on_early_termination, + divide_by_initial_world_size=divide_by_initial_world_size, + ) + + def join_hook( + self, + **kwargs, + ): + r""" + DDP join hook enables training on uneven inputs by mirroring communications in forward and backward passes. + + Arguments: + kwargs (dict): a :class:`dict` containing any keyword arguments + to modify the behavior of the join hook at run time; all + :class:`Joinable` instances sharing the same join context + manager are forwarded the same value for ``kwargs``. + + The hook supports the following keyword arguments: + divide_by_initial_world_size (bool, optional): + If ``True``, then gradients are divided by the initial world + size that DDP was launched with. + If ``False``, then gradients are divided by the effective world + size (i.e. the number of non-joined processes), meaning that + the uneven inputs contribute more toward the global gradient. + Typically, this should be set to ``True`` if the degree of + unevenness is small but can be set to ``False`` in extreme + cases for possibly better results. + Default is ``True``. 
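+
+        Example::
+            This hook is normally constructed and consumed by the ``Join``
+            context manager rather than called directly. An illustrative
+            sketch (``ddp`` is assumed to be an existing
+            ``DistributedDataParallel`` instance):
+
+            >>> # xdoctest: +SKIP("undefined variables")
+            >>> hook = ddp.join_hook(divide_by_initial_world_size=False)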
+ """ + divide_by_initial_world_size = kwargs.get("divide_by_initial_world_size", True) + return _DDPJoinHook( + self, divide_by_initial_world_size=divide_by_initial_world_size + ) + + @property + def join_device(self): + return self.device + + @property + def join_process_group(self): + return self.process_group + + def _register_buffer_comm_hook( + self, + state, + hook: Callable, + comm_hook_location=_BufferCommHookLocation.POST_FORWARD, + ): + r""" + Allow custom registration of hooks that define how buffer are synchronized across ranks. + + The hook takes in an optional state and is passed in a Dict[str, Tensor] + corresponding to buffer names and the buffers, and can run arbitrary reductions + on buffers as opposed to DDP's default broadcast from rank 0. This is useful for + example if a counter needs to be summed or averaged across ranks every iteration. + + Args: + state (Any): Optional state that is passed to the hook. + hook (Callable): Callable with the following signature: + ``hook(state: object, bucket: dist.GradBucket) -> torch.futures.Future[torch.Tensor]`` + comm_hook_location (_BufferCommHookLocation): Enum value indicating + where to run the hook. + _BufferCommHookLocation.PRE_FORWARD means that the + hook will run _before_ the forward pass, and + _BufferCommHookLocation.POST_FORWARD means that the + hook will run _after_ the forward pass. + + NOTE: To maximize performance, users can return a + List[torch.futures.Future] from their hook, and DDP will + install and await these hooks appropriately at the end of + the backward pass. This will ensure all buffers are + synchronized by the end of the backward pass. If this + setting is used, it is recommended to pass + comm_hook_location=_BufferCommHookLocation.POST_FORWARD, + which will trigger the hook after the forward pass. + If _BufferCommHookLocation.PRE_FORWARD is used, users must + ensure appropriate synchronization when manipulating GPU + buffers in the forward pass. + """ + assert callable(hook) + self.buffer_hook = _BufferCommHook( + buffer_comm_hook=hook, + buffer_comm_hook_state=state, + buffer_comm_hook_location=comm_hook_location, + ) + + def register_comm_hook(self, state: object, hook: Callable): + r""" + Register communication hook for user-defined DDP aggregation of gradients across multiple workers. + + This hook would be very useful for researchers to try out new ideas. For + example, this hook can be used to implement several algorithms like GossipGrad + and gradient compression which involve different communication strategies for + parameter syncs while running Distributed DataParallel training. + + Args: + state (object): Passed to the hook to maintain any state information during the training process. + Examples include error feedback in gradient compression, + peers to communicate with next in GossipGrad, etc. + + It is locally stored by each worker + and shared by all the gradient tensors on the worker. + hook (Callable): Callable with the following signature: + ``hook(state: object, bucket: dist.GradBucket) -> torch.futures.Future[torch.Tensor]``: + + This function is called once the bucket is ready. The + hook can perform whatever processing is needed and return + a Future indicating completion of any async work (ex: allreduce). + If the hook doesn't perform any communication, it still + must return a completed Future. The Future should hold the + new value of grad bucket's tensors. 
Once a bucket is ready, + c10d reducer would call this hook and use the tensors returned + by the Future and copy grads to individual parameters. + Note that the future's return type must be a single tensor. + + We also provide an API called ``get_future`` to retrieve a + Future associated with the completion of ``c10d.ProcessGroup.Work``. + ``get_future`` is currently supported for NCCL and also supported for most + operations on GLOO and MPI, except for peer to peer operations (send/recv). + + .. warning :: + Grad bucket's tensors will not be predivided by world_size. User is responsible + to divide by the world_size in case of operations like allreduce. + + .. warning :: + DDP communication hook can only be registered once and should be registered + before calling backward. + + .. warning :: + The Future object that hook returns should contain a single tensor + that has the same shape with the tensors inside grad bucket. + + .. warning :: + ``get_future`` API supports NCCL, and partially GLOO and MPI backends (no support + for peer-to-peer operations like send/recv) and will return a ``torch.futures.Future``. + + Example:: + Below is an example of a noop hook that returns the same tensor. + + >>> # xdoctest: +SKIP('undefined name') + >>> def noop(state: object, bucket: dist.GradBucket) -> torch.futures.Future[torch.Tensor]: + >>> fut = torch.futures.Future() + >>> fut.set_result(bucket.buffer()) + >>> return fut + >>> ddp.register_comm_hook(state=None, hook=noop) + + Example:: + Below is an example of a Parallel SGD algorithm where gradients are encoded before + allreduce, and then decoded after allreduce. + + >>> # xdoctest: +SKIP('undefined name') + >>> def encode_and_decode(state: object, bucket: dist.GradBucket) -> torch.futures.Future[torch.Tensor]: + >>> encoded_tensor = encode(bucket.buffer()) # encode gradients + >>> fut = torch.distributed.all_reduce(encoded_tensor).get_future() + >>> # Define the then callback to decode. + >>> def decode(fut): + >>> decoded_tensor = decode(fut.value()[0]) # decode gradients + >>> return decoded_tensor + >>> return fut.then(decode) + >>> ddp.register_comm_hook(state=None, hook=encode_and_decode) + """ + self._check_comm_hook(hook) + if hook.__name__ in ["bf16_compress_hook", "fp16_compress_hook"]: + # If we pass None, then the hook will try to get the world size + # by calling `dist.group.WORLD.size()`, which causes compilation + # errors. So we pre-decode the process group and pass it to the + # hook. + if state is None: + state = dist.group.WORLD + assert self.logger is not None + self.logger._set_comm_hook_name(hook.__qualname__) + self._comm_hooks.append((hook, state)) + dist._register_comm_hook(self.reducer, state, hook) + + def _register_builtin_comm_hook(self, comm_hook_type): + r""" + Register a built-in communication hook that specifies how DDP aggregates gradients across multiple workers. + + The built-in hooks aim to provide efficient C++ implementations for certain hooks, + which might not be as efficient if implemented in Python using a Python communication hook. + + Args: + comm_hook_type (dist.BuiltinCommHookType): type of communication hook, such as ALLREDUCE, FP16_COMPRESS, etc. + + .. warning :: + DDP communication hook can only be registered once and should be registered + before calling backward. + + Example:: + Below is an example of a FP16 compression where gradients are + compressed into 16-bit floating-point numbers before allreduce, and + then decompressed after allreduce. 
+ + >>> # xdoctest: +SKIP('undefined name') + >>> ddp._register_builtin_comm_hook(dist.BuiltinCommHookType.FP16_COMPRESS) + + """ + assert self.logger is not None + self.logger._set_comm_hook_name(str(comm_hook_type)) + dist._register_builtin_comm_hook(self.reducer, comm_hook_type) + + def _register_fused_optim(self, optim: Type, *args, optim_params=None, **kwargs): + r""" + Register an optimizer in DDP to optimize parameter immediately after its gradient reduction. + + Registers an optimizer with DDP such that the optimization for a + parameter will run immediately when that parameter's gradient is + finished with reduction, instead of waiting for all parameters' + gradients to finish reduction. This can result in a training speedup + depending on your workload since the optimizer can run while gradient + reduction for other parameters are still ongoing. In addition, this has + the potential to reduce peak memory consumption during training, as it + only needs to load the per-parameter optimizer states of a single + parameter at a time, instead of loading all per-parameter optimizer + states at once. + + Args: + optim (Type): a ``torch.optim.Optimizer`` class to be registered + as a fused optimizer. + *args (Sequence[Any]): Arguments to forward to `optim`. + optim_params (Optional[Iterable[torch.Tensor]]): Set of parameters + to optimize, similar to `params` argument of traditional `torch.optim` + Optimizers. If this is omitted, all DDP model parameters will be + optimized. + **kwargs: (Dict[str, Any]): Keyword arguments to forward to `optim`. + + .. warning :: + _register_fused_optim should only be called once on a DDP instance, + and registering multiple fused optimizers for the same DDP model + is not currently supported. Please ping + https://github.com/pytorch/pytorch/issues/71595 if this is necessary + for your use case. + + .. warning :: + _register_fused_optim and register_comm_hook currently do not + compose together, meaning that custom DDP communication hooks are + not supported with overlapped optimizers. Please ping + https://github.com/pytorch/pytorch/issues/71595 if this is necessary + for your use case. + + .. warning :: + Gradient accumulation and DDP `no_sync` are currently not supported + with overlapped optimizer. Please ping + https://github.com/pytorch/pytorch/issues/71595 if this is necessary + for your use case. + + Example:: + + >>> # xdoctest: +SKIP("No rendezvous handler") + >>> torch.distributed.init_process_group(backend='nccl', world_size=4, init_method='...') + >>> net = torch.nn.parallel.DistributedDataParallel(model, pg) + >>> lr = 1e-2 + >>> betas = (0.9, 0.99) + >>> eps = 1e-6 + >>> net._register_fused_optim(torch.optim.Adam, lr, betas=betas, eps=eps) + >>> # Example with subset of parameters + >>> params_to_opt = [list(net.parameters())[0]] + >>> net._register_fused_optim( + ... torch.optim.Adam, lr, optim_params=params_to_opt, betas=betas, eps=eps + ... ) + """ + # Note: importing in function, otherwise this will cause a circular + # import as optimizer_overlap module needs to import DistributedDataParallel. + from torch.distributed.algorithms._optimizer_overlap import _as_overlapped_optim + + overlapped_optim = _as_overlapped_optim(optim, optim_params, *args, **kwargs) + try: + overlapped_optim.register_ddp(self) + except NotImplementedError as e: + raise RuntimeError( + f"{optim} does not support overlapped DDP. Please file an issue to PyTorch or the respective owner of {optim}." 
+ ) from e + + def _distributed_broadcast_coalesced( + self, tensors, buffer_size, authoritative_rank=0 + ): + dist._broadcast_coalesced( + self.process_group, tensors, buffer_size, authoritative_rank + ) + + def _check_sync_bufs_post_fwd(self): + return ( + self.will_sync_module_buffers() + and hasattr(self, "buffer_hook") + and self.buffer_hook.buffer_comm_hook_location + == _BufferCommHookLocation.POST_FORWARD + ) + + def _check_sync_bufs_pre_fwd(self): + return self.will_sync_module_buffers() and ( + not hasattr(self, "buffer_hook") + or self.buffer_hook.buffer_comm_hook_location + == _BufferCommHookLocation.PRE_FORWARD + ) + + def will_sync_module_buffers(self): + return ( + self.require_forward_param_sync + and self.broadcast_buffers + and len(self.modules_buffers) > 0 + ) + + def _find_common_rank(self, input_rank, rank_cond): + # -1 indicates that this rank is not under consideration to be the + # common_rank + rank_to_use = torch.tensor( + [input_rank if rank_cond else -1], + device=self.device, + ) + dist.all_reduce(rank_to_use, op=ReduceOp.MAX, group=self.process_group) + if rank_to_use.item() == -1: + self._log_and_throw( + ValueError, + "BUG! Expected rank_cond to be true for at least one process." + " This indicates a bug in PyTorch, please report an issue.", + ) + return rank_to_use.item() + + def _sync_buffers(self): + with torch.no_grad(): + # module buffer sync + # Synchronize buffers across processes. + # If we are running DDP with the join manager, we have to agree + # upon a rank to sync module buffers from, since rank 0 may + # already have been joined and have stale module buffers. + if self._join_config.enable: + authoritative_rank = self._find_common_rank( + self._distributed_rank, True + ) + else: + # The process with rank 0 is considered the authoritative copy. + authoritative_rank = 0 + # Update self.modules_buffers incase any buffers were + # reassigned. + self._assign_modules_buffers() + self._sync_module_buffers(authoritative_rank) + + def _sync_module_buffers(self, authoritative_rank): + if not hasattr(self, "buffer_hook"): + self._default_broadcast_coalesced(authoritative_rank=authoritative_rank) + else: + hook = self.buffer_hook.buffer_comm_hook + state = self.buffer_hook.buffer_comm_hook_state + futs = hook(state, self.named_module_buffers) + if futs is not None: + self.reducer._install_post_backward_futures(futs) + + def _default_broadcast_coalesced( + self, bufs=None, bucket_size=None, authoritative_rank=0 + ): + """ + Broadcasts buffers from rank 0 to rest of workers. + + If bufs, bucket_size are None, default values self.modules_buffers + and self.broadcast_bucket_size are used instead. 
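+
+        For example (illustrative sketch; the bucket size shown is arbitrary),
+        a caller could broadcast an explicit list of buffers with a custom
+        bucket size::
+
+            self._default_broadcast_coalesced(
+                bufs=list(self.module.buffers()),
+                bucket_size=25 * 1024 * 1024,
+                authoritative_rank=0,
+            )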
+ """ + if bufs is None: + bufs = self.modules_buffers + if bucket_size is None: + bucket_size = self.broadcast_bucket_size + + self._distributed_broadcast_coalesced(bufs, bucket_size, authoritative_rank) + + def _passing_sync_batchnorm_handle(self, module): + for layer in module.modules(): + if isinstance(layer, torch.nn.modules.SyncBatchNorm): + if self.device_type == "cpu": + self._log_and_throw( + ValueError, + "SyncBatchNorm layers only work with GPU modules", + ) + + def _check_comm_hook(self, hook): + if not callable(hook): + self._log_and_throw(TypeError, "Communication hook must be callable.") + + sig = inspect.signature(hook) + if ( + sig.parameters["bucket"].annotation != inspect._empty + and sig.parameters["bucket"].annotation != dist.GradBucket + ): + self._log_and_throw( + ValueError, + "Communication hook: bucket annotation should be dist.GradBucket.", + ) + + if ( + sig.return_annotation != inspect._empty + and sig.return_annotation != torch.futures.Future[torch.Tensor] + ): + self._log_and_throw( + ValueError, + "Communication hook: return annotation should be torch.futures.Future[torch.Tensor].", + ) + + if hook.__name__ in [ + "bf16_compress_hook", + "bf16_compress_wrapper_hook", + ] and ( + (torch.version.cuda is None and torch.version.hip is None) + or ( + torch.version.cuda is not None + and int(torch.version.cuda.split(".")[0]) < 11 + ) + or not dist.is_available() + or not dist.is_nccl_available() + or torch.cuda.nccl.version() < (2, 10) + ): + self._log_and_throw( + TypeError, + "BF16 all reduce communication hook required CUDA 11+ and NCCL 2.10+.", + ) + + @property + def _distributed_rank(self): + return dist.get_rank(self.process_group) + + @staticmethod + def _get_data_parallel_params(module, named_params=False): + """Return a generator of parameters managed by a given DDP unit.""" + for param in ( + module.parameters() if not named_params else module.named_parameters() + ): + if not hasattr(param, "_ddp_ignored"): + yield param + + @staticmethod + def _set_params_and_buffers_to_ignore_for_model( + module, params_and_buffers_to_ignore + ): + """ + Set parameters and buffers to be ignored by DDP. + + Expected format for parameters is the fully qualified name: {module_name}.{param_name}, and + similarly, {module_name}.{buffer_name} for buffers. For example: + params_to_ignore = [] + # NB: model here is vanilla PyTorch module, not yet wrapped with DDP. + for module_name, module in model.named_modules(): + for param_name, param in module.named_parameters(recurse=False): + if should_ignore(param): + # Create expected format + fqn = f"{module_name}.{param_name}" + params_to_ignore.append(fqn) + torch.nn.parallel.DistributedDataParallel._set_params_and_buffers_to_ignore_for_model( + model, + params_to_ignore + ) + """ + # This is a workaround to set parameters and buffers DDP should ignore + # during synchronization. It will be removed when the API is finalized + # as part of addressing https://github.com/pytorch/pytorch/issues/43690. + module._ddp_params_and_buffers_to_ignore = params_and_buffers_to_ignore + for name, param in module.named_parameters(): + if name in params_and_buffers_to_ignore: + param._ddp_ignored = True + for name, buffer in module.named_buffers(): + if name in params_and_buffers_to_ignore: + buffer._ddp_ignored = True + + def _get_ddp_logging_data(self): + r""" + Return a dictionary of logging data for debugging and analysis. + + This interface can be called after DistributedDataParallel() is + constructed. 
It returns a dictionary of logging data. It could help + for debugging and analysis. The logging data includes DistributedDataParallel + constructor input parameters, some internal states of DistributedDataParallel + and performance metrics. Simply print the dictionary and see what + these metrics are. + This is a prototype interface and subject to change in the future. + """ + assert self.logger is not None + ddp_logging_data = self.logger._get_ddp_logging_data() + return {**ddp_logging_data.strs_map, **ddp_logging_data.ints_map} + + def _set_ddp_runtime_logging_sample_rate(self, sample_rate): + r""" + Set sample_rate of collecting runtime stats. + + This interface allows users to set sample_rate of collecting + runtime stats. The runtime stats will be recorded for the + first 10 iterations, after 10 iterations runtime stats will be + recorded once every "sample_rate" training iterations. In + default, runtime stats are recorded for the first 10 iterations, + after 10 iterations runtime stats are recorded once every + "kDDPRuntimeLoggingSampleRate=100" training iterations. + This is a prototype interface and subject to change in the future. + """ + if sample_rate < 1: + self._log_and_throw( + ValueError, + "DDP runtime logging sample rate should be equal or greater than 1", + ) + self.reducer._set_ddp_runtime_logging_sample_rate(sample_rate) + + def _set_static_graph(self): + """ + Set static graph for DDP. + + It is recommended to set static graph in the DDP constructor, which will + call this private API internally. + """ + # If self.static_graph has been set, no need to set it again + if self.static_graph: + warnings.warn( + "You've set static_graph to be True, no need to set it again." + ) + return + self.static_graph = True + self._static_graph_delay_allreduce_enqueued = False + self.reducer._set_static_graph() + assert self.logger is not None + self.logger._set_static_graph() + if self.find_unused_parameters: + warnings.warn( + "You passed find_unused_parameters=true to DistributedDataParallel, " + "`_set_static_graph` will detect unused parameters automatically, so " + "you do not need to set find_unused_parameters=true, just be sure these " + "unused parameters will not change during training loop while calling " + "`_set_static_graph`." + ) + + def _remove_autograd_hooks(self): + """Remove autograd hooks registered by the reducer on the model parameters.""" + self.reducer._remove_autograd_hooks() + + def _check_reducer_finalized(self): + """ + Check if the reducer has processed all buckets and finalized the backward appropriately. + + It is useful to call this method after calling .backward() in your training loop + in order to avoid subsequent hard to debug errors down the road due to the + reducer not finalizing backward. + """ + self.reducer._check_reducer_finalized() + + def _set_sparse_metadata(self, global_unique_ids): + self.reducer._set_sparse_metadata(global_unique_ids) + + def _update_process_group(self, new_process_group): + """ + Dynamically updates the process group for DDP so that we can shrink/expand DDP + world size without having to reinitialize DDP. + + NOTE: If you are using custom communications hooks via, register_comm_hook, + you need to update the process groups for those hooks separately. + """ + # Force a rebuild of buckets for a new process group. This ensures all ranks + # are synchronized in terms of when they will rebuild buckets and also + # re-evaluates previous assumptions of buckets given the world size might have + # changed. 
+ self._has_rebuilt_buckets = False + self.reducer._reset_state() + + if not _rank_not_in_group(new_process_group): + self.process_group = new_process_group + self.reducer._update_process_group(new_process_group) diff --git a/venv/lib/python3.10/site-packages/torch/nn/parallel/parallel_apply.py b/venv/lib/python3.10/site-packages/torch/nn/parallel/parallel_apply.py new file mode 100644 index 0000000000000000000000000000000000000000..6a90f897fa8ada1575524e50474df402b9c42a0d --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/nn/parallel/parallel_apply.py @@ -0,0 +1,110 @@ +import threading +import torch +from typing import Any, Dict, List, Optional, Sequence, Tuple, Union, cast +from ..modules import Module +from torch.cuda._utils import _get_device_index +from torch.cuda.amp import autocast +from torch._utils import ExceptionWrapper + +__all__ = ['get_a_var', 'parallel_apply'] + +def get_a_var(obj: Union[torch.Tensor, List[Any], Tuple[Any, ...], Dict[Any, Any]]) -> Optional[torch.Tensor]: + if isinstance(obj, torch.Tensor): + return obj + + if isinstance(obj, (list, tuple)): + for result in map(get_a_var, obj): + if isinstance(result, torch.Tensor): + return result + if isinstance(obj, dict): + for result in map(get_a_var, obj.items()): + if isinstance(result, torch.Tensor): + return result + return None + +def parallel_apply( + modules: Sequence[Module], + inputs: Sequence[Any], + kwargs_tup: Optional[Sequence[Dict[str, Any]]] = None, + devices: Optional[Sequence[Optional[Union[int, torch.device]]]] = None, +) -> List[Any]: + r"""Apply each `module` in :attr:`modules` in parallel on each of :attr:`devices`. + + Args: + modules (Module): modules to be parallelized + inputs (tensor): inputs to the modules + devices (list of int or torch.device): CUDA devices + + :attr:`modules`, :attr:`inputs`, :attr:`kwargs_tup` (if given), and + :attr:`devices` (if given) should all have same length. Moreover, each + element of :attr:`inputs` can either be a single object as the only argument + to a module, or a collection of positional arguments. 
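+
+    Example (illustrative sketch; assumes at least two CUDA devices are
+    available)::
+
+        replicas = [torch.nn.Linear(8, 4).cuda(0), torch.nn.Linear(8, 4).cuda(1)]
+        inputs = [torch.randn(2, 8, device="cuda:0"), torch.randn(2, 8, device="cuda:1")]
+        outputs = parallel_apply(replicas, inputs, devices=[0, 1])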
+ """ + assert len(modules) == len(inputs), f'The number of modules {len(modules)} is not equal to the number of inputs {len(inputs)}' + if kwargs_tup is not None: + assert len(modules) == len(kwargs_tup) + else: + kwargs_tup = (cast(Dict[str, Any], {}),) * len(modules) + if devices is not None: + assert len(modules) == len(devices) + else: + devices = [None] * len(modules) + devices = [_get_device_index(x, True) for x in devices] + streams = [torch.cuda.current_stream(x) for x in devices] + lock = threading.Lock() + results = {} + grad_enabled, autocast_enabled = torch.is_grad_enabled(), torch.is_autocast_enabled() + + def _worker( + i: int, + module: Module, + input: Any, + kwargs: Dict[str, Any], + device: Optional[Union[int, torch.device]] = None, + stream: Optional[torch.cuda.Stream] = None, + ) -> None: + torch.set_grad_enabled(grad_enabled) + if device is None: + t = get_a_var(input) + if t is None: + with lock: + results[i] = ExceptionWrapper( + where=f"in replica {i}, no device was provided and no tensor input was found; " + "device cannot be resolved") + return + device = t.get_device() + if stream is None: + stream = torch.cuda.current_stream(device) + try: + with torch.cuda.device(device), torch.cuda.stream(stream), autocast(enabled=autocast_enabled): + # this also avoids accidental slicing of `input` if it is a Tensor + if not isinstance(input, (list, tuple)): + input = (input,) + output = module(*input, **kwargs) + with lock: + results[i] = output + except Exception: + with lock: + results[i] = ExceptionWrapper( + where=f"in replica {i} on device {device}") + + if len(modules) > 1: + threads = [threading.Thread(target=_worker, + args=(i, module, input, kwargs, device, stream)) + for i, (module, input, kwargs, device, stream) in + enumerate(zip(modules, inputs, kwargs_tup, devices, streams))] + + for thread in threads: + thread.start() + for thread in threads: + thread.join() + else: + _worker(0, modules[0], inputs[0], kwargs_tup[0], devices[0], streams[0]) + + outputs = [] + for i in range(len(inputs)): + output = results[i] + if isinstance(output, ExceptionWrapper): + output.reraise() + outputs.append(output) + return outputs diff --git a/venv/lib/python3.10/site-packages/torch/nn/parallel/replicate.py b/venv/lib/python3.10/site-packages/torch/nn/parallel/replicate.py new file mode 100644 index 0000000000000000000000000000000000000000..016a6fbd0c40d510d4c123923e16d62514c71c45 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/nn/parallel/replicate.py @@ -0,0 +1,186 @@ +import torch +from ..modules import Module +from . import comm +from typing import TYPE_CHECKING, Dict, Iterator, List, Optional, Sequence, Set, TypeVar, Union, cast +from torch._utils import _get_device_index + +from collections import OrderedDict + +if TYPE_CHECKING: + import torch.jit + import torch.jit._state + +__all__ = ['replicate'] + +def _is_script_module(module: Module) -> bool: + import torch.jit + return isinstance(module, torch.jit.ScriptModule) + + +def _is_script_method(module: Module) -> bool: + import torch.jit + return isinstance(module, torch._C.ScriptMethod) + + +def _init_script_module() -> "torch.jit.ScriptModule": + import torch.jit + return torch.jit.ScriptModule() + + +def _is_jit_enabled() -> "torch.jit._state.EnabledProxy": + import torch.jit._state + return torch.jit._state._enabled + + +# Check if we can safely replicate the module. +# there are two types of module: +# 1. python modules +# 2. 
ScriptModule +# +# currently a module cannot be replicated properly if the descendants of +# any ScriptModule contains python module (type 1 above) +def _replicatable_module(module: Module, memo: Optional[Set[Module]] = None) -> bool: + + # module.modules() contains module itself as the first element + def descendant_modules(module: Module) -> Iterator[Module]: + gen = module.modules() + next(gen) + return gen + + if not _is_jit_enabled(): + return True + if memo is None: + memo = set() + + # memoize visited modules + memo.add(module) + if _is_script_module(module): + memo.update(descendant_modules(module)) + return all(_is_script_module(descendant) for + descendant in descendant_modules(module)) + + for child in module.children(): + # since any unreplicatable module will cause the check to return + # False early, visited modules here can be safely ignored. + if child in memo: + continue + if not _replicatable_module(child, memo): + return False + + return True + +def _broadcast_coalesced_reshape( + tensors: Sequence[torch.Tensor], + devices: Sequence[Union[int, torch.device]], + detach: bool = False, +) -> List[List[torch.Tensor]]: + from ._functions import Broadcast + if detach: + return comm.broadcast_coalesced(tensors, devices) + else: + # Use the autograd function to broadcast if not detach + if len(tensors) > 0: + tensor_copies = Broadcast.apply(devices, *tensors) + return [tensor_copies[i:i + len(tensors)] + for i in range(0, len(tensor_copies), len(tensors))] + else: + return [] + + +T = TypeVar("T", bound=Module) + + +def replicate( + network: T, + devices: Sequence[Union[int, torch.device]], + detach: bool = False, +) -> List[T]: + if not _replicatable_module(network): + raise RuntimeError("Cannot replicate network where python modules are " + "childrens of ScriptModule") + + if not devices: + return [] + + devices = [_get_device_index(x, True) for x in devices] + num_replicas = len(devices) + + params = list(network.parameters()) + param_indices = {param: idx for idx, param in enumerate(params)} + param_copies = _broadcast_coalesced_reshape(params, devices, detach) + + buffers = list(network.buffers()) + buffers_rg: List[torch.Tensor] = [] + buffers_not_rg: List[torch.Tensor] = [] + for buf in buffers: + if buf.requires_grad and not detach: + buffers_rg.append(buf) + else: + buffers_not_rg.append(buf) + + buffer_indices_rg = {buf: idx for idx, buf in enumerate(buffers_rg)} + buffer_indices_not_rg = {buf: idx for idx, buf in enumerate(buffers_not_rg)} + + buffer_copies_rg = _broadcast_coalesced_reshape(buffers_rg, devices, detach=detach) + buffer_copies_not_rg = _broadcast_coalesced_reshape(buffers_not_rg, devices, detach=True) + + modules = list(network.modules()) + module_copies: List[List[Module]] = [[] for _ in devices] + module_indices: Dict[Module, int] = {} + + for i, module in enumerate(modules): + module_indices[module] = i + for j in range(num_replicas): + replica = module._replicate_for_data_parallel() + # This is a temporary fix for DDP. DDP needs to access the + # replicated model parameters. It used to do so through + # `mode.parameters()`. The fix added in #33907 for DP stops the + # `parameters()` API from exposing the replicated parameters. + # Hence, we add a `_former_parameters` dict here to support DDP. 
+ replica._former_parameters = OrderedDict() + + module_copies[j].append(replica) + + for i, module in enumerate(modules): + for key, child in module._modules.items(): + if child is None: + for j in range(num_replicas): + replica = module_copies[j][i] + replica._modules[key] = None + else: + module_idx = module_indices[child] + for j in range(num_replicas): + replica = module_copies[j][i] + setattr(replica, key, module_copies[j][module_idx]) + for key, param in module._parameters.items(): + if param is None: + for j in range(num_replicas): + replica = module_copies[j][i] + replica._parameters[key] = None + else: + param_idx = param_indices[param] + for j in range(num_replicas): + replica = module_copies[j][i] + param_copy = param_copies[j][param_idx] + # parameters in replicas are no longer leaves, + # so setattr them as non-parameter attributes + setattr(replica, key, param_copy) + # expose the parameter for DDP + replica._former_parameters[key] = param_copy + for key, buf in module._buffers.items(): # type: ignore[assignment] + if buf is None: + for j in range(num_replicas): + replica = module_copies[j][i] + replica._buffers[key] = None + else: + if buf.requires_grad and not detach: + buffer_copies = buffer_copies_rg + buffer_idx = buffer_indices_rg[buf] + else: + buffer_copies = buffer_copies_not_rg + buffer_idx = buffer_indices_not_rg[buf] + for j in range(num_replicas): + replica = module_copies[j][i] + setattr(replica, key, buffer_copies[j][buffer_idx]) + + return [cast(T, module_copies[j][0]) for j in range(num_replicas)] diff --git a/venv/lib/python3.10/site-packages/torch/nn/parallel/scatter_gather.py b/venv/lib/python3.10/site-packages/torch/nn/parallel/scatter_gather.py new file mode 100644 index 0000000000000000000000000000000000000000..8daa1117bfaf98246f83acf9e2b79666ccdf6ef8 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/nn/parallel/scatter_gather.py @@ -0,0 +1,107 @@ +import torch +from typing import Any, Dict, List, Optional, Sequence, Tuple, TypeVar, Union, overload +from ._functions import Scatter, Gather +import warnings + +__all__ = ['scatter', 'scatter_kwargs', 'gather'] + +def is_namedtuple(obj: Any) -> bool: + # Check if type was created from collections.namedtuple or a typing.NamedTuple. + warnings.warn("is_namedtuple is deprecated, please use the python checks instead") + return _is_namedtuple(obj) + +def _is_namedtuple(obj: Any) -> bool: + # Check if type was created from collections.namedtuple or a typing.NamedTuple. + return ( + isinstance(obj, tuple) and hasattr(obj, "_asdict") and hasattr(obj, "_fields") + ) + + +T = TypeVar("T", dict, list, tuple) + +# For some reason, 'scatter' returns a tuple when given a single Tensor input but a list otherwise. +@overload +def scatter( + inputs: torch.Tensor, + target_gpus: Sequence[Union[int, torch.device]], + dim: int = ..., +) -> Tuple[torch.Tensor, ...]: + ... + +@overload +def scatter(inputs: T, target_gpus: Sequence[Union[int, torch.device]], dim: int = ...) -> List[T]: + ... + +def scatter(inputs, target_gpus, dim=0): + r"""Slice tensors into approximately equal chunks and distributes them across given GPUs. + + Duplicates references to objects that are not tensors. 
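+
+    Example (illustrative sketch; assumes two visible CUDA devices, and the
+    device placement shown in the comments is indicative only)::
+
+        inputs = (torch.arange(4), {"flag": True})
+        chunks = scatter(inputs, target_gpus=[0, 1], dim=0)
+        # chunks[0] ~ (tensor([0, 1], device='cuda:0'), {'flag': True})
+        # chunks[1] ~ (tensor([2, 3], device='cuda:1'), {'flag': True})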
+ """ + def scatter_map(obj): + if isinstance(obj, torch.Tensor): + return Scatter.apply(target_gpus, None, dim, obj) + if _is_namedtuple(obj): + return [type(obj)(*args) for args in zip(*map(scatter_map, obj))] + if isinstance(obj, tuple) and len(obj) > 0: + return list(zip(*map(scatter_map, obj))) + if isinstance(obj, list) and len(obj) > 0: + return [list(i) for i in zip(*map(scatter_map, obj))] + if isinstance(obj, dict) and len(obj) > 0: + return [type(obj)(i) for i in zip(*map(scatter_map, obj.items()))] + return [obj for _ in target_gpus] + + # After scatter_map is called, a scatter_map cell will exist. This cell + # has a reference to the actual function scatter_map, which has references + # to a closure that has a reference to the scatter_map cell (because the + # fn is recursive). To avoid this reference cycle, we set the function to + # None, clearing the cell + try: + res = scatter_map(inputs) + finally: + scatter_map = None # type: ignore[assignment] + return res + + +def scatter_kwargs( + inputs: Tuple[Any, ...], + kwargs: Optional[Dict[str, Any]], + target_gpus: Sequence[Union[int, torch.device]], + dim: int = 0, +) -> Tuple[Tuple[Any, ...], Tuple[Dict[str, Any], ...]]: + r"""Scatter with support for kwargs dictionary.""" + scattered_inputs = scatter(inputs, target_gpus, dim) if inputs else [] + scattered_kwargs = scatter(kwargs, target_gpus, dim) if kwargs else [] + if len(scattered_inputs) < len(scattered_kwargs): + scattered_inputs.extend(() for _ in range(len(scattered_kwargs) - len(scattered_inputs))) + elif len(scattered_kwargs) < len(inputs): + scattered_kwargs.extend({} for _ in range(len(scattered_inputs) - len(scattered_kwargs))) + return tuple(scattered_inputs), tuple(scattered_kwargs) + + +def gather(outputs: Any, target_device: Union[int, torch.device], dim: int = 0) -> Any: + r"""Gather tensors from different GPUs on a specified device. + + Use 'cpu' for CPU to avoid a deprecation warning. + """ + def gather_map(outputs): + out = outputs[0] + if isinstance(out, torch.Tensor): + return Gather.apply(target_device, dim, *outputs) + if out is None: + return None + if isinstance(out, dict): + if not all(len(out) == len(d) for d in outputs): + raise ValueError('All dicts must have the same number of keys') + return type(out)((k, gather_map([d[k] for d in outputs])) + for k in out) + if _is_namedtuple(out): + return type(out)._make(map(gather_map, zip(*outputs))) + return type(out)(map(gather_map, zip(*outputs))) + + # Recursive function calls like this create reference cycles. + # Setting the function to None clears the refcycle. 
+ try: + res = gather_map(outputs) + finally: + gather_map = None # type: ignore[assignment] + return res diff --git a/venv/lib/python3.10/site-packages/torch/nn/utils/__pycache__/__init__.cpython-310.pyc b/venv/lib/python3.10/site-packages/torch/nn/utils/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..2666140e8e2503d1bd8fd00e5b9a34ec464056fa Binary files /dev/null and b/venv/lib/python3.10/site-packages/torch/nn/utils/__pycache__/__init__.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/torch/nn/utils/__pycache__/_deprecation_utils.cpython-310.pyc b/venv/lib/python3.10/site-packages/torch/nn/utils/__pycache__/_deprecation_utils.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..9e0daf3334fb3f06043b6fbbff4d314ea7cc2f02 Binary files /dev/null and b/venv/lib/python3.10/site-packages/torch/nn/utils/__pycache__/_deprecation_utils.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/torch/nn/utils/__pycache__/_per_sample_grad.cpython-310.pyc b/venv/lib/python3.10/site-packages/torch/nn/utils/__pycache__/_per_sample_grad.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..518ebc95a4f72f870c886c4ad68b4481e63283e7 Binary files /dev/null and b/venv/lib/python3.10/site-packages/torch/nn/utils/__pycache__/_per_sample_grad.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/torch/nn/utils/__pycache__/init.cpython-310.pyc b/venv/lib/python3.10/site-packages/torch/nn/utils/__pycache__/init.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..6b0877c160d76da8e1c796cde5307f4f9875e996 Binary files /dev/null and b/venv/lib/python3.10/site-packages/torch/nn/utils/__pycache__/init.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/torch/nn/utils/__pycache__/memory_format.cpython-310.pyc b/venv/lib/python3.10/site-packages/torch/nn/utils/__pycache__/memory_format.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..ce5561275fef3d15a1ad027941d806eb147f1f6d Binary files /dev/null and b/venv/lib/python3.10/site-packages/torch/nn/utils/__pycache__/memory_format.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/torch/nn/utils/__pycache__/spectral_norm.cpython-310.pyc b/venv/lib/python3.10/site-packages/torch/nn/utils/__pycache__/spectral_norm.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..a5082b5ea58c78169cd36ad3ed2d6251697ed731 Binary files /dev/null and b/venv/lib/python3.10/site-packages/torch/nn/utils/__pycache__/spectral_norm.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/torch/nn/utils/__pycache__/stateless.cpython-310.pyc b/venv/lib/python3.10/site-packages/torch/nn/utils/__pycache__/stateless.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..12f69b64a1820af7a5d0da8f5eec13d399393da5 Binary files /dev/null and b/venv/lib/python3.10/site-packages/torch/nn/utils/__pycache__/stateless.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/torch/nn/utils/_deprecation_utils.py b/venv/lib/python3.10/site-packages/torch/nn/utils/_deprecation_utils.py new file mode 100644 index 0000000000000000000000000000000000000000..1b2a9b6e29f2f2f0157f97e8210d13751e0bcb8c --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/nn/utils/_deprecation_utils.py @@ -0,0 +1,45 @@ +from typing import List, Callable +import importlib +import 
warnings + + +_MESSAGE_TEMPLATE = r"Usage of '{old_location}' is deprecated; please use '{new_location}' instead." + +def lazy_deprecated_import(all: List[str], old_module: str, new_module: str) -> Callable: + r"""Import utility to lazily import deprecated packages / modules / functional. + + The old_module and new_module are also used in the deprecation warning defined + by the `_MESSAGE_TEMPLATE`. + + Args: + all: The list of the functions that are imported. Generally, the module's + __all__ list of the module. + old_module: Old module location + new_module: New module location / Migrated location + + Returns: + Callable to assign to the `__getattr__` + + Usage: + + # In the `torch/nn/quantized/functional.py` + from torch.nn.utils._deprecation_utils import lazy_deprecated_import + _MIGRATED_TO = "torch.ao.nn.quantized.functional" + __getattr__ = lazy_deprecated_import( + all=__all__, + old_module=__name__, + new_module=_MIGRATED_TO) + """ + warning_message = _MESSAGE_TEMPLATE.format( + old_location=old_module, + new_location=new_module) + + def getattr_dunder(name): + if name in all: + # We are using the "RuntimeWarning" to make sure it is not + # ignored by default. + warnings.warn(warning_message, RuntimeWarning) + package = importlib.import_module(new_module) + return getattr(package, name) + raise AttributeError(f"Module {new_module!r} has no attribute {name!r}.") + return getattr_dunder diff --git a/venv/lib/python3.10/site-packages/torch/nn/utils/clip_grad.py b/venv/lib/python3.10/site-packages/torch/nn/utils/clip_grad.py new file mode 100644 index 0000000000000000000000000000000000000000..6549a6f3e2c8db1c9f46ba5f6a28d641e8871f6f --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/nn/utils/clip_grad.py @@ -0,0 +1,151 @@ +import warnings +import functools +from typing import Union, Iterable, List, Dict, Tuple, Optional, cast + +import torch +from torch import Tensor +from torch.utils._foreach_utils import _group_tensors_by_device_and_dtype, _has_foreach_support, _device_has_foreach_support + +_tensor_or_tensors = Union[torch.Tensor, Iterable[torch.Tensor]] + +__all__ = ['clip_grad_norm_', 'clip_grad_norm', 'clip_grad_value_'] + +def _no_grad(func): + """ + This wrapper is needed to avoid a circular import when using @torch.no_grad on the exposed functions + clip_grad_norm_ and clip_grad_value_ themselves. + """ + def _no_grad_wrapper(*args, **kwargs): + with torch.no_grad(): + return func(*args, **kwargs) + functools.update_wrapper(_no_grad_wrapper, func) + return _no_grad_wrapper + +@_no_grad +def clip_grad_norm_( + parameters: _tensor_or_tensors, max_norm: float, norm_type: float = 2.0, + error_if_nonfinite: bool = False, foreach: Optional[bool] = None) -> torch.Tensor: + r"""Clip the gradient norm of an iterable of parameters. + + The norm is computed over all gradients together, as if they were + concatenated into a single vector. Gradients are modified in-place. + + Args: + parameters (Iterable[Tensor] or Tensor): an iterable of Tensors or a + single Tensor that will have gradients normalized + max_norm (float): max norm of the gradients + norm_type (float): type of the used p-norm. Can be ``'inf'`` for + infinity norm. + error_if_nonfinite (bool): if True, an error is thrown if the total + norm of the gradients from :attr:`parameters` is ``nan``, + ``inf``, or ``-inf``. Default: False (will switch to True in the future) + foreach (bool): use the faster foreach-based implementation. 
+ If ``None``, use the foreach implementation for CUDA and CPU native tensors and silently + fall back to the slow implementation for other device types. + Default: ``None`` + + Returns: + Total norm of the parameter gradients (viewed as a single vector). + """ + if isinstance(parameters, torch.Tensor): + parameters = [parameters] + grads = [p.grad for p in parameters if p.grad is not None] + max_norm = float(max_norm) + norm_type = float(norm_type) + if len(grads) == 0: + return torch.tensor(0.) + first_device = grads[0].device + grouped_grads: Dict[Tuple[torch.device, torch.dtype], Tuple[List[List[Tensor]], List[int]]] \ + = _group_tensors_by_device_and_dtype([grads]) # type: ignore[assignment] + + norms: List[Tensor] = [] + for ((device, _), ([device_grads], _)) in grouped_grads.items(): # type: ignore[assignment] + if ( + (foreach is None and _has_foreach_support(device_grads, device)) + or (foreach and _device_has_foreach_support(device)) + ): + norms.extend(torch._foreach_norm(device_grads, norm_type)) + elif foreach: + raise RuntimeError(f'foreach=True was passed, but can\'t use the foreach API on {device.type} tensors') + else: + norms.extend([torch.linalg.vector_norm(g, norm_type) for g in device_grads]) + + total_norm = torch.linalg.vector_norm(torch.stack([norm.to(first_device) for norm in norms]), norm_type) + + if error_if_nonfinite and torch.logical_or(total_norm.isnan(), total_norm.isinf()): + raise RuntimeError( + f'The total norm of order {norm_type} for gradients from ' + '`parameters` is non-finite, so it cannot be clipped. To disable ' + 'this error and scale the gradients by the non-finite norm anyway, ' + 'set `error_if_nonfinite=False`') + clip_coef = max_norm / (total_norm + 1e-6) + # Note: multiplying by the clamped coef is redundant when the coef is clamped to 1, but doing so + # avoids a `if clip_coef < 1:` conditional which can require a CPU <=> device synchronization + # when the gradients do not reside in CPU memory. + clip_coef_clamped = torch.clamp(clip_coef, max=1.0) + for ((device, _), ([device_grads], _)) in grouped_grads.items(): # type: ignore[assignment] + if ( + (foreach is None and _has_foreach_support(device_grads, device)) + or (foreach and _device_has_foreach_support(device)) + ): + torch._foreach_mul_(device_grads, clip_coef_clamped.to(device)) + elif foreach: + raise RuntimeError(f'foreach=True was passed, but can\'t use the foreach API on {device.type} tensors') + else: + clip_coef_clamped_device = clip_coef_clamped.to(device) + for g in device_grads: + g.mul_(clip_coef_clamped_device) + + return total_norm + + +def clip_grad_norm( + parameters: _tensor_or_tensors, max_norm: float, norm_type: float = 2., + error_if_nonfinite: bool = False, foreach: Optional[bool] = None) -> torch.Tensor: + r"""Clip the gradient norm of an iterable of parameters. + + .. warning:: + This method is now deprecated in favor of + :func:`torch.nn.utils.clip_grad_norm_`. + """ + warnings.warn("torch.nn.utils.clip_grad_norm is now deprecated in favor " + "of torch.nn.utils.clip_grad_norm_.", stacklevel=2) + return clip_grad_norm_(parameters, max_norm, norm_type, error_if_nonfinite, foreach) + + +@_no_grad +def clip_grad_value_(parameters: _tensor_or_tensors, clip_value: float, foreach: Optional[bool] = None) -> None: + r"""Clip the gradients of an iterable of parameters at specified value. + + Gradients are modified in-place. 
+ + Args: + parameters (Iterable[Tensor] or Tensor): an iterable of Tensors or a + single Tensor that will have gradients normalized + clip_value (float): maximum allowed value of the gradients. + The gradients are clipped in the range + :math:`\left[\text{-clip\_value}, \text{clip\_value}\right]` + foreach (bool): use the faster foreach-based implementation + If ``None``, use the foreach implementation for CUDA and CPU native tensors and + silently fall back to the slow implementation for other device types. + Default: ``None`` + """ + if isinstance(parameters, torch.Tensor): + parameters = [parameters] + clip_value = float(clip_value) + + grads = [p.grad for p in parameters if p.grad is not None] + grouped_grads = _group_tensors_by_device_and_dtype([grads]) + + for ((device, _), ([grads], _)) in grouped_grads.items(): # type: ignore[assignment] + if ( + (foreach is None and _has_foreach_support(cast(List[Tensor], grads), device=device)) + or (foreach and _device_has_foreach_support(device)) + ): + torch._foreach_clamp_min_(cast(List[Tensor], grads), -clip_value) + torch._foreach_clamp_max_(cast(List[Tensor], grads), clip_value) + elif foreach: + raise RuntimeError(f'foreach=True was passed, but can\'t use the foreach API on {device.type} tensors') + else: + for grad in grads: + cast(Tensor, grad).clamp_(min=-clip_value, max=clip_value) diff --git a/venv/lib/python3.10/site-packages/torch/nn/utils/convert_parameters.py b/venv/lib/python3.10/site-packages/torch/nn/utils/convert_parameters.py new file mode 100644 index 0000000000000000000000000000000000000000..e23352b6b6d9bb2f32df6fb26401e4b8d9281636 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/nn/utils/convert_parameters.py @@ -0,0 +1,83 @@ +import torch +from typing import Iterable, Optional + + +def parameters_to_vector(parameters: Iterable[torch.Tensor]) -> torch.Tensor: + r"""Flatten an iterable of parameters into a single vector. + + Args: + parameters (Iterable[Tensor]): an iterable of Tensors that are the + parameters of a model. + + Returns: + The parameters represented by a single vector + """ + # Flag for the device where the parameter is located + param_device = None + + vec = [] + for param in parameters: + # Ensure the parameters are located in the same device + param_device = _check_param_device(param, param_device) + + vec.append(param.view(-1)) + return torch.cat(vec) + + +def vector_to_parameters(vec: torch.Tensor, parameters: Iterable[torch.Tensor]) -> None: + r"""Copy slices of a vector into an iterable of parameters. + + Args: + vec (Tensor): a single vector representing the parameters of a model. + parameters (Iterable[Tensor]): an iterable of Tensors that are the + parameters of a model. 
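+
+    Example (editor's illustrative sketch, not upstream documentation; assumes a
+    small ``torch.nn.Linear`` and the companion :func:`parameters_to_vector`)::
+
+        >>> import torch
+        >>> from torch.nn.utils import parameters_to_vector, vector_to_parameters
+        >>> model = torch.nn.Linear(3, 2)
+        >>> vec = parameters_to_vector(model.parameters())  # numel: 3*2 + 2 = 8
+        >>> vector_to_parameters(torch.zeros_like(vec), model.parameters())
+        >>> bool((model.weight == 0).all() and (model.bias == 0).all())
+        True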
+ """ + # Ensure vec of type Tensor + if not isinstance(vec, torch.Tensor): + raise TypeError(f'expected torch.Tensor, but got: {torch.typename(vec)}') + # Flag for the device where the parameter is located + param_device = None + + # Pointer for slicing the vector for each parameter + pointer = 0 + for param in parameters: + # Ensure the parameters are located in the same device + param_device = _check_param_device(param, param_device) + + # The length of the parameter + num_param = param.numel() + # Slice the vector, reshape it, and replace the old data of the parameter + param.data = vec[pointer:pointer + num_param].view_as(param).data + + # Increment the pointer + pointer += num_param + + +def _check_param_device(param: torch.Tensor, old_param_device: Optional[int]) -> int: + r"""Check if the parameters are located on the same device. + + Currently, the conversion between model parameters and single vector form is not supported + for multiple allocations, e.g. parameters in different GPUs/PrivateUse1s, or mixture of CPU/GPU/PrivateUse1. + + Args: + param ([Tensor]): a Tensor of a parameter of a model + old_param_device (int): the device where the first parameter of a + model is allocated. + + Returns: + old_param_device (int): report device for the first time + """ + # Meet the first parameter + support_device_types = ["cuda", torch._C._get_privateuse1_backend_name()] + if old_param_device is None: + old_param_device = param.get_device() if param.device.type in support_device_types else -1 + else: + warn = False + if param.device.type in support_device_types: # Check if in same GPU/PrivateUse1 + warn = (param.get_device() != old_param_device) + else: # Check if in CPU + warn = (old_param_device != -1) + if warn: + raise TypeError('Found two parameters on different devices, ' + 'this is currently not supported.') + return old_param_device diff --git a/venv/lib/python3.10/site-packages/torch/nn/utils/fusion.py b/venv/lib/python3.10/site-packages/torch/nn/utils/fusion.py new file mode 100644 index 0000000000000000000000000000000000000000..9433d9c376df81787e91a4ca4dd3698107f32bc5 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/nn/utils/fusion.py @@ -0,0 +1,152 @@ +from __future__ import annotations + +import copy +from typing import Optional, Tuple, TypeVar + +import torch + +__all__ = ['fuse_conv_bn_eval', 'fuse_conv_bn_weights', 'fuse_linear_bn_eval', 'fuse_linear_bn_weights'] + +ConvT = TypeVar("ConvT", bound="torch.nn.modules.conv._ConvNd") +LinearT = TypeVar("LinearT", bound="torch.nn.Linear") + +def fuse_conv_bn_eval(conv: ConvT, bn: torch.nn.modules.batchnorm._BatchNorm, transpose: bool = False) -> ConvT: + r"""Fuse a convolutional module and a BatchNorm module into a single, new convolutional module. + + Args: + conv (torch.nn.modules.conv._ConvNd): A convolutional module. + bn (torch.nn.modules.batchnorm._BatchNorm): A BatchNorm module. + transpose (bool, optional): If True, transpose the convolutional weight. Defaults to False. + + Returns: + torch.nn.modules.conv._ConvNd: The fused convolutional module. + + .. note:: + Both ``conv`` and ``bn`` must be in eval mode, and ``bn`` must have its running buffers computed. + """ + assert not (conv.training or bn.training), "Fusion only for eval!" 
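+    # Editor's note (illustrative, not upstream code): fuse_conv_bn_weights below folds
+    # the BatchNorm statistics and affine parameters into the convolution, roughly
+    #   W_fused = W_conv * bn_w / sqrt(bn_rv + bn_eps)   (broadcast over output channels)
+    #   b_fused = (b_conv - bn_rm) * bn_w / sqrt(bn_rv + bn_eps) + bn_b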
+ fused_conv = copy.deepcopy(conv) + + assert bn.running_mean is not None and bn.running_var is not None + fused_conv.weight, fused_conv.bias = fuse_conv_bn_weights( + fused_conv.weight, fused_conv.bias, + bn.running_mean, bn.running_var, bn.eps, bn.weight, bn.bias, transpose) + + return fused_conv + +def fuse_conv_bn_weights( + conv_w: torch.Tensor, + conv_b: Optional[torch.Tensor], + bn_rm: torch.Tensor, + bn_rv: torch.Tensor, + bn_eps: float, + bn_w: Optional[torch.Tensor], + bn_b: Optional[torch.Tensor], + transpose: bool = False +) -> Tuple[torch.nn.Parameter, torch.nn.Parameter]: + r"""Fuse convolutional module parameters and BatchNorm module parameters into new convolutional module parameters. + + Args: + conv_w (torch.Tensor): Convolutional weight. + conv_b (Optional[torch.Tensor]): Convolutional bias. + bn_rm (torch.Tensor): BatchNorm running mean. + bn_rv (torch.Tensor): BatchNorm running variance. + bn_eps (float): BatchNorm epsilon. + bn_w (Optional[torch.Tensor]): BatchNorm weight. + bn_b (Optional[torch.Tensor]): BatchNorm bias. + transpose (bool, optional): If True, transpose the conv weight. Defaults to False. + + Returns: + Tuple[torch.nn.Parameter, torch.nn.Parameter]: Fused convolutional weight and bias. + """ + conv_weight_dtype = conv_w.dtype + conv_bias_dtype = conv_b.dtype if conv_b is not None else conv_weight_dtype + if conv_b is None: + conv_b = torch.zeros_like(bn_rm) + if bn_w is None: + bn_w = torch.ones_like(bn_rm) + if bn_b is None: + bn_b = torch.zeros_like(bn_rm) + bn_var_rsqrt = torch.rsqrt(bn_rv + bn_eps) + + if transpose: + shape = [1, -1] + [1] * (len(conv_w.shape) - 2) + else: + shape = [-1, 1] + [1] * (len(conv_w.shape) - 2) + + fused_conv_w = (conv_w * (bn_w * bn_var_rsqrt).reshape(shape)).to(dtype=conv_weight_dtype) + fused_conv_b = ((conv_b - bn_rm) * bn_var_rsqrt * bn_w + bn_b).to(dtype=conv_bias_dtype) + + return ( + torch.nn.Parameter(fused_conv_w, conv_w.requires_grad), torch.nn.Parameter(fused_conv_b, conv_b.requires_grad) + ) + +def fuse_linear_bn_eval(linear: LinearT, bn: torch.nn.modules.batchnorm._BatchNorm) -> LinearT: + r"""Fuse a linear module and a BatchNorm module into a single, new linear module. + + Args: + linear (torch.nn.Linear): A Linear module. + bn (torch.nn.modules.batchnorm._BatchNorm): A BatchNorm module. + + Returns: + torch.nn.Linear: The fused linear module. + + .. note:: + Both ``linear`` and ``bn`` must be in eval mode, and ``bn`` must have its running buffers computed. + """ + assert not (linear.training or bn.training), "Fusion only for eval!" + fused_linear = copy.deepcopy(linear) + + """ + Linear-BN needs to be fused while preserving the shapes of linear weight/bias. + To preserve the shapes of linear weight/bias, the channel dim of bn needs to be broadcastable with the last dim of linear, + because bn operates over the channel dim, (N, C_in, H, W) while linear operates over the last dim, (*, H_in). + To be broadcastable, the number of features in bn and + the number of output features from linear must satisfy the following condition: + 1. they are equal, or + 2. 
the number of features in bn is 1 + Otherwise, skip the folding path + """ + assert ( + linear.out_features == bn.num_features or bn.num_features == 1 + ), "To fuse, linear.out_features == bn.num_features or bn.num_features == 1" + + assert bn.running_mean is not None and bn.running_var is not None + fused_linear.weight, fused_linear.bias = fuse_linear_bn_weights( + fused_linear.weight, fused_linear.bias, + bn.running_mean, bn.running_var, bn.eps, bn.weight, bn.bias) + + return fused_linear + +def fuse_linear_bn_weights( + linear_w: torch.Tensor, + linear_b: Optional[torch.Tensor], + bn_rm: torch.Tensor, + bn_rv: torch.Tensor, + bn_eps: float, + bn_w: torch.Tensor, + bn_b: torch.Tensor, +) -> Tuple[torch.nn.Parameter, torch.nn.Parameter]: + r"""Fuse linear module parameters and BatchNorm module parameters into new linear module parameters. + + Args: + linear_w (torch.Tensor): Linear weight. + linear_b (Optional[torch.Tensor]): Linear bias. + bn_rm (torch.Tensor): BatchNorm running mean. + bn_rv (torch.Tensor): BatchNorm running variance. + bn_eps (float): BatchNorm epsilon. + bn_w (torch.Tensor): BatchNorm weight. + bn_b (torch.Tensor): BatchNorm bias. + transpose (bool, optional): If True, transpose the conv weight. Defaults to False. + + Returns: + Tuple[torch.nn.Parameter, torch.nn.Parameter]: Fused linear weight and bias. + """ + if linear_b is None: + linear_b = torch.zeros_like(bn_rm) + bn_scale = bn_w * torch.rsqrt(bn_rv + bn_eps) + + fused_w = linear_w * bn_scale.unsqueeze(-1) + fused_b = (linear_b - bn_rm) * bn_scale + bn_b + + return torch.nn.Parameter(fused_w, linear_w.requires_grad), torch.nn.Parameter(fused_b, linear_b.requires_grad) diff --git a/venv/lib/python3.10/site-packages/torch/nn/utils/init.py b/venv/lib/python3.10/site-packages/torch/nn/utils/init.py new file mode 100644 index 0000000000000000000000000000000000000000..416ad0db8ef7ef64301614184f611a52c1a01e31 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/nn/utils/init.py @@ -0,0 +1,53 @@ +import inspect +import torch + + +def skip_init(module_cls, *args, **kwargs): + r""" + Given a module class object and args / kwargs, instantiate the module without initializing parameters / buffers. + + This can be useful if initialization is slow or if custom initialization will + be performed, making the default initialization unnecessary. There are some caveats to this, due to + the way this function is implemented: + + 1. The module must accept a `device` arg in its constructor that is passed to any parameters + or buffers created during construction. + + 2. The module must not perform any computation on parameters in its constructor except + initialization (i.e. functions from :mod:`torch.nn.init`). + + If these conditions are satisfied, the module can be instantiated with parameter / buffer values + uninitialized, as if having been created using :func:`torch.empty`. 
+ + Args: + module_cls: Class object; should be a subclass of :class:`torch.nn.Module` + args: args to pass to the module's constructor + kwargs: kwargs to pass to the module's constructor + + Returns: + Instantiated module with uninitialized parameters / buffers + + Example:: + + >>> # xdoctest: +IGNORE_WANT("non-deterministic") + >>> import torch + >>> m = torch.nn.utils.skip_init(torch.nn.Linear, 5, 1) + >>> m.weight + Parameter containing: + tensor([[0.0000e+00, 1.5846e+29, 7.8307e+00, 2.5250e-29, 1.1210e-44]], + requires_grad=True) + >>> m2 = torch.nn.utils.skip_init(torch.nn.Linear, in_features=6, out_features=1) + >>> m2.weight + Parameter containing: + tensor([[-1.4677e+24, 4.5915e-41, 1.4013e-45, 0.0000e+00, -1.4677e+24, + 4.5915e-41]], requires_grad=True) + + """ + if not issubclass(module_cls, torch.nn.Module): + raise RuntimeError(f'Expected a Module; got {module_cls}') + if 'device' not in inspect.signature(module_cls).parameters: + raise RuntimeError('Module must support a \'device\' arg to skip initialization') + + final_device = kwargs.pop('device', 'cpu') + kwargs['device'] = 'meta' + return module_cls(*args, **kwargs).to_empty(device=final_device) diff --git a/venv/lib/python3.10/site-packages/torch/nn/utils/memory_format.py b/venv/lib/python3.10/site-packages/torch/nn/utils/memory_format.py new file mode 100644 index 0000000000000000000000000000000000000000..c8fc22bea51cfc47006d1918d977afd2c4f3310b --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/nn/utils/memory_format.py @@ -0,0 +1,143 @@ +import torch + + +def convert_conv2d_weight_memory_format(module, memory_format): + r"""Convert ``memory_format`` of ``nn.Conv2d.weight`` to ``memory_format``. + + The conversion recursively applies to nested ``nn.Module``, including ``module``. + Note that it only changes the memory_format, but not the semantics of each dimensions. + This function is used to facilitate the computation to adopt NHWC kernels, which + provides considerable speed up for fp16 data on CUDA devices with compute capability >= 7.0 + + .. note:: + Calling ``model.to(memory_format=torch.channels_last)`` is more aggressive + than the utility function ``convert_conv2d_weight_memory_format``. Any + layer with 4d weight will be affected by ``model.to``, which does not + necessarily benefit from conversion to specified ``memory_format``. + One place we are confident in is that NHWC(channels_last) conversion for + convolution in cuDNN, As it is beneficial to run convolution in NHWC, + even in cases where we have to apply permutation to input tensors. + + Hence our strategy here is to convert only the weight of convolution to + channels_last. This ensures that; + 1. Fast convolution kernels will be used, the benefit of which could + outweigh overhead of permutation (if input is not in the same format) + 2. No unnecessary permutations are applied on layers that do not benefit + from memory_format conversion. + + The optimal case is that, layers between convolution layers are channels + last compatible. Input tensor would be permuted to channels last when it + encounters the first convolution layer and stay in that memory format. + Hence following convolutions will not need to permute its input tensor. + + In case where a channels last incompatible layer is between convolution + layers, we need to permute the input tensor back to contiguous format + for that layer. 
The input tensor will go through the remaining layers in + contiguous format and be permuted to channels last when it encounters + another convolution layer. There's no point in propagating that + permutation to an earlier layer, as most layers are quite agnostic to + ``memory_format``. + + This claim might change when PyTorch supports fusion of permutation, as + there might have been a better spot to fuse the permutation other than + immediately before a convolution. + + Args: + module (nn.Module): ``nn.Conv2d`` & ``nn.ConvTranspose2d`` or container + ``nn.Module`` + memory_format: user specified ``memory_format``, + e.g. ``torch.channels_last`` or ``torch.contiguous_format`` + + Returns: + The original module with updated ``nn.Conv2d`` + + Example: + >>> # xdoctest: +REQUIRES(env:TORCH_DOCTEST_CUDA) + >>> # xdoctest: +REQUIRES(env:CUBLAS_WORKSPACE_CONFIG) + >>> input = torch.randint(1, 10, (2, 8, 4, 4), dtype=torch.float16, device="cuda") + >>> model = nn.Sequential( + >>> nn.Conv2d(8, 4, 3)).cuda().half() + >>> # This is identical to: + >>> # nn.utils.convert_conv2d_weight_memory_format(model, torch.channels_last) + >>> model = nn.utils.convert_conv2d_weight_memory_format(model, torch.channels_last) + >>> out = model(input) + """ + # TODO: expand this to `_ConvNd` when channels_last support is extended + # beyond only 4d tensors. + if isinstance(module, (torch.nn.Conv2d, torch.nn.ConvTranspose2d)): + weight_data = module.weight.detach().clone().contiguous(memory_format=memory_format) + module.weight.data = weight_data.resize_(weight_data.size(), memory_format=memory_format) + for child in module.children(): + convert_conv2d_weight_memory_format(child, memory_format) + return module + + +def convert_conv3d_weight_memory_format(module, memory_format): + r"""Convert ``memory_format`` of ``nn.Conv3d.weight`` to ``memory_format`` + The conversion recursively applies to nested ``nn.Module``, including ``module``. + Note that it only changes the memory_format, but not the semantics of each dimensions. + This function is used to facilitate the computation to adopt NHWC kernels, which + provides considerable speed up for fp16 data on CUDA devices with compute capability >= 7.0 + + .. note:: + Calling ``model.to(memory_format=torch.channels_last)`` is more aggressive + than the utility function ``convert_conv3d_weight_memory_format``. Any + layer with 4d weight will be affected by ``model.to``, which does not + necessarily benefit from conversion to specified ``memory_format``. + One place we are confident in is that NHWC(channels_last) conversion for + convolution in cuDNN, As it is beneficial to run convolution in NHWC, + even in cases where we have to apply permutation to input tensors. + + Hence our strategy here is to convert only the weight of convolution to + channels_last. This ensures that; + 1. Fast convolution kernels will be used, the benefit of which could + outweigh overhead of permutation (if input is not in the same format) + 2. No unnecessary permutations are applied on layers that do not benefit + from memory_format conversion. + + The optimal case is that, layers between convolution layers are channels + last compatible. Input tensor would be permuted to channels last when it + encounters the first convolution layer and stay in that memory format. + Hence following convolutions will not need to permute its input tensor. 
+ + In case where a channels last incompatible layer is between convolution + layers, we need to permute the input tensor back to contiguous format + for that layer. The input tensor will go through the remaining layers in + contiguous format and be permuted to channels last when it encounters + another convolution layer. There's no point in propagating that + permutation to an earlier layer, as most layers are quite agnostic to + ``memory_format``. + + This claim might change when PyTorch supports fusion of permutation, as + there might have been a better spot to fuse the permutation other than + immediately before a convolution. + + Args: + module (nn.Module): ``nn.Conv3d`` & ``nn.ConvTranspose3d`` or container + ``nn.Module`` + memory_format: user specified ``memory_format``, + e.g. ``torch.channels_last`` or ``torch.contiguous_format`` + + Returns: + The original module with updated ``nn.Conv3d`` + + Example: + >>> # xdoctest: +REQUIRES(env:TORCH_DOCTEST_CUDA) + >>> # xdoctest: +REQUIRES(env:CUBLAS_WORKSPACE_CONFIG) + >>> input = torch.randint(1, 10, (2, 8, 4, 4, 4), dtype=torch.float16, device="cuda") + >>> model = nn.Sequential( + >>> nn.Conv3d(8, 4, 3)).cuda().half() + >>> # This is identical to: + >>> # nn.utils.convert_conv3d_weight_memory_format(model, torch.channels_last) + >>> model = nn.utils.convert_conv3d_weight_memory_format(model, torch.channels_last) + >>> out = model(input) + """ + + # TODO: expand this to `_ConvNd` when channels_last support is extended + # beyond only 4d tensors. + if isinstance(module, (torch.nn.Conv3d, torch.nn.ConvTranspose3d)): + weight_data = module.weight.detach().clone().contiguous(memory_format=memory_format) + module.weight.data = weight_data.resize_(weight_data.size(), memory_format=memory_format) + for child in module.children(): + convert_conv3d_weight_memory_format(child, memory_format) + return module diff --git a/venv/lib/python3.10/site-packages/torch/nn/utils/parametrizations.py b/venv/lib/python3.10/site-packages/torch/nn/utils/parametrizations.py new file mode 100644 index 0000000000000000000000000000000000000000..f9b25bcac0cb7bbc67b8f99bfc24960b2e54b8f7 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/nn/utils/parametrizations.py @@ -0,0 +1,571 @@ +from enum import Enum, auto + +import torch +from torch import Tensor +from ..utils import parametrize +from ..modules import Module +from .. import functional as F + +from typing import Optional + +__all__ = ['orthogonal', 'spectral_norm', 'weight_norm'] + + +def _is_orthogonal(Q, eps=None): + n, k = Q.size(-2), Q.size(-1) + Id = torch.eye(k, dtype=Q.dtype, device=Q.device) + # A reasonable eps, but not too large + eps = 10. * n * torch.finfo(Q.dtype).eps + return torch.allclose(Q.mH @ Q, Id, atol=eps) + + +def _make_orthogonal(A): + """Assume that A is a tall matrix. + + Compute the Q factor s.t. A = QR (A may be complex) and diag(R) is real and non-negative. 
+ """ + X, tau = torch.geqrf(A) + Q = torch.linalg.householder_product(X, tau) + # The diagonal of X is the diagonal of R (which is always real) so we normalise by its signs + Q *= X.diagonal(dim1=-2, dim2=-1).sgn().unsqueeze(-2) + return Q + + +class _OrthMaps(Enum): + matrix_exp = auto() + cayley = auto() + householder = auto() + + +class _Orthogonal(Module): + base: Tensor + + def __init__(self, + weight, + orthogonal_map: _OrthMaps, + *, + use_trivialization=True) -> None: + super().__init__() + + # Note [Householder complex] + # For complex tensors, it is not possible to compute the tensor `tau` necessary for + # linalg.householder_product from the reflectors. + # To see this, note that the reflectors have a shape like: + # 0 0 0 + # * 0 0 + # * * 0 + # which, for complex matrices, give n(n-1) (real) parameters. Now, you need n^2 parameters + # to parametrize the unitary matrices. Saving tau on its own does not work either, because + # not every combination of `(A, tau)` gives a unitary matrix, meaning that if we optimise + # them as independent tensors we would not maintain the constraint + # An equivalent reasoning holds for rectangular matrices + if weight.is_complex() and orthogonal_map == _OrthMaps.householder: + raise ValueError("The householder parametrization does not support complex tensors.") + + self.shape = weight.shape + self.orthogonal_map = orthogonal_map + if use_trivialization: + self.register_buffer("base", None) + + def forward(self, X: torch.Tensor) -> torch.Tensor: + n, k = X.size(-2), X.size(-1) + transposed = n < k + if transposed: + X = X.mT + n, k = k, n + # Here n > k and X is a tall matrix + if self.orthogonal_map == _OrthMaps.matrix_exp or self.orthogonal_map == _OrthMaps.cayley: + # We just need n x k - k(k-1)/2 parameters + X = X.tril() + if n != k: + # Embed into a square matrix + X = torch.cat([X, X.new_zeros(n, n - k).expand(*X.shape[:-2], -1, -1)], dim=-1) + A = X - X.mH + # A is skew-symmetric (or skew-hermitian) + if self.orthogonal_map == _OrthMaps.matrix_exp: + Q = torch.matrix_exp(A) + elif self.orthogonal_map == _OrthMaps.cayley: + # Computes the Cayley retraction (I+A/2)(I-A/2)^{-1} + Id = torch.eye(n, dtype=A.dtype, device=A.device) + Q = torch.linalg.solve(torch.add(Id, A, alpha=-0.5), torch.add(Id, A, alpha=0.5)) + # Q is now orthogonal (or unitary) of size (..., n, n) + if n != k: + Q = Q[..., :k] + # Q is now the size of the X (albeit perhaps transposed) + else: + # X is real here, as we do not support householder with complex numbers + A = X.tril(diagonal=-1) + tau = 2. / (1. + (A * A).sum(dim=-2)) + Q = torch.linalg.householder_product(A, tau) + # The diagonal of X is 1's and -1's + # We do not want to differentiate through this or update the diagonal of X hence the casting + Q = Q * X.diagonal(dim1=-2, dim2=-1).int().unsqueeze(-2) + + if hasattr(self, "base"): + Q = self.base @ Q + if transposed: + Q = Q.mT + return Q # type: ignore[possibly-undefined] + + @torch.autograd.no_grad() + def right_inverse(self, Q: torch.Tensor) -> torch.Tensor: + if Q.shape != self.shape: + raise ValueError(f"Expected a matrix or batch of matrices of shape {self.shape}. 
" + f"Got a tensor of shape {Q.shape}.") + + Q_init = Q + n, k = Q.size(-2), Q.size(-1) + transpose = n < k + if transpose: + Q = Q.mT + n, k = k, n + + # We always make sure to always copy Q in every path + if not hasattr(self, "base"): + # Note [right_inverse expm cayley] + # If we do not have use_trivialization=True, we just implement the inverse of the forward + # map for the Householder. To see why, think that for the Cayley map, + # we would need to find the matrix X \in R^{n x k} such that: + # Y = torch.cat([X.tril(), X.new_zeros(n, n - k).expand(*X.shape[:-2], -1, -1)], dim=-1) + # A = Y - Y.mH + # cayley(A)[:, :k] + # gives the original tensor. It is not clear how to do this. + # Perhaps via some algebraic manipulation involving the QR like that of + # Corollary 2.2 in Edelman, Arias and Smith? + if self.orthogonal_map == _OrthMaps.cayley or self.orthogonal_map == _OrthMaps.matrix_exp: + raise NotImplementedError("It is not possible to assign to the matrix exponential " + "or the Cayley parametrizations when use_trivialization=False.") + + # If parametrization == _OrthMaps.householder, make Q orthogonal via the QR decomposition. + # Here Q is always real because we do not support householder and complex matrices. + # See note [Householder complex] + A, tau = torch.geqrf(Q) + # We want to have a decomposition X = QR with diag(R) > 0, as otherwise we could + # decompose an orthogonal matrix Q as Q = (-Q)@(-Id), which is a valid QR decomposition + # The diagonal of Q is the diagonal of R from the qr decomposition + A.diagonal(dim1=-2, dim2=-1).sign_() + # Equality with zero is ok because LAPACK returns exactly zero when it does not want + # to use a particular reflection + A.diagonal(dim1=-2, dim2=-1)[tau == 0.] *= -1 + return A.mT if transpose else A + else: + if n == k: + # We check whether Q is orthogonal + if not _is_orthogonal(Q): + Q = _make_orthogonal(Q) + else: # Is orthogonal + Q = Q.clone() + else: + # Complete Q into a full n x n orthogonal matrix + N = torch.randn(*(Q.size()[:-2] + (n, n - k)), dtype=Q.dtype, device=Q.device) + Q = torch.cat([Q, N], dim=-1) + Q = _make_orthogonal(Q) + self.base = Q + + # It is necessary to return the -Id, as we use the diagonal for the + # Householder parametrization. Using -Id makes: + # householder(torch.zeros(m,n)) == torch.eye(m,n) + # Poor man's version of eye_like + neg_Id = torch.zeros_like(Q_init) + neg_Id.diagonal(dim1=-2, dim2=-1).fill_(-1.) + return neg_Id + + +def orthogonal(module: Module, + name: str = 'weight', + orthogonal_map: Optional[str] = None, + *, + use_trivialization: bool = True) -> Module: + r"""Apply an orthogonal or unitary parametrization to a matrix or a batch of matrices. + + Letting :math:`\mathbb{K}` be :math:`\mathbb{R}` or :math:`\mathbb{C}`, the parametrized + matrix :math:`Q \in \mathbb{K}^{m \times n}` is **orthogonal** as + + .. math:: + + \begin{align*} + Q^{\text{H}}Q &= \mathrm{I}_n \mathrlap{\qquad \text{if }m \geq n}\\ + QQ^{\text{H}} &= \mathrm{I}_m \mathrlap{\qquad \text{if }m < n} + \end{align*} + + where :math:`Q^{\text{H}}` is the conjugate transpose when :math:`Q` is complex + and the transpose when :math:`Q` is real-valued, and + :math:`\mathrm{I}_n` is the `n`-dimensional identity matrix. + In plain words, :math:`Q` will have orthonormal columns whenever :math:`m \geq n` + and orthonormal rows otherwise. + + If the tensor has more than two dimensions, we consider it as a batch of matrices of shape `(..., m, n)`. 
+ + The matrix :math:`Q` may be parametrized via three different ``orthogonal_map`` in terms of the original tensor: + + - ``"matrix_exp"``/``"cayley"``: + the :func:`~torch.matrix_exp` :math:`Q = \exp(A)` and the `Cayley map`_ + :math:`Q = (\mathrm{I}_n + A/2)(\mathrm{I}_n - A/2)^{-1}` are applied to a skew-symmetric + :math:`A` to give an orthogonal matrix. + - ``"householder"``: computes a product of Householder reflectors + (:func:`~torch.linalg.householder_product`). + + ``"matrix_exp"``/``"cayley"`` often make the parametrized weight converge faster than + ``"householder"``, but they are slower to compute for very thin or very wide matrices. + + If ``use_trivialization=True`` (default), the parametrization implements the "Dynamic Trivialization Framework", + where an extra matrix :math:`B \in \mathbb{K}^{n \times n}` is stored under + ``module.parametrizations.weight[0].base``. This helps the + convergence of the parametrized layer at the expense of some extra memory use. + See `Trivializations for Gradient-Based Optimization on Manifolds`_ . + + Initial value of :math:`Q`: + If the original tensor is not parametrized and ``use_trivialization=True`` (default), the initial value + of :math:`Q` is that of the original tensor if it is orthogonal (or unitary in the complex case) + and it is orthogonalized via the QR decomposition otherwise (see :func:`torch.linalg.qr`). + Same happens when it is not parametrized and ``orthogonal_map="householder"`` even when ``use_trivialization=False``. + Otherwise, the initial value is the result of the composition of all the registered + parametrizations applied to the original tensor. + + .. note:: + This function is implemented using the parametrization functionality + in :func:`~torch.nn.utils.parametrize.register_parametrization`. + + + .. _`Cayley map`: https://en.wikipedia.org/wiki/Cayley_transform#Matrix_map + .. _`Trivializations for Gradient-Based Optimization on Manifolds`: https://arxiv.org/abs/1909.09501 + + Args: + module (nn.Module): module on which to register the parametrization. + name (str, optional): name of the tensor to make orthogonal. Default: ``"weight"``. + orthogonal_map (str, optional): One of the following: ``"matrix_exp"``, ``"cayley"``, ``"householder"``. + Default: ``"matrix_exp"`` if the matrix is square or complex, ``"householder"`` otherwise. + use_trivialization (bool, optional): whether to use the dynamic trivialization framework. + Default: ``True``. + + Returns: + The original module with an orthogonal parametrization registered to the specified + weight + + Example:: + + >>> # xdoctest: +REQUIRES(env:TORCH_DOCTEST_LAPACK) + >>> orth_linear = orthogonal(nn.Linear(20, 40)) + >>> orth_linear + ParametrizedLinear( + in_features=20, out_features=40, bias=True + (parametrizations): ModuleDict( + (weight): ParametrizationList( + (0): _Orthogonal() + ) + ) + ) + >>> # xdoctest: +IGNORE_WANT + >>> Q = orth_linear.weight + >>> torch.dist(Q.T @ Q, torch.eye(20)) + tensor(4.9332e-07) + """ + weight = getattr(module, name, None) + if not isinstance(weight, Tensor): + raise ValueError( + f"Module '{module}' has no parameter or buffer with name '{name}'" + ) + + # We could implement this for 1-dim tensors as the maps on the sphere + # but I believe it'd bite more people than it'd help + if weight.ndim < 2: + raise ValueError("Expected a matrix or batch of matrices. 
" + f"Got a tensor of {weight.ndim} dimensions.") + + if orthogonal_map is None: + orthogonal_map = "matrix_exp" if weight.size(-2) == weight.size(-1) or weight.is_complex() else "householder" + + orth_enum = getattr(_OrthMaps, orthogonal_map, None) + if orth_enum is None: + raise ValueError('orthogonal_map has to be one of "matrix_exp", "cayley", "householder". ' + f'Got: {orthogonal_map}') + orth = _Orthogonal(weight, + orth_enum, + use_trivialization=use_trivialization) + parametrize.register_parametrization(module, name, orth, unsafe=True) + return module + + +class _WeightNorm(Module): + def __init__( + self, + dim: Optional[int] = 0, + ) -> None: + super().__init__() + if dim is None: + dim = -1 + self.dim = dim + + def forward(self, weight_g, weight_v): + return torch._weight_norm(weight_v, weight_g, self.dim) + + def right_inverse(self, weight): + weight_g = torch.norm_except_dim(weight, 2, self.dim) + weight_v = weight + + return weight_g, weight_v + + +def weight_norm(module: Module, name: str = 'weight', dim: int = 0): + r"""Apply weight normalization to a parameter in the given module. + + .. math:: + \mathbf{w} = g \dfrac{\mathbf{v}}{\|\mathbf{v}\|} + + Weight normalization is a reparameterization that decouples the magnitude + of a weight tensor from its direction. This replaces the parameter specified + by :attr:`name` with two parameters: one specifying the magnitude + and one specifying the direction. + + By default, with ``dim=0``, the norm is computed independently per output + channel/plane. To compute a norm over the entire weight tensor, use + ``dim=None``. + + See https://arxiv.org/abs/1602.07868 + + Args: + module (Module): containing module + name (str, optional): name of weight parameter + dim (int, optional): dimension over which to compute the norm + + Returns: + The original module with the weight norm hook + + Example:: + + >>> m = weight_norm(nn.Linear(20, 40), name='weight') + >>> m + ParametrizedLinear( + in_features=20, out_features=40, bias=True + (parametrizations): ModuleDict( + (weight): ParametrizationList( + (0): _WeightNorm() + ) + ) + ) + >>> m.parametrizations.weight.original0.size() + torch.Size([40, 1]) + >>> m.parametrizations.weight.original1.size() + torch.Size([40, 20]) + + """ + _weight_norm = _WeightNorm(dim) + parametrize.register_parametrization(module, name, _weight_norm, unsafe=True) + + def _weight_norm_compat_hook(state_dict, prefix, local_metadata, strict, missing_keys, unexpected_keys, error_msgs): + g_key = f"{prefix}{name}_g" + v_key = f"{prefix}{name}_v" + if g_key in state_dict and v_key in state_dict: + original0 = state_dict.pop(g_key) + original1 = state_dict.pop(v_key) + state_dict[f"{prefix}parametrizations.{name}.original0"] = original0 + state_dict[f"{prefix}parametrizations.{name}.original1"] = original1 + module._register_load_state_dict_pre_hook(_weight_norm_compat_hook) + return module + + +class _SpectralNorm(Module): + def __init__( + self, + weight: torch.Tensor, + n_power_iterations: int = 1, + dim: int = 0, + eps: float = 1e-12 + ) -> None: + super().__init__() + ndim = weight.ndim + if dim >= ndim or dim < -ndim: + raise IndexError("Dimension out of range (expected to be in range of " + f"[-{ndim}, {ndim - 1}] but got {dim})") + + if n_power_iterations <= 0: + raise ValueError('Expected n_power_iterations to be positive, but ' + f'got n_power_iterations={n_power_iterations}') + self.dim = dim if dim >= 0 else dim + ndim + self.eps = eps + if ndim > 1: + # For ndim == 1 we do not need to approximate anything 
(see _SpectralNorm.forward) + self.n_power_iterations = n_power_iterations + weight_mat = self._reshape_weight_to_matrix(weight) + h, w = weight_mat.size() + + u = weight_mat.new_empty(h).normal_(0, 1) + v = weight_mat.new_empty(w).normal_(0, 1) + self.register_buffer('_u', F.normalize(u, dim=0, eps=self.eps)) + self.register_buffer('_v', F.normalize(v, dim=0, eps=self.eps)) + + # Start with u, v initialized to some reasonable values by performing a number + # of iterations of the power method + self._power_method(weight_mat, 15) + + def _reshape_weight_to_matrix(self, weight: torch.Tensor) -> torch.Tensor: + # Precondition + assert weight.ndim > 1 + + if self.dim != 0: + # permute dim to front + weight = weight.permute(self.dim, *(d for d in range(weight.dim()) if d != self.dim)) + + return weight.flatten(1) + + @torch.autograd.no_grad() + def _power_method(self, weight_mat: torch.Tensor, n_power_iterations: int) -> None: + # See original note at torch/nn/utils/spectral_norm.py + # NB: If `do_power_iteration` is set, the `u` and `v` vectors are + # updated in power iteration **in-place**. This is very important + # because in `DataParallel` forward, the vectors (being buffers) are + # broadcast from the parallelized module to each module replica, + # which is a new module object created on the fly. And each replica + # runs its own spectral norm power iteration. So simply assigning + # the updated vectors to the module this function runs on will cause + # the update to be lost forever. And the next time the parallelized + # module is replicated, the same randomly initialized vectors are + # broadcast and used! + # + # Therefore, to make the change propagate back, we rely on two + # important behaviors (also enforced via tests): + # 1. `DataParallel` doesn't clone storage if the broadcast tensor + # is already on correct device; and it makes sure that the + # parallelized module is already on `device[0]`. + # 2. If the out tensor in `out=` kwarg has correct shape, it will + # just fill in the values. + # Therefore, since the same power iteration is performed on all + # devices, simply updating the tensors in-place will make sure that + # the module replica on `device[0]` will update the _u vector on the + # parallelized module (by shared storage). + # + # However, after we update `u` and `v` in-place, we need to **clone** + # them before using them to normalize the weight. This is to support + # backproping through two forward passes, e.g., the common pattern in + # GAN training: loss = D(real) - D(fake). Otherwise, engine will + # complain that variables needed to do backward for the first forward + # (i.e., the `u` and `v` vectors) are changed in the second forward. + + # Precondition + assert weight_mat.ndim > 1 + + for _ in range(n_power_iterations): + # Spectral norm of weight equals to `u^T W v`, where `u` and `v` + # are the first left and right singular vectors. + # This power iteration produces approximations of `u` and `v`. 
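+            # (Editor's note, illustrative) One power-iteration step:
+            #   u <- normalize(W @ v),  v <- normalize(W^H @ u)
+            # forward() later approximates sigma(W) as <u, W @ v>.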
+ self._u = F.normalize(torch.mv(weight_mat, self._v), # type: ignore[has-type] + dim=0, eps=self.eps, out=self._u) # type: ignore[has-type] + self._v = F.normalize(torch.mv(weight_mat.H, self._u), + dim=0, eps=self.eps, out=self._v) # type: ignore[has-type] + + def forward(self, weight: torch.Tensor) -> torch.Tensor: + if weight.ndim == 1: + # Faster and more exact path, no need to approximate anything + return F.normalize(weight, dim=0, eps=self.eps) + else: + weight_mat = self._reshape_weight_to_matrix(weight) + if self.training: + self._power_method(weight_mat, self.n_power_iterations) + # See above on why we need to clone + u = self._u.clone(memory_format=torch.contiguous_format) + v = self._v.clone(memory_format=torch.contiguous_format) + # The proper way of computing this should be through F.bilinear, but + # it seems to have some efficiency issues: + # https://github.com/pytorch/pytorch/issues/58093 + sigma = torch.vdot(u, torch.mv(weight_mat, v)) + return weight / sigma + + def right_inverse(self, value: torch.Tensor) -> torch.Tensor: + # we may want to assert here that the passed value already + # satisfies constraints + return value + + +def spectral_norm(module: Module, + name: str = 'weight', + n_power_iterations: int = 1, + eps: float = 1e-12, + dim: Optional[int] = None) -> Module: + r"""Apply spectral normalization to a parameter in the given module. + + .. math:: + \mathbf{W}_{SN} = \dfrac{\mathbf{W}}{\sigma(\mathbf{W})}, + \sigma(\mathbf{W}) = \max_{\mathbf{h}: \mathbf{h} \ne 0} \dfrac{\|\mathbf{W} \mathbf{h}\|_2}{\|\mathbf{h}\|_2} + + When applied on a vector, it simplifies to + + .. math:: + \mathbf{x}_{SN} = \dfrac{\mathbf{x}}{\|\mathbf{x}\|_2} + + Spectral normalization stabilizes the training of discriminators (critics) + in Generative Adversarial Networks (GANs) by reducing the Lipschitz constant + of the model. :math:`\sigma` is approximated performing one iteration of the + `power method`_ every time the weight is accessed. If the dimension of the + weight tensor is greater than 2, it is reshaped to 2D in power iteration + method to get spectral norm. + + + See `Spectral Normalization for Generative Adversarial Networks`_ . + + .. _`power method`: https://en.wikipedia.org/wiki/Power_iteration + .. _`Spectral Normalization for Generative Adversarial Networks`: https://arxiv.org/abs/1802.05957 + + .. note:: + This function is implemented using the parametrization functionality + in :func:`~torch.nn.utils.parametrize.register_parametrization`. It is a + reimplementation of :func:`torch.nn.utils.spectral_norm`. + + .. note:: + When this constraint is registered, the singular vectors associated to the largest + singular value are estimated rather than sampled at random. These are then updated + performing :attr:`n_power_iterations` of the `power method`_ whenever the tensor + is accessed with the module on `training` mode. + + .. note:: + If the `_SpectralNorm` module, i.e., `module.parametrization.weight[idx]`, + is in training mode on removal, it will perform another power iteration. + If you'd like to avoid this iteration, set the module to eval mode + before its removal. + + Args: + module (nn.Module): containing module + name (str, optional): name of weight parameter. Default: ``"weight"``. + n_power_iterations (int, optional): number of power iterations to + calculate spectral norm. Default: ``1``. + eps (float, optional): epsilon for numerical stability in + calculating norms. Default: ``1e-12``. 
+ dim (int, optional): dimension corresponding to number of outputs. + Default: ``0``, except for modules that are instances of + ConvTranspose{1,2,3}d, when it is ``1`` + + Returns: + The original module with a new parametrization registered to the specified + weight + + Example:: + + >>> # xdoctest: +REQUIRES(env:TORCH_DOCTEST_LAPACK) + >>> # xdoctest: +IGNORE_WANT("non-deterministic") + >>> snm = spectral_norm(nn.Linear(20, 40)) + >>> snm + ParametrizedLinear( + in_features=20, out_features=40, bias=True + (parametrizations): ModuleDict( + (weight): ParametrizationList( + (0): _SpectralNorm() + ) + ) + ) + >>> torch.linalg.matrix_norm(snm.weight, 2) + tensor(1.0081, grad_fn=) + """ + weight = getattr(module, name, None) + if not isinstance(weight, Tensor): + raise ValueError( + f"Module '{module}' has no parameter or buffer with name '{name}'" + ) + + if dim is None: + if isinstance(module, (torch.nn.ConvTranspose1d, + torch.nn.ConvTranspose2d, + torch.nn.ConvTranspose3d)): + dim = 1 + else: + dim = 0 + parametrize.register_parametrization(module, name, _SpectralNorm(weight, n_power_iterations, dim, eps)) + return module diff --git a/venv/lib/python3.10/site-packages/torch/nn/utils/spectral_norm.py b/venv/lib/python3.10/site-packages/torch/nn/utils/spectral_norm.py new file mode 100644 index 0000000000000000000000000000000000000000..bda54b9a1222203791556b0fc2193bab59f33644 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/nn/utils/spectral_norm.py @@ -0,0 +1,312 @@ +"""Spectral Normalization from https://arxiv.org/abs/1802.05957.""" +import torch +from torch.nn.functional import normalize +from typing import Any, Optional, TypeVar +from ..modules import Module + +__all__ = ['SpectralNorm', 'SpectralNormLoadStateDictPreHook', 'SpectralNormStateDictHook', + 'spectral_norm', 'remove_spectral_norm'] + +class SpectralNorm: + # Invariant before and after each forward call: + # u = normalize(W @ v) + # NB: At initialization, this invariant is not enforced + + _version: int = 1 + # At version 1: + # made `W` not a buffer, + # added `v` as a buffer, and + # made eval mode use `W = u @ W_orig @ v` rather than the stored `W`. + name: str + dim: int + n_power_iterations: int + eps: float + + def __init__(self, name: str = 'weight', n_power_iterations: int = 1, dim: int = 0, eps: float = 1e-12) -> None: + self.name = name + self.dim = dim + if n_power_iterations <= 0: + raise ValueError('Expected n_power_iterations to be positive, but ' + f'got n_power_iterations={n_power_iterations}') + self.n_power_iterations = n_power_iterations + self.eps = eps + + def reshape_weight_to_matrix(self, weight: torch.Tensor) -> torch.Tensor: + weight_mat = weight + if self.dim != 0: + # permute dim to front + weight_mat = weight_mat.permute(self.dim, + *[d for d in range(weight_mat.dim()) if d != self.dim]) + height = weight_mat.size(0) + return weight_mat.reshape(height, -1) + + def compute_weight(self, module: Module, do_power_iteration: bool) -> torch.Tensor: + # NB: If `do_power_iteration` is set, the `u` and `v` vectors are + # updated in power iteration **in-place**. This is very important + # because in `DataParallel` forward, the vectors (being buffers) are + # broadcast from the parallelized module to each module replica, + # which is a new module object created on the fly. And each replica + # runs its own spectral norm power iteration. So simply assigning + # the updated vectors to the module this function runs on will cause + # the update to be lost forever. 
And the next time the parallelized + # module is replicated, the same randomly initialized vectors are + # broadcast and used! + # + # Therefore, to make the change propagate back, we rely on two + # important behaviors (also enforced via tests): + # 1. `DataParallel` doesn't clone storage if the broadcast tensor + # is already on correct device; and it makes sure that the + # parallelized module is already on `device[0]`. + # 2. If the out tensor in `out=` kwarg has correct shape, it will + # just fill in the values. + # Therefore, since the same power iteration is performed on all + # devices, simply updating the tensors in-place will make sure that + # the module replica on `device[0]` will update the _u vector on the + # parallelized module (by shared storage). + # + # However, after we update `u` and `v` in-place, we need to **clone** + # them before using them to normalize the weight. This is to support + # backproping through two forward passes, e.g., the common pattern in + # GAN training: loss = D(real) - D(fake). Otherwise, engine will + # complain that variables needed to do backward for the first forward + # (i.e., the `u` and `v` vectors) are changed in the second forward. + weight = getattr(module, self.name + '_orig') + u = getattr(module, self.name + '_u') + v = getattr(module, self.name + '_v') + weight_mat = self.reshape_weight_to_matrix(weight) + + if do_power_iteration: + with torch.no_grad(): + for _ in range(self.n_power_iterations): + # Spectral norm of weight equals to `u^T W v`, where `u` and `v` + # are the first left and right singular vectors. + # This power iteration produces approximations of `u` and `v`. + v = normalize(torch.mv(weight_mat.t(), u), dim=0, eps=self.eps, out=v) + u = normalize(torch.mv(weight_mat, v), dim=0, eps=self.eps, out=u) + if self.n_power_iterations > 0: + # See above on why we need to clone + u = u.clone(memory_format=torch.contiguous_format) + v = v.clone(memory_format=torch.contiguous_format) + + sigma = torch.dot(u, torch.mv(weight_mat, v)) + weight = weight / sigma + return weight + + def remove(self, module: Module) -> None: + with torch.no_grad(): + weight = self.compute_weight(module, do_power_iteration=False) + delattr(module, self.name) + delattr(module, self.name + '_u') + delattr(module, self.name + '_v') + delattr(module, self.name + '_orig') + module.register_parameter(self.name, torch.nn.Parameter(weight.detach())) + + def __call__(self, module: Module, inputs: Any) -> None: + setattr(module, self.name, self.compute_weight(module, do_power_iteration=module.training)) + + def _solve_v_and_rescale(self, weight_mat, u, target_sigma): + # Tries to returns a vector `v` s.t. `u = normalize(W @ v)` + # (the invariant at top of this class) and `u @ W @ v = sigma`. + # This uses pinverse in case W^T W is not invertible. 
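+        # (Editor's note, illustrative) Below: v = (W^T W)^+ W^T u, i.e. a least-squares
+        # solution of W v ~= u, then rescaled in-place so that u^T W v equals target_sigma.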
+ v = torch.linalg.multi_dot([weight_mat.t().mm(weight_mat).pinverse(), weight_mat.t(), u.unsqueeze(1)]).squeeze(1) + return v.mul_(target_sigma / torch.dot(u, torch.mv(weight_mat, v))) + + @staticmethod + def apply(module: Module, name: str, n_power_iterations: int, dim: int, eps: float) -> 'SpectralNorm': + for hook in module._forward_pre_hooks.values(): + if isinstance(hook, SpectralNorm) and hook.name == name: + raise RuntimeError(f"Cannot register two spectral_norm hooks on the same parameter {name}") + + fn = SpectralNorm(name, n_power_iterations, dim, eps) + weight = module._parameters[name] + if weight is None: + raise ValueError(f'`SpectralNorm` cannot be applied as parameter `{name}` is None') + if isinstance(weight, torch.nn.parameter.UninitializedParameter): + raise ValueError( + 'The module passed to `SpectralNorm` can\'t have uninitialized parameters. ' + 'Make sure to run the dummy forward before applying spectral normalization') + + with torch.no_grad(): + weight_mat = fn.reshape_weight_to_matrix(weight) + + h, w = weight_mat.size() + # randomly initialize `u` and `v` + u = normalize(weight.new_empty(h).normal_(0, 1), dim=0, eps=fn.eps) + v = normalize(weight.new_empty(w).normal_(0, 1), dim=0, eps=fn.eps) + + delattr(module, fn.name) + module.register_parameter(fn.name + "_orig", weight) + # We still need to assign weight back as fn.name because all sorts of + # things may assume that it exists, e.g., when initializing weights. + # However, we can't directly assign as it could be an nn.Parameter and + # gets added as a parameter. Instead, we register weight.data as a plain + # attribute. + setattr(module, fn.name, weight.data) + module.register_buffer(fn.name + "_u", u) + module.register_buffer(fn.name + "_v", v) + + module.register_forward_pre_hook(fn) + module._register_state_dict_hook(SpectralNormStateDictHook(fn)) + module._register_load_state_dict_pre_hook(SpectralNormLoadStateDictPreHook(fn)) + return fn + + +# This is a top level class because Py2 pickle doesn't like inner class nor an +# instancemethod. +class SpectralNormLoadStateDictPreHook: + # See docstring of SpectralNorm._version on the changes to spectral_norm. + def __init__(self, fn) -> None: + self.fn = fn + + # For state_dict with version None, (assuming that it has gone through at + # least one training forward), we have + # + # u = normalize(W_orig @ v) + # W = W_orig / sigma, where sigma = u @ W_orig @ v + # + # To compute `v`, we solve `W_orig @ x = u`, and let + # v = x / (u @ W_orig @ x) * (W / W_orig). + def __call__(self, state_dict, prefix, local_metadata, strict, + missing_keys, unexpected_keys, error_msgs) -> None: + fn = self.fn + version = local_metadata.get('spectral_norm', {}).get(fn.name + '.version', None) + if version is None or version < 1: + weight_key = prefix + fn.name + if version is None and all(weight_key + s in state_dict for s in ('_orig', '_u', '_v')) and \ + weight_key not in state_dict: + # Detect if it is the updated state dict and just missing metadata. + # This could happen if the users are crafting a state dict themselves, + # so we just pretend that this is the newest. 
+ return + has_missing_keys = False + for suffix in ('_orig', '', '_u'): + key = weight_key + suffix + if key not in state_dict: + has_missing_keys = True + if strict: + missing_keys.append(key) + if has_missing_keys: + return + with torch.no_grad(): + weight_orig = state_dict[weight_key + '_orig'] + weight = state_dict.pop(weight_key) + sigma = (weight_orig / weight).mean() + weight_mat = fn.reshape_weight_to_matrix(weight_orig) + u = state_dict[weight_key + '_u'] + v = fn._solve_v_and_rescale(weight_mat, u, sigma) + state_dict[weight_key + '_v'] = v + + +# This is a top level class because Py2 pickle doesn't like inner class nor an +# instancemethod. +class SpectralNormStateDictHook: + # See docstring of SpectralNorm._version on the changes to spectral_norm. + def __init__(self, fn) -> None: + self.fn = fn + + def __call__(self, module, state_dict, prefix, local_metadata) -> None: + if 'spectral_norm' not in local_metadata: + local_metadata['spectral_norm'] = {} + key = self.fn.name + '.version' + if key in local_metadata['spectral_norm']: + raise RuntimeError(f"Unexpected key in metadata['spectral_norm']: {key}") + local_metadata['spectral_norm'][key] = self.fn._version + + +T_module = TypeVar('T_module', bound=Module) + +def spectral_norm(module: T_module, + name: str = 'weight', + n_power_iterations: int = 1, + eps: float = 1e-12, + dim: Optional[int] = None) -> T_module: + r"""Apply spectral normalization to a parameter in the given module. + + .. math:: + \mathbf{W}_{SN} = \dfrac{\mathbf{W}}{\sigma(\mathbf{W})}, + \sigma(\mathbf{W}) = \max_{\mathbf{h}: \mathbf{h} \ne 0} \dfrac{\|\mathbf{W} \mathbf{h}\|_2}{\|\mathbf{h}\|_2} + + Spectral normalization stabilizes the training of discriminators (critics) + in Generative Adversarial Networks (GANs) by rescaling the weight tensor + with spectral norm :math:`\sigma` of the weight matrix calculated using + power iteration method. If the dimension of the weight tensor is greater + than 2, it is reshaped to 2D in power iteration method to get spectral + norm. This is implemented via a hook that calculates spectral norm and + rescales weight before every :meth:`~Module.forward` call. + + See `Spectral Normalization for Generative Adversarial Networks`_ . + + .. _`Spectral Normalization for Generative Adversarial Networks`: https://arxiv.org/abs/1802.05957 + + Args: + module (nn.Module): containing module + name (str, optional): name of weight parameter + n_power_iterations (int, optional): number of power iterations to + calculate spectral norm + eps (float, optional): epsilon for numerical stability in + calculating norms + dim (int, optional): dimension corresponding to number of outputs, + the default is ``0``, except for modules that are instances of + ConvTranspose{1,2,3}d, when it is ``1`` + + Returns: + The original module with the spectral norm hook + + .. note:: + This function has been reimplemented as + :func:`torch.nn.utils.parametrizations.spectral_norm` using the new + parametrization functionality in + :func:`torch.nn.utils.parametrize.register_parametrization`. Please use + the newer version. This function will be deprecated in a future version + of PyTorch. 
+ + Example:: + + >>> m = spectral_norm(nn.Linear(20, 40)) + >>> m + Linear(in_features=20, out_features=40, bias=True) + >>> m.weight_u.size() + torch.Size([40]) + + """ + if dim is None: + if isinstance(module, (torch.nn.ConvTranspose1d, + torch.nn.ConvTranspose2d, + torch.nn.ConvTranspose3d)): + dim = 1 + else: + dim = 0 + SpectralNorm.apply(module, name, n_power_iterations, dim, eps) + return module + + +def remove_spectral_norm(module: T_module, name: str = 'weight') -> T_module: + r"""Remove the spectral normalization reparameterization from a module. + + Args: + module (Module): containing module + name (str, optional): name of weight parameter + + Example: + >>> m = spectral_norm(nn.Linear(40, 10)) + >>> remove_spectral_norm(m) + """ + for k, hook in module._forward_pre_hooks.items(): + if isinstance(hook, SpectralNorm) and hook.name == name: + hook.remove(module) + del module._forward_pre_hooks[k] + break + else: + raise ValueError(f"spectral_norm of '{name}' not found in {module}") + + for k, hook in module._state_dict_hooks.items(): + if isinstance(hook, SpectralNormStateDictHook) and hook.fn.name == name: + del module._state_dict_hooks[k] + break + + for k, hook in module._load_state_dict_pre_hooks.items(): + if isinstance(hook, SpectralNormLoadStateDictPreHook) and hook.fn.name == name: + del module._load_state_dict_pre_hooks[k] + break + + return module diff --git a/venv/lib/python3.10/site-packages/torch/nn/utils/stateless.py b/venv/lib/python3.10/site-packages/torch/nn/utils/stateless.py new file mode 100644 index 0000000000000000000000000000000000000000..ae7ebcdf3df7f00cc9bde5b108b81c65eb0f884b --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/nn/utils/stateless.py @@ -0,0 +1,263 @@ +import contextlib +import warnings +from collections import defaultdict +from typing import Any, Dict, Iterator, Optional, Set, Tuple, Union + +import torch +from torch import Tensor +from torch.nn.utils._named_member_accessor import NamedMemberAccessor + +__all__ = ["functional_call"] + + +def _untie_named_tensors_map( + module: "torch.nn.Module", + parameters_and_buffers: Dict[str, Tensor], +) -> Dict[str, Tensor]: + """ + Unties all tied tensors in the module to parameters_and_buffers. + + This function returns a new untied_parameters_and_buffers dictionary and leave the original + untied_parameters_and_buffers dictionary unchanged. It adds new (missing) keys for tied tensors + in the module to untied_parameters_and_buffers. The value of the new key is the user-given value + in the original parameters_and_buffers dictionary. + + If there are more than one user-given values for the same tied tensor, it will raise an error. + + For example, if the module has two tied weights self.foo and self.tied_foo and the user passes + {'foo': foo_value, ...}, this will return {'foo': foo_value, 'tied_foo': foo_value, ...}. If the + user passes {'foo': foo_value, 'tied_foo': tied_foo_value, ...}, it will raise an error. If the + user passes {'foo': foo_value, 'tied_foo': foo_value, ...}, it will not raise an error. + + Args: + module (torch.nn.Module): the module to determine which tensors are tied. + parameters_and_buffers (Dict[str, Tensor]): a map of {name: tensor} for reparamaterizing the module. + + Returns: + A new untied version of the parameters_and_buffers dictionary. + + Raises: + ValueError: if there are more than one user-given values for the same tied tensor. + """ + # A map of {name: tensor} for all tensors (including tied ones) in the module. 
+ all_named_tensors: Dict[str, Tensor] = {} + all_named_tensors.update(module.named_parameters(remove_duplicate=False)) + all_named_tensors.update(module.named_buffers(remove_duplicate=False)) + + # A map of {tensor: set(all_tied_names)} for all tensor names in the module. + tensor_to_tied_names_map: Dict[Tensor, Set[str]] = defaultdict(set) + for name, tensor in all_named_tensors.items(): + tensor_to_tied_names_map[tensor].add(name) + + # A map of {tied_name: set(all_tied_names)} for all tensor names in the module. + # If a name is not tied, it will not be in this map. + tied_names_map: Dict[str, Set[str]] = {} + for tied_names in tensor_to_tied_names_map.values(): + if len(tied_names) > 1: + for tied_name in tied_names: + tied_names_map[tied_name] = tied_names + + # Make sure the user didn't pass multiple values for the same tied tensor. + given_names = set(parameters_and_buffers.keys()) + given_names_for_tied_tensors = given_names.intersection(tied_names_map.keys()) + for given_name in given_names_for_tied_tensors: + tied_names = tied_names_map[given_name] + if ( + # Detect if there are multiple keys present for the same tied tensor. + len(tied_names.intersection(given_names_for_tied_tensors)) > 1 + # Only raise an error if the user passed multiple values for the same tied tensor. + # If all given values are the same, don't raise. + and len({parameters_and_buffers[tied_name] for tied_name in tied_names}) + != 1 + ): + raise ValueError( + f"functional_call got multiple values for keys {sorted(tied_names)}, " + f"which are tied. Consider using tie_weights=False" + ) + + # Untie the given named tensor map + # Make a copy for not modifying the original dict + untied_parameters_and_buffers = parameters_and_buffers.copy() + for given_name in given_names_for_tied_tensors: + for tied_name in tied_names_map[given_name]: + untied_parameters_and_buffers[tied_name] = parameters_and_buffers[ + given_name + ] + return untied_parameters_and_buffers + + +@contextlib.contextmanager +def _reparametrize_module( + module: "torch.nn.Module", + parameters_and_buffers: Dict[str, Tensor], + *, + tie_weights: bool = False, + strict: bool = False, +) -> Iterator[None]: + if tie_weights: + untied_parameters_and_buffers = _untie_named_tensors_map( + module, parameters_and_buffers + ) + else: + untied_parameters_and_buffers = parameters_and_buffers + + accessor = NamedMemberAccessor(module) + if strict: + missing_keys, unexpected_keys = accessor.check_keys( + untied_parameters_and_buffers + ) + error_msgs = [] + if len(unexpected_keys) > 0: + error_msgs.append( + f"Unexpected key(s): {', '.join(map(repr, unexpected_keys))}." + ) + if len(missing_keys) > 0: + error_msgs.append(f"Missing key(s): {', '.join(map(repr, missing_keys))}.") + if len(error_msgs) > 0: + raise RuntimeError( + "Error(s) in reparametrizing for {}:\n\t{}".format( + module._get_name(), "\n\t".join(error_msgs) + ) + ) + + orig_parameters_and_buffers: Dict[str, Tensor] = {} + try: + orig_parameters_and_buffers, _ = accessor.swap_tensors_dict( + untied_parameters_and_buffers, allow_missing=True + ) + yield + finally: + new_parameters_and_buffers, _ = accessor.swap_tensors_dict( + orig_parameters_and_buffers, allow_missing=True + ) + # Sometimes the module is not completely stateless and has some in-place modifications on + # the _parameters and _buffers dictionaries. + # Write the changed parameters and buffers back to the original dict. 
+ parameters_and_buffers.update( + { + k: new_parameters_and_buffers[k] + for k in parameters_and_buffers + if k in new_parameters_and_buffers + } + ) + + +def functional_call( + module: "torch.nn.Module", + parameters_and_buffers: Dict[str, Tensor], + args: Union[Any, Tuple], + kwargs: Optional[Dict[str, Any]] = None, + *, + tie_weights: bool = True, + strict: bool = False, +): + r"""Perform a functional call on the module by replacing the module parameters and buffers with the provided ones. + + .. warning:: + + This API is deprecated as of PyTorch 2.0 and will be removed in a future + version of PyTorch. Please use :func:`torch.func.functional_call` instead, + which is a drop-in replacement for this API. + + .. note:: If the module has active parametrizations, passing a value in the + :attr:`parameters_and_buffers` argument with the name set to the regular parameter + name will completely disable the parametrization. + If you want to apply the parametrization function to the value passed + please set the key as ``{submodule_name}.parametrizations.{parameter_name}.original``. + + .. note:: If the module performs in-place operations on parameters/buffers, these will be reflected + in the `parameters_and_buffers` input. + + Example:: + + >>> a = {'foo': torch.zeros(())} + >>> # xdoctest: +SKIP + >>> mod = Foo() # does self.foo = self.foo + 1 + >>> print(mod.foo) # tensor(0.) + >>> functional_call(mod, a, torch.ones(())) + >>> print(mod.foo) # tensor(0.) + >>> print(a['foo']) # tensor(1.) + + .. note:: If the module has tied weights, whether or not functional_call respects the tying is determined by the + tie_weights flag. + + Example:: + + >>> a = {'foo': torch.zeros(())} + >>> # xdoctest: +SKIP + >>> mod = Foo() # has both self.foo and self.foo_tied which are tied. Returns x + self.foo + self.foo_tied + >>> print(mod.foo) # tensor(1.) + >>> mod(torch.zeros(())) # tensor(2.) + >>> functional_call(mod, a, torch.zeros(())) # tensor(0.) since it will change self.foo_tied too + >>> functional_call(mod, a, torch.zeros(()), tie_weights=False) # tensor(1.)--self.foo_tied is not updated + >>> new_a = {'foo': torch.zeros(()), 'foo_tied': torch.zeros(())} + >>> functional_call(mod, new_a, torch.zeros()) # tensor(0.) + + Args: + module (torch.nn.Module): the module to call + parameters_and_buffers (dict of str and Tensor): the parameters that will be used in + the module call. + args (Any or tuple): arguments to be passed to the module call. If not a tuple, considered a single argument. + kwargs (dict): keyword arguments to be passed to the module call + tie_weights (bool, optional): If True, then parameters and buffers tied in the original model will be treated as + tied in the reparamaterized version. Therefore, if True and different values are passed for the tied + parameters and buffers, it will error. If False, it will not respect the originally tied parameters and + buffers unless the values passed for both weights are the same. Default: True. + strict (bool, optional): If True, then the parameters and buffers passed in must match the parameters and + buffers in the original module. Therefore, if True and there are any missing or unexpected keys, it will + error. Default: False. + + Returns: + Any: the result of calling ``module``. + """ + warnings.warn( + "This API is deprecated as of PyTorch 2.0 and will be removed in a future " + "version of PyTorch. Please use torch.func.functional_call instead " + "which is a drop-in replacement for this API." 
+ ) + + return _functional_call( + module, + parameters_and_buffers, + args, + kwargs, + tie_weights=tie_weights, + strict=strict, + ) + + +def _functional_call( + module: "torch.nn.Module", + parameters_and_buffers: Dict[str, Tensor], + args: Union[Any, Tuple], + kwargs: Optional[Dict[str, Any]] = None, + *, + tie_weights: bool = True, + strict: bool = False, +): + # TODO allow kwargs such as unsafe and others for parametrization + if ( + torch.jit.is_tracing() + or torch.jit.is_scripting() + or isinstance( + module, + ( + torch.jit.RecursiveScriptModule, + torch.jit.ScriptModule, + torch.jit.ScriptFunction, + ), + ) + ): + raise RuntimeError("The stateless API can't be used with Jitted modules") + if isinstance(module, torch.nn.DataParallel): + raise RuntimeError( + "The stateless API can't be used with nn.DataParallel module" + ) + if kwargs is None: + kwargs = {} + if not isinstance(args, tuple): + args = (args,) + with _reparametrize_module( + module, parameters_and_buffers, tie_weights=tie_weights, strict=strict + ): + return module(*args, **kwargs) diff --git a/venv/lib/python3.10/site-packages/torch/nn/utils/weight_norm.py b/venv/lib/python3.10/site-packages/torch/nn/utils/weight_norm.py new file mode 100644 index 0000000000000000000000000000000000000000..942a13a4eb83c4bac35f69f61bddf6ea6ca4645c --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/nn/utils/weight_norm.py @@ -0,0 +1,151 @@ +r"""Weight Normalization from https://arxiv.org/abs/1602.07868.""" +from torch.nn.parameter import Parameter, UninitializedParameter +from torch import _weight_norm, norm_except_dim +from typing import Any, TypeVar +import warnings +from ..modules import Module + +__all__ = ['WeightNorm', 'weight_norm', 'remove_weight_norm'] + +class WeightNorm: + name: str + dim: int + + def __init__(self, name: str, dim: int) -> None: + if dim is None: + dim = -1 + self.name = name + self.dim = dim + + # TODO Make return type more specific + def compute_weight(self, module: Module) -> Any: + g = getattr(module, self.name + '_g') + v = getattr(module, self.name + '_v') + return _weight_norm(v, g, self.dim) + + @staticmethod + def apply(module, name: str, dim: int) -> 'WeightNorm': + warnings.warn("torch.nn.utils.weight_norm is deprecated in favor of torch.nn.utils.parametrizations.weight_norm.") + + for hook in module._forward_pre_hooks.values(): + if isinstance(hook, WeightNorm) and hook.name == name: + raise RuntimeError(f"Cannot register two weight_norm hooks on the same parameter {name}") + + if dim is None: + dim = -1 + + fn = WeightNorm(name, dim) + + weight = getattr(module, name) + if isinstance(weight, UninitializedParameter): + raise ValueError( + 'The module passed to `WeightNorm` can\'t have uninitialized parameters. 
' + 'Make sure to run the dummy forward before applying weight normalization') + # remove w from parameter list + del module._parameters[name] + + # add g and v as new parameters and express w as g/||v|| * v + module.register_parameter(name + '_g', Parameter(norm_except_dim(weight, 2, dim).data)) + module.register_parameter(name + '_v', Parameter(weight.data)) + setattr(module, name, fn.compute_weight(module)) + + # recompute weight before every forward() + module.register_forward_pre_hook(fn) + + return fn + + def remove(self, module: Module) -> None: + weight = self.compute_weight(module) + delattr(module, self.name) + del module._parameters[self.name + '_g'] + del module._parameters[self.name + '_v'] + setattr(module, self.name, Parameter(weight.data)) + + def __call__(self, module: Module, inputs: Any) -> None: + setattr(module, self.name, self.compute_weight(module)) + + +T_module = TypeVar('T_module', bound=Module) + +def weight_norm(module: T_module, name: str = 'weight', dim: int = 0) -> T_module: + r"""Apply weight normalization to a parameter in the given module. + + .. math:: + \mathbf{w} = g \dfrac{\mathbf{v}}{\|\mathbf{v}\|} + + Weight normalization is a reparameterization that decouples the magnitude + of a weight tensor from its direction. This replaces the parameter specified + by :attr:`name` (e.g. ``'weight'``) with two parameters: one specifying the magnitude + (e.g. ``'weight_g'``) and one specifying the direction (e.g. ``'weight_v'``). + Weight normalization is implemented via a hook that recomputes the weight + tensor from the magnitude and direction before every :meth:`~Module.forward` + call. + + By default, with ``dim=0``, the norm is computed independently per output + channel/plane. To compute a norm over the entire weight tensor, use + ``dim=None``. + + See https://arxiv.org/abs/1602.07868 + + .. warning:: + + This function is deprecated. Use :func:`torch.nn.utils.parametrizations.weight_norm` + which uses the modern parametrization API. The new ``weight_norm`` is compatible + with ``state_dict`` generated from old ``weight_norm``. + + Migration guide: + + * The magnitude (``weight_g``) and direction (``weight_v``) are now expressed + as ``parametrizations.weight.original0`` and ``parametrizations.weight.original1`` + respectively. If this is bothering you, please comment on + https://github.com/pytorch/pytorch/issues/102999 + + * To remove the weight normalization reparametrization, use + :func:`torch.nn.utils.parametrize.remove_parametrizations`. + + * The weight is no longer recomputed once at module forward; instead, it will + be recomputed on every access. To restore the old behavior, use + :func:`torch.nn.utils.parametrize.cached` before invoking the module + in question. + + Args: + module (Module): containing module + name (str, optional): name of weight parameter + dim (int, optional): dimension over which to compute the norm + + Returns: + The original module with the weight norm hook + + Example:: + + >>> m = weight_norm(nn.Linear(20, 40), name='weight') + >>> m + Linear(in_features=20, out_features=40, bias=True) + >>> m.weight_g.size() + torch.Size([40, 1]) + >>> m.weight_v.size() + torch.Size([40, 20]) + + """ + WeightNorm.apply(module, name, dim) + return module + + +def remove_weight_norm(module: T_module, name: str = 'weight') -> T_module: + r"""Remove the weight normalization reparameterization from a module. 
+ + Args: + module (Module): containing module + name (str, optional): name of weight parameter + + Example: + >>> m = weight_norm(nn.Linear(20, 40)) + >>> remove_weight_norm(m) + """ + for k, hook in module._forward_pre_hooks.items(): + if isinstance(hook, WeightNorm) and hook.name == name: + hook.remove(module) + del module._forward_pre_hooks[k] + return module + + raise ValueError(f"weight_norm of '{name}' not found in {module}") diff --git a/venv/lib/python3.10/site-packages/torch/signal/__init__.py b/venv/lib/python3.10/site-packages/torch/signal/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..3684eabe71215fe96689181065a9432f127aa8e7 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/signal/__init__.py @@ -0,0 +1,5 @@ +from . import windows + +__all__ = [ + 'windows' +] diff --git a/venv/lib/python3.10/site-packages/torch/signal/__pycache__/__init__.cpython-310.pyc b/venv/lib/python3.10/site-packages/torch/signal/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..95254f0dbeb2a4fbdab2cb237e87b642481c4e80 Binary files /dev/null and b/venv/lib/python3.10/site-packages/torch/signal/__pycache__/__init__.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/torch/signal/windows/__init__.py b/venv/lib/python3.10/site-packages/torch/signal/windows/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..d04cde6e5b9280a465fb5db739f102337de74717 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/signal/windows/__init__.py @@ -0,0 +1,28 @@ +from .windows import ( + bartlett, + blackman, + cosine, + exponential, + gaussian, + general_cosine, + general_hamming, + hamming, + hann, + kaiser, + nuttall, +) + + +__all__ = [ + 'bartlett', + 'blackman', + 'cosine', + 'exponential', + 'gaussian', + 'general_cosine', + 'general_hamming', + 'hamming', + 'hann', + 'kaiser', + 'nuttall', +] diff --git a/venv/lib/python3.10/site-packages/torch/signal/windows/__pycache__/__init__.cpython-310.pyc b/venv/lib/python3.10/site-packages/torch/signal/windows/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..5c0cbda42948e2a4a7d3993ceba70491cc729eea Binary files /dev/null and b/venv/lib/python3.10/site-packages/torch/signal/windows/__pycache__/__init__.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/torch/signal/windows/__pycache__/windows.cpython-310.pyc b/venv/lib/python3.10/site-packages/torch/signal/windows/__pycache__/windows.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..91e938d0994479be2d6032aa86366637922b56f2 Binary files /dev/null and b/venv/lib/python3.10/site-packages/torch/signal/windows/__pycache__/windows.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/torch/signal/windows/windows.py b/venv/lib/python3.10/site-packages/torch/signal/windows/windows.py new file mode 100644 index 0000000000000000000000000000000000000000..f2cbe3247c619adec31bf0d0ed3c96d2a6b979f2 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/signal/windows/windows.py @@ -0,0 +1,805 @@ +from typing import Optional, Iterable + +import torch +from math import sqrt + +from torch import Tensor +from torch._torch_docs import factory_common_args, parse_kwargs, merge_dicts + +__all__ = [ + 'bartlett', + 'blackman', + 'cosine', + 'exponential', + 'gaussian', + 'general_cosine', + 'general_hamming', + 'hamming', + 'hann', + 'kaiser', + 'nuttall', +] + 
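The window factories re-exported above all return a 1-D tensor and share the ``M`` / ``sym`` / ``dtype`` / ``layout`` / ``device`` / ``requires_grad`` arguments documented in ``windows.py`` below, plus per-window shape parameters such as ``std`` or ``beta``. A brief usage sketch follows; the signal length, FFT size, and hop length are illustrative assumptions, not values taken from this file::

    import torch

    win = torch.signal.windows.hann(400, sym=False)    # periodic window, suited to spectral analysis
    x = torch.randn(16000)                             # assumed: 1 s of audio sampled at 16 kHz
    spec = torch.stft(x, n_fft=400, hop_length=160,
                      window=win, return_complex=True)
    print(spec.shape)                                  # (n_fft // 2 + 1, n_frames)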
+window_common_args = merge_dicts( + parse_kwargs( + """ + M (int): the length of the window. + In other words, the number of points of the returned window. + sym (bool, optional): If `False`, returns a periodic window suitable for use in spectral analysis. + If `True`, returns a symmetric window suitable for use in filter design. Default: `True`. +""" + ), + factory_common_args, + { + "normalization": "The window is normalized to 1 (maximum value is 1). However, the 1 doesn't appear if " + ":attr:`M` is even and :attr:`sym` is `True`.", + } +) + + +def _add_docstr(*args): + r"""Adds docstrings to a given decorated function. + + Specially useful when then docstrings needs string interpolation, e.g., with + str.format(). + REMARK: Do not use this function if the docstring doesn't need string + interpolation, just write a conventional docstring. + + Args: + args (str): + """ + + def decorator(o): + o.__doc__ = "".join(args) + return o + + return decorator + + +def _window_function_checks(function_name: str, M: int, dtype: torch.dtype, layout: torch.layout) -> None: + r"""Performs common checks for all the defined windows. + This function should be called before computing any window. + + Args: + function_name (str): name of the window function. + M (int): length of the window. + dtype (:class:`torch.dtype`): the desired data type of returned tensor. + layout (:class:`torch.layout`): the desired layout of returned tensor. + """ + if M < 0: + raise ValueError(f'{function_name} requires non-negative window length, got M={M}') + if layout is not torch.strided: + raise ValueError(f'{function_name} is implemented for strided tensors only, got: {layout}') + if dtype not in [torch.float32, torch.float64]: + raise ValueError(f'{function_name} expects float32 or float64 dtypes, got: {dtype}') + + +@_add_docstr( + r""" +Computes a window with an exponential waveform. +Also known as Poisson window. + +The exponential window is defined as follows: + +.. math:: + w_n = \exp{\left(-\frac{|n - c|}{\tau}\right)} + +where `c` is the ``center`` of the window. + """, + r""" + +{normalization} + +Args: + {M} + +Keyword args: + center (float, optional): where the center of the window will be located. + Default: `M / 2` if `sym` is `False`, else `(M - 1) / 2`. + tau (float, optional): the decay value. + Tau is generally associated with a percentage, that means, that the value should + vary within the interval (0, 100]. If tau is 100, it is considered the uniform window. + Default: 1.0. + {sym} + {dtype} + {layout} + {device} + {requires_grad} + +Examples:: + + >>> # Generates a symmetric exponential window of size 10 and with a decay value of 1.0. + >>> # The center will be at (M - 1) / 2, where M is 10. 
+ >>> torch.signal.windows.exponential(10) + tensor([0.0111, 0.0302, 0.0821, 0.2231, 0.6065, 0.6065, 0.2231, 0.0821, 0.0302, 0.0111]) + + >>> # Generates a periodic exponential window and decay factor equal to .5 + >>> torch.signal.windows.exponential(10, sym=False,tau=.5) + tensor([4.5400e-05, 3.3546e-04, 2.4788e-03, 1.8316e-02, 1.3534e-01, 1.0000e+00, 1.3534e-01, 1.8316e-02, 2.4788e-03, 3.3546e-04]) + """.format( + **window_common_args + ), +) +def exponential( + M: int, + *, + center: Optional[float] = None, + tau: float = 1.0, + sym: bool = True, + dtype: Optional[torch.dtype] = None, + layout: torch.layout = torch.strided, + device: Optional[torch.device] = None, + requires_grad: bool = False +) -> Tensor: + if dtype is None: + dtype = torch.get_default_dtype() + + _window_function_checks('exponential', M, dtype, layout) + + if tau <= 0: + raise ValueError(f'Tau must be positive, got: {tau} instead.') + + if sym and center is not None: + raise ValueError('Center must be None for symmetric windows') + + if M == 0: + return torch.empty((0,), dtype=dtype, layout=layout, device=device, requires_grad=requires_grad) + + if center is None: + center = (M if not sym and M > 1 else M - 1) / 2.0 + + constant = 1 / tau + + k = torch.linspace(start=-center * constant, + end=(-center + (M - 1)) * constant, + steps=M, + dtype=dtype, + layout=layout, + device=device, + requires_grad=requires_grad) + + return torch.exp(-torch.abs(k)) + + +@_add_docstr( + r""" +Computes a window with a simple cosine waveform, following the same implementation as SciPy. +This window is also known as the sine window. + +The cosine window is defined as follows: + +.. math:: + w_n = \sin\left(\frac{\pi (n + 0.5)}{M}\right) + +This formula differs from the typical cosine window formula by incorporating a 0.5 term in the numerator, +which shifts the sample positions. This adjustment results in a window that starts and ends with non-zero values. + +""", + r""" + +{normalization} + +Args: + {M} + +Keyword args: + {sym} + {dtype} + {layout} + {device} + {requires_grad} + +Examples:: + + >>> # Generates a symmetric cosine window. + >>> torch.signal.windows.cosine(10) + tensor([0.1564, 0.4540, 0.7071, 0.8910, 0.9877, 0.9877, 0.8910, 0.7071, 0.4540, 0.1564]) + + >>> # Generates a periodic cosine window. + >>> torch.signal.windows.cosine(10, sym=False) + tensor([0.1423, 0.4154, 0.6549, 0.8413, 0.9595, 1.0000, 0.9595, 0.8413, 0.6549, 0.4154]) +""".format( + **window_common_args, + ), +) +def cosine( + M: int, + *, + sym: bool = True, + dtype: Optional[torch.dtype] = None, + layout: torch.layout = torch.strided, + device: Optional[torch.device] = None, + requires_grad: bool = False +) -> Tensor: + if dtype is None: + dtype = torch.get_default_dtype() + + _window_function_checks('cosine', M, dtype, layout) + + if M == 0: + return torch.empty((0,), dtype=dtype, layout=layout, device=device, requires_grad=requires_grad) + + start = 0.5 + constant = torch.pi / (M + 1 if not sym and M > 1 else M) + + k = torch.linspace(start=start * constant, + end=(start + (M - 1)) * constant, + steps=M, + dtype=dtype, + layout=layout, + device=device, + requires_grad=requires_grad) + + return torch.sin(k) + + +@_add_docstr( + r""" +Computes a window with a gaussian waveform. + +The gaussian window is defined as follows: + +.. math:: + w_n = \exp{\left(-\left(\frac{n}{2\sigma}\right)^2\right)} + """, + r""" + +{normalization} + +Args: + {M} + +Keyword args: + std (float, optional): the standard deviation of the gaussian. 
It controls how narrow or wide the window is.
+        Default: 1.0.
+    {sym}
+    {dtype}
+    {layout}
+    {device}
+    {requires_grad}
+
+Examples::
+
+    >>> # Generates a symmetric gaussian window with a standard deviation of 1.0.
+    >>> torch.signal.windows.gaussian(10)
+    tensor([4.0065e-05, 2.1875e-03, 4.3937e-02, 3.2465e-01, 8.8250e-01, 8.8250e-01, 3.2465e-01, 4.3937e-02, 2.1875e-03, 4.0065e-05])
+
+    >>> # Generates a periodic gaussian window and standard deviation equal to 0.9.
+    >>> torch.signal.windows.gaussian(10, sym=False, std=0.9)
+    tensor([1.9858e-07, 5.1365e-05, 3.8659e-03, 8.4658e-02, 5.3941e-01, 1.0000e+00, 5.3941e-01, 8.4658e-02, 3.8659e-03, 5.1365e-05])
+""".format(
+        **window_common_args,
+    ),
+)
+def gaussian(
+    M: int,
+    *,
+    std: float = 1.0,
+    sym: bool = True,
+    dtype: Optional[torch.dtype] = None,
+    layout: torch.layout = torch.strided,
+    device: Optional[torch.device] = None,
+    requires_grad: bool = False
+) -> Tensor:
+    if dtype is None:
+        dtype = torch.get_default_dtype()
+
+    _window_function_checks('gaussian', M, dtype, layout)
+
+    if std <= 0:
+        raise ValueError(f'Standard deviation must be positive, got: {std} instead.')
+
+    if M == 0:
+        return torch.empty((0,), dtype=dtype, layout=layout, device=device, requires_grad=requires_grad)
+
+    start = -(M if not sym and M > 1 else M - 1) / 2.0
+
+    constant = 1 / (std * sqrt(2))
+
+    k = torch.linspace(start=start * constant,
+                       end=(start + (M - 1)) * constant,
+                       steps=M,
+                       dtype=dtype,
+                       layout=layout,
+                       device=device,
+                       requires_grad=requires_grad)
+
+    return torch.exp(-k ** 2)
+
+
+@_add_docstr(
+    r"""
+Computes the Kaiser window.
+
+The Kaiser window is defined as follows:
+
+.. math::
+    w_n = I_0 \left( \beta \sqrt{1 - \left( {\frac{n - N/2}{N/2}} \right) ^2 } \right) / I_0( \beta )
+
+where ``I_0`` is the zeroth order modified Bessel function of the first kind (see :func:`torch.special.i0`), and
+``N = M - 1 if sym else M``.
+    """,
+    r"""
+
+{normalization}
+
+Args:
+    {M}
+
+Keyword args:
+    beta (float, optional): shape parameter for the window. Must be non-negative. Default: 12.0
+    {sym}
+    {dtype}
+    {layout}
+    {device}
+    {requires_grad}
+
+Examples::
+
+    >>> # Generates a symmetric Kaiser window with the default beta of 12.0.
+    >>> torch.signal.windows.kaiser(5)
+
+    >>> # Generates a periodic Kaiser window with a beta of 5.0.
+    >>> torch.signal.windows.kaiser(5, sym=False, beta=5.0)
+""".format(
+        **window_common_args,
+    ),
+)
+def kaiser(
+    M: int,
+    *,
+    beta: float = 12.0,
+    sym: bool = True,
+    dtype: Optional[torch.dtype] = None,
+    layout: torch.layout = torch.strided,
+    device: Optional[torch.device] = None,
+    requires_grad: bool = False
+) -> Tensor:
+    if dtype is None:
+        dtype = torch.get_default_dtype()
+
+    _window_function_checks('kaiser', M, dtype, layout)
+
+    if beta < 0:
+        raise ValueError(f'beta must be non-negative, got: {beta} instead.')
+
+    if M == 0:
+        return torch.empty((0,), dtype=dtype, layout=layout, device=device, requires_grad=requires_grad)
+
+    if M == 1:
+        return torch.ones((1,), dtype=dtype, layout=layout, device=device, requires_grad=requires_grad)
+
+    # Avoid NaNs by casting `beta` to the appropriate dtype. 
+ beta = torch.tensor(beta, dtype=dtype, device=device) + + start = -beta + constant = 2.0 * beta / (M if not sym else M - 1) + end = torch.minimum(beta, start + (M - 1) * constant) + + k = torch.linspace(start=start, + end=end, + steps=M, + dtype=dtype, + layout=layout, + device=device, + requires_grad=requires_grad) + + return torch.i0(torch.sqrt(beta * beta - torch.pow(k, 2))) / torch.i0(beta) + + +@_add_docstr( + r""" +Computes the Hamming window. + +The Hamming window is defined as follows: + +.. math:: + w_n = \alpha - \beta\ \cos \left( \frac{2 \pi n}{M - 1} \right) + """, + r""" + +{normalization} + +Arguments: + {M} + +Keyword args: + {sym} + alpha (float, optional): The coefficient :math:`\alpha` in the equation above. + beta (float, optional): The coefficient :math:`\beta` in the equation above. + {dtype} + {layout} + {device} + {requires_grad} + +Examples:: + + >>> # Generates a symmetric Hamming window. + >>> torch.signal.windows.hamming(10) + tensor([0.0800, 0.1876, 0.4601, 0.7700, 0.9723, 0.9723, 0.7700, 0.4601, 0.1876, 0.0800]) + + >>> # Generates a periodic Hamming window. + >>> torch.signal.windows.hamming(10, sym=False) + tensor([0.0800, 0.1679, 0.3979, 0.6821, 0.9121, 1.0000, 0.9121, 0.6821, 0.3979, 0.1679]) +""".format( + **window_common_args + ), +) +def hamming(M: int, + *, + sym: bool = True, + dtype: Optional[torch.dtype] = None, + layout: torch.layout = torch.strided, + device: Optional[torch.device] = None, + requires_grad: bool = False) -> Tensor: + return general_hamming(M, sym=sym, dtype=dtype, layout=layout, device=device, requires_grad=requires_grad) + + +@_add_docstr( + r""" +Computes the Hann window. + +The Hann window is defined as follows: + +.. math:: + w_n = \frac{1}{2}\ \left[1 - \cos \left( \frac{2 \pi n}{M - 1} \right)\right] = + \sin^2 \left( \frac{\pi n}{M - 1} \right) + """, + r""" + +{normalization} + +Arguments: + {M} + +Keyword args: + {sym} + {dtype} + {layout} + {device} + {requires_grad} + +Examples:: + + >>> # Generates a symmetric Hann window. + >>> torch.signal.windows.hann(10) + tensor([0.0000, 0.1170, 0.4132, 0.7500, 0.9698, 0.9698, 0.7500, 0.4132, 0.1170, 0.0000]) + + >>> # Generates a periodic Hann window. + >>> torch.signal.windows.hann(10, sym=False) + tensor([0.0000, 0.0955, 0.3455, 0.6545, 0.9045, 1.0000, 0.9045, 0.6545, 0.3455, 0.0955]) +""".format( + **window_common_args + ), +) +def hann(M: int, + *, + sym: bool = True, + dtype: Optional[torch.dtype] = None, + layout: torch.layout = torch.strided, + device: Optional[torch.device] = None, + requires_grad: bool = False) -> Tensor: + return general_hamming(M, + alpha=0.5, + sym=sym, + dtype=dtype, + layout=layout, + device=device, + requires_grad=requires_grad) + + +@_add_docstr( + r""" +Computes the Blackman window. + +The Blackman window is defined as follows: + +.. math:: + w_n = 0.42 - 0.5 \cos \left( \frac{2 \pi n}{M - 1} \right) + 0.08 \cos \left( \frac{4 \pi n}{M - 1} \right) + """, + r""" + +{normalization} + +Arguments: + {M} + +Keyword args: + {sym} + {dtype} + {layout} + {device} + {requires_grad} + +Examples:: + + >>> # Generates a symmetric Blackman window. + >>> torch.signal.windows.blackman(5) + tensor([-1.4901e-08, 3.4000e-01, 1.0000e+00, 3.4000e-01, -1.4901e-08]) + + >>> # Generates a periodic Blackman window. 
+ >>> torch.signal.windows.blackman(5, sym=False) + tensor([-1.4901e-08, 2.0077e-01, 8.4923e-01, 8.4923e-01, 2.0077e-01]) +""".format( + **window_common_args + ), +) +def blackman(M: int, + *, + sym: bool = True, + dtype: Optional[torch.dtype] = None, + layout: torch.layout = torch.strided, + device: Optional[torch.device] = None, + requires_grad: bool = False) -> Tensor: + if dtype is None: + dtype = torch.get_default_dtype() + + _window_function_checks('blackman', M, dtype, layout) + + return general_cosine(M, a=[0.42, 0.5, 0.08], sym=sym, dtype=dtype, layout=layout, device=device, + requires_grad=requires_grad) + + +@_add_docstr( + r""" +Computes the Bartlett window. + +The Bartlett window is defined as follows: + +.. math:: + w_n = 1 - \left| \frac{2n}{M - 1} - 1 \right| = \begin{cases} + \frac{2n}{M - 1} & \text{if } 0 \leq n \leq \frac{M - 1}{2} \\ + 2 - \frac{2n}{M - 1} & \text{if } \frac{M - 1}{2} < n < M \\ \end{cases} + """, + r""" + +{normalization} + +Arguments: + {M} + +Keyword args: + {sym} + {dtype} + {layout} + {device} + {requires_grad} + +Examples:: + + >>> # Generates a symmetric Bartlett window. + >>> torch.signal.windows.bartlett(10) + tensor([0.0000, 0.2222, 0.4444, 0.6667, 0.8889, 0.8889, 0.6667, 0.4444, 0.2222, 0.0000]) + + >>> # Generates a periodic Bartlett window. + >>> torch.signal.windows.bartlett(10, sym=False) + tensor([0.0000, 0.2000, 0.4000, 0.6000, 0.8000, 1.0000, 0.8000, 0.6000, 0.4000, 0.2000]) +""".format( + **window_common_args + ), +) +def bartlett(M: int, + *, + sym: bool = True, + dtype: Optional[torch.dtype] = None, + layout: torch.layout = torch.strided, + device: Optional[torch.device] = None, + requires_grad: bool = False) -> Tensor: + if dtype is None: + dtype = torch.get_default_dtype() + + _window_function_checks('bartlett', M, dtype, layout) + + if M == 0: + return torch.empty((0,), dtype=dtype, layout=layout, device=device, requires_grad=requires_grad) + + if M == 1: + return torch.ones((1,), dtype=dtype, layout=layout, device=device, requires_grad=requires_grad) + + start = -1 + constant = 2 / (M if not sym else M - 1) + + k = torch.linspace(start=start, + end=start + (M - 1) * constant, + steps=M, + dtype=dtype, + layout=layout, + device=device, + requires_grad=requires_grad) + + return 1 - torch.abs(k) + + +@_add_docstr( + r""" +Computes the general cosine window. + +The general cosine window is defined as follows: + +.. math:: + w_n = \sum^{M-1}_{i=0} (-1)^i a_i \cos{ \left( \frac{2 \pi i n}{M - 1}\right)} + """, + r""" + +{normalization} + +Arguments: + {M} + +Keyword args: + a (Iterable): the coefficients associated to each of the cosine functions. + {sym} + {dtype} + {layout} + {device} + {requires_grad} + +Examples:: + + >>> # Generates a symmetric general cosine window with 3 coefficients. + >>> torch.signal.windows.general_cosine(10, a=[0.46, 0.23, 0.31], sym=True) + tensor([0.5400, 0.3376, 0.1288, 0.4200, 0.9136, 0.9136, 0.4200, 0.1288, 0.3376, 0.5400]) + + >>> # Generates a periodic general cosine window wit 2 coefficients. 
+ >>> torch.signal.windows.general_cosine(10, a=[0.5, 1 - 0.5], sym=False) + tensor([0.0000, 0.0955, 0.3455, 0.6545, 0.9045, 1.0000, 0.9045, 0.6545, 0.3455, 0.0955]) +""".format( + **window_common_args + ), +) +def general_cosine(M, *, + a: Iterable, + sym: bool = True, + dtype: Optional[torch.dtype] = None, + layout: torch.layout = torch.strided, + device: Optional[torch.device] = None, + requires_grad: bool = False) -> Tensor: + if dtype is None: + dtype = torch.get_default_dtype() + + _window_function_checks('general_cosine', M, dtype, layout) + + if M == 0: + return torch.empty((0,), dtype=dtype, layout=layout, device=device, requires_grad=requires_grad) + + if M == 1: + return torch.ones((1,), dtype=dtype, layout=layout, device=device, requires_grad=requires_grad) + + if not isinstance(a, Iterable): + raise TypeError("Coefficients must be a list/tuple") + + if not a: + raise ValueError("Coefficients cannot be empty") + + constant = 2 * torch.pi / (M if not sym else M - 1) + + k = torch.linspace(start=0, + end=(M - 1) * constant, + steps=M, + dtype=dtype, + layout=layout, + device=device, + requires_grad=requires_grad) + + a_i = torch.tensor([(-1) ** i * w for i, w in enumerate(a)], device=device, dtype=dtype, requires_grad=requires_grad) + i = torch.arange(a_i.shape[0], dtype=a_i.dtype, device=a_i.device, requires_grad=a_i.requires_grad) + return (a_i.unsqueeze(-1) * torch.cos(i.unsqueeze(-1) * k)).sum(0) + + +@_add_docstr( + r""" +Computes the general Hamming window. + +The general Hamming window is defined as follows: + +.. math:: + w_n = \alpha - (1 - \alpha) \cos{ \left( \frac{2 \pi n}{M-1} \right)} + """, + r""" + +{normalization} + +Arguments: + {M} + +Keyword args: + alpha (float, optional): the window coefficient. Default: 0.54. + {sym} + {dtype} + {layout} + {device} + {requires_grad} + +Examples:: + + >>> # Generates a symmetric Hamming window with the general Hamming window. + >>> torch.signal.windows.general_hamming(10, sym=True) + tensor([0.0800, 0.1876, 0.4601, 0.7700, 0.9723, 0.9723, 0.7700, 0.4601, 0.1876, 0.0800]) + + >>> # Generates a periodic Hann window with the general Hamming window. + >>> torch.signal.windows.general_hamming(10, alpha=0.5, sym=False) + tensor([0.0000, 0.0955, 0.3455, 0.6545, 0.9045, 1.0000, 0.9045, 0.6545, 0.3455, 0.0955]) +""".format( + **window_common_args + ), +) +def general_hamming(M, + *, + alpha: float = 0.54, + sym: bool = True, + dtype: Optional[torch.dtype] = None, + layout: torch.layout = torch.strided, + device: Optional[torch.device] = None, + requires_grad: bool = False) -> Tensor: + return general_cosine(M, + a=[alpha, 1. - alpha], + sym=sym, + dtype=dtype, + layout=layout, + device=device, + requires_grad=requires_grad) + + +@_add_docstr( + r""" +Computes the minimum 4-term Blackman-Harris window according to Nuttall. + +.. math:: + w_n = 1 - 0.36358 \cos{(z_n)} + 0.48917 \cos{(2z_n)} - 0.13659 \cos{(3z_n)} + 0.01064 \cos{(4z_n)} + +where ``z_n = 2 π n/ M``. + """, + """ + +{normalization} + +Arguments: + {M} + +Keyword args: + {sym} + {dtype} + {layout} + {device} + {requires_grad} + +References:: + + - A. Nuttall, “Some windows with very good sidelobe behavior,” + IEEE Transactions on Acoustics, Speech, and Signal Processing, vol. 29, no. 1, pp. 84-91, + Feb 1981. https://doi.org/10.1109/TASSP.1981.1163506 + + - Heinzel G. 
et al., “Spectrum and spectral density estimation by the Discrete Fourier transform (DFT),
+      including a comprehensive list of window functions and some new flat-top windows”,
+      February 15, 2002 https://holometer.fnal.gov/GH_FFT.pdf
+
+Examples::
+
+    >>> # Generates a symmetric Nuttall window.
+    >>> torch.signal.windows.nuttall(5, sym=True)
+    tensor([3.6280e-04, 2.2698e-01, 1.0000e+00, 2.2698e-01, 3.6280e-04])
+
+    >>> # Generates a periodic Nuttall window.
+    >>> torch.signal.windows.nuttall(5, sym=False)
+    tensor([3.6280e-04, 1.1052e-01, 7.9826e-01, 7.9826e-01, 1.1052e-01])
+""".format(
+        **window_common_args
+    ),
+)
+def nuttall(
+    M: int,
+    *,
+    sym: bool = True,
+    dtype: Optional[torch.dtype] = None,
+    layout: torch.layout = torch.strided,
+    device: Optional[torch.device] = None,
+    requires_grad: bool = False
+) -> Tensor:
+    return general_cosine(M,
+                          a=[0.3635819, 0.4891775, 0.1365995, 0.0106411],
+                          sym=sym,
+                          dtype=dtype,
+                          layout=layout,
+                          device=device,
+                          requires_grad=requires_grad)
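As the bodies above show, ``hamming``, ``hann``, ``blackman``, and ``nuttall`` are thin wrappers over ``general_cosine`` (the first two via ``general_hamming``) with fixed coefficient lists. A small sanity-check sketch of that equivalence, illustrative rather than part of the module::

    import torch
    from torch.signal import windows

    M = 8
    # hann == general_hamming(alpha=0.5) == general_cosine with coefficients [0.5, 0.5]
    assert torch.allclose(windows.hann(M), windows.general_cosine(M, a=[0.5, 0.5]))
    # nuttall == general_cosine with its fixed 4-term coefficient list
    assert torch.allclose(
        windows.nuttall(M),
        windows.general_cosine(M, a=[0.3635819, 0.4891775, 0.1365995, 0.0106411]),
    )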