diff --git a/env-llmeval/lib/python3.10/site-packages/torch/_logging/__init__.py b/env-llmeval/lib/python3.10/site-packages/torch/_logging/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..1260d6aec102c204ec4687bd39824ca1a979d6ca --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torch/_logging/__init__.py @@ -0,0 +1,15 @@ +# Top level logging module for torch logging +# Design doc: https://docs.google.com/document/d/1ZRfTWKa8eaPq1AxaiHrq4ASTPouzzlPiuquSBEJYwS8/edit# +# Simple setup for onboarding (see above doc for more detail): +# 1. register any top-level log qualified name for your module in torch._logging._registrations (see there for examples) +# 2. register any artifacts ( below) in torch._logging._registrations +# a. call getArtifactLogger(__name__, ) at your logging site instead of the standard logger to log your artifact +import torch._logging._registrations +from ._internal import ( + _init_logs, + DEFAULT_LOGGING, + getArtifactLogger, + LazyString, + set_logs, + warning_once, +) diff --git a/env-llmeval/lib/python3.10/site-packages/torch/_logging/__pycache__/__init__.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/torch/_logging/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..256841bc746bebfc288628d40d69ac069ad5be06 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/torch/_logging/__pycache__/__init__.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/torch/_logging/__pycache__/_internal.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/torch/_logging/__pycache__/_internal.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..93609c75040c05a564fd28b0035b5a948c8ba71b Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/torch/_logging/__pycache__/_internal.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/torch/_logging/__pycache__/_registrations.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/torch/_logging/__pycache__/_registrations.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..dfef61406c1b45a28811b9114bc4c45c1bfa8dff Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/torch/_logging/__pycache__/_registrations.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/torch/_logging/_internal.py b/env-llmeval/lib/python3.10/site-packages/torch/_logging/_internal.py new file mode 100644 index 0000000000000000000000000000000000000000..1fb42c9c0c6de8bfd2fda882ae09c1faf8b86449 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torch/_logging/_internal.py @@ -0,0 +1,826 @@ +import functools +import itertools +import logging +import os +import re +from dataclasses import dataclass, field +from importlib import __import__ +from typing import Dict, List, Optional, Set, Union +from weakref import WeakSet + +log = logging.getLogger(__name__) + +DEFAULT_LOG_LEVEL = logging.WARNING +LOG_ENV_VAR = "TORCH_LOGS" +LOG_FORMAT_ENV_VAR = "TORCH_LOGS_FORMAT" + + +@dataclass +class LogRegistry: + # shorthand name to log qualified name + # Note: this only contains loggers registered + # from register_log + # e.g. "dynamo" -> "torch._dynamo" + log_alias_to_log_qnames: Dict[str, List[str]] = field(default_factory=dict) + + # artifact logger qualified names, + # this is populated lazily, as calls to getArtifactLogger + # currently formatted as .__ + # e.g. 
"torch._dynamo.convert_frame.__guards" + artifact_log_qnames: Set[str] = field(default_factory=set) + + # child logs of registered logs if specified via open + # registration by the user (ie placing "torch._dynamo.output_graph" in the env var) + # these need to be tracked so their levels can be reset properly + # e.g. "torch._dynamo.output_graph" + child_log_qnames: Set[str] = field(default_factory=set) + + # artifact names, populated by register_artifact + # e.g. "guards" + artifact_names: Set[str] = field(default_factory=set) + + # Artifacts that should be visible by default in the error message + visible_artifacts: Set[str] = field(default_factory=set) + + # A short description of each artifact + artifact_descriptions: Dict[str, str] = field(default_factory=dict) + + # artifacts which are not displayed unless explicitly named in the + # settings. Ex. output_code is NOT displayed even if the inductor + # log level is set to DEBUG. It must be explicitly named in the settings + off_by_default_artifact_names: Set[str] = field(default_factory=set) + + # logging format string for artifacts + artifact_log_formatters: Dict[str, logging.Formatter] = field(default_factory=dict) + + def is_artifact(self, name): + return name in self.artifact_names + + def is_log(self, alias): + return alias in self.log_alias_to_log_qnames + + # register a log with an alias + def register_log(self, alias, log_qnames: Union[str, List[str]]): + if isinstance(log_qnames, str): + log_qnames = [log_qnames] + self.log_alias_to_log_qnames[alias] = log_qnames + + # register an artifact name + def register_artifact_name( + self, name, description, visible, off_by_default, log_format + ): + self.artifact_names.add(name) + if visible: + self.visible_artifacts.add(name) + self.artifact_descriptions[name] = description + + # if off by default, don't enable it + # when log_name's log_level is set to DEBUG + if off_by_default: + self.off_by_default_artifact_names.add(name) + + if log_format is not None: + self.artifact_log_formatters[name] = logging.Formatter(log_format) + + # register the qualified name of an artifact log + # this is needed to know which logs need to be reset + # whenever the log_state is changed + def register_artifact_log(self, artifact_log_qname): + self.artifact_log_qnames.add(artifact_log_qname) + + def register_child_log(self, log_qname): + self.child_log_qnames.add(log_qname) + + # flattens all the qnames together (TODO: consider memoizing?) 
+ def get_log_qnames(self) -> Set[str]: + return { + qname + for qnames in self.log_alias_to_log_qnames.values() + for qname in qnames + } + + def get_artifact_log_qnames(self): + return set(self.artifact_log_qnames) + + def get_child_log_qnames(self): + return set(self.child_log_qnames) + + def is_off_by_default(self, artifact_qname): + return artifact_qname in self.off_by_default_artifact_names + + +@dataclass +class LogState: + # qualified log names -> currently set log level + log_qname_to_level: Dict[str, str] = field(default_factory=dict) + + # the set of currently enabled artifacts + artifact_names: Set[str] = field(default_factory=set) + + def enable_artifact(self, artifact_name): + self.artifact_names.add(artifact_name) + + def is_artifact_enabled(self, name): + return name in self.artifact_names + + def enable_log(self, log_qnames, log_level): + if isinstance(log_qnames, str): + log_qnames = [log_qnames] + for log_qname in log_qnames: + self.log_qname_to_level[log_qname] = log_level + + def get_log_level_pairs(self): + """Returns all qualified module names for which the user requested + explicit logging settings. + + .. warning: + + This function used to return all loggers, regardless of whether + or not the user specified them or not; it now only returns logs + which were explicitly mentioned by the user (and torch, which + always is implicitly requested when we initialize our logging + subsystem.) + """ + return self.log_qname_to_level.items() + + def clear(self): + self.log_qname_to_level.clear() + self.artifact_names.clear() + + +log_registry = LogRegistry() +log_state = LogState() + +# sample usage: torch._logging.set_logs(**torch._logging.DEFAULT_LOGGING) +DEFAULT_LOGGING = { + "dynamo": logging.INFO, + "graph_code": True, + "aot": logging.INFO, + "graph_breaks": True, + "recompiles": True, + "dynamic": logging.INFO, + "guards": True, + "trace_source": True, +} + + +def set_logs( + *, + all: Optional[int] = None, + dynamo: Optional[int] = None, + aot: Optional[int] = None, + dynamic: Optional[int] = None, + inductor: Optional[int] = None, + distributed: Optional[int] = None, + onnx: Optional[int] = None, + bytecode: bool = False, + aot_graphs: bool = False, + aot_joint_graph: bool = False, + ddp_graphs: bool = False, + graph: bool = False, + graph_code: bool = False, + graph_breaks: bool = False, + graph_sizes: bool = False, + guards: bool = False, + recompiles: bool = False, + recompiles_verbose: bool = False, + trace_source: bool = False, + trace_call: bool = False, + output_code: bool = False, + schedule: bool = False, + perf_hints: bool = False, + post_grad_graphs: bool = False, + onnx_diagnostics: bool = False, + fusion: bool = False, + overlap: bool = False, + modules: Optional[Dict[str, Union[int, bool]]] = None, +): + """ + Sets the log level for individual components and toggles individual log + artifact types. + + .. warning:: This feature is a prototype and may have compatibility + breaking changes in the future. + + .. note:: The ``TORCH_LOGS`` environment variable has complete precedence + over this function, so if it was set, this function does nothing. + + A component is a set of related features in PyTorch. All of the log + messages emitted from a given component have their own log levels. If the + log level of a particular message has priority greater than or equal to its + component's log level setting, it is emitted. Otherwise, it is supressed. 
+ This allows you to, for instance, silence large groups of log messages that + are not relevant to you and increase verbosity of logs for components that + are relevant. The expected log level values, ordered from highest to lowest + priority, are: + + * ``logging.CRITICAL`` + * ``logging.ERROR`` + * ``logging.WARNING`` + * ``logging.INFO`` + * ``logging.DEBUG`` + * ``logging.NOTSET`` + + See documentation for the Python ``logging`` module for more information on + log levels: ``_ + + An artifact is a particular type of log message. Each artifact is assigned + to a parent component. A component can emit many different kinds of + artifacts. In general, an artifact is emitted if either its corresponding + setting in the argument list below is turned on or if its parent component + is set to a log level less than or equal to the log level of the artifact. + + Keyword args: + all (:class:`Optional[int]`): + The default log level for all components. Default: ``logging.WARN`` + + dynamo (:class:`Optional[int]`): + The log level for the TorchDynamo component. Default: ``logging.WARN`` + + aot (:class:`Optional[int]`): + The log level for the AOTAutograd component. Default: ``logging.WARN`` + + inductor (:class:`Optional[int]`): + The log level for the TorchInductor component. Default: ``logging.WARN`` + + dynamic (:class:`Optional[int]`): + The log level for dynamic shapes. Default: ``logging.WARN`` + + distributed (:class:`Optional[int]`): + Whether to log communication operations and other debug info from pytorch distributed components. + Default: ``logging.WARN`` + + onnx (:class:`Optional[int]`): + The log level for the ONNX exporter component. Default: ``logging.WARN`` + + bytecode (:class:`bool`): + Whether to emit the original and generated bytecode from TorchDynamo. + Default: ``False`` + + aot_graphs (:class:`bool`): + Whether to emit the graphs generated by AOTAutograd. Default: ``False`` + + aot_joint_graph (:class:`bool`): + Whether to emit the joint forward-backward graph generated by AOTAutograd. Default: ``False`` + + ddp_graphs (:class:`bool`): + Whether to emit graphs generated by DDPOptimizer. Default: ``False`` + + graph (:class:`bool`): + Whether to emit the graph captured by TorchDynamo in tabular format. + Default: ``False`` + + graph_code (:class:`bool`): + Whether to emit the python source of the graph captured by TorchDynamo. + Default: ``False`` + + graph_breaks (:class:`bool`): + Whether to emit the graph breaks encountered by TorchDynamo. + Default: ``False`` + + graph_sizes (:class:`bool`): + Whether to emit tensor sizes of the graph captured by TorchDynamo. + Default: ``False`` + + guards (:class:`bool`): + Whether to emit the guards generated by TorchDynamo for each compiled + function. Default: ``False`` + + recompiles (:class:`bool`): + Whether to emit a guard failure reason and message every time + TorchDynamo recompiles a function. Default: ``False`` + + recompiles_verbose (:class:`bool`): + Whether to emit all guard failure reasons when TorchDynamo recompiles + a function, even those that are not actually run. Default: ``False`` + + trace_source (:class:`bool`): + Whether to emit when TorchDynamo begins tracing a new line. Default: ``False`` + + trace_call (:class:`bool`): + Whether to emit detailed line location when TorchDynamo creates an FX node + corresponding to function call. Python 3.11+ only. Default: ``False`` + + output_code (:class:`bool`): + Whether to emit the TorchInductor output code. 
Default: ``False`` + + schedule (:class:`bool`): + Whether to emit the TorchInductor schedule. Default: ``False`` + + perf_hints (:class:`bool`): + Whether to emit the TorchInductor perf hints. Default: ``False`` + + post_grad_graphs (:class:`bool`): + Whether to emit the graphs generated by after post grad passes. Default: ``False`` + + onnx_diagnostics (:class:`bool`): + Whether to emit the ONNX exporter diagnostics in logging. Default: ``False`` + + fusion (:class:`bool`): + Whether to emit detailed Inductor fusion decisions. Default: ``False`` + + overlap (:class:`bool`): + Whether to emit detailed Inductor compute/comm overlap decisions. Default: ``False`` + + modules (dict): + This argument provides an alternate way to specify the above log + component and artifact settings, in the format of a keyword args + dictionary given as a single argument. There are two cases + where this is useful (1) if a new log component or artifact has + been registered but a keyword argument for it has not been added + to this function and (2) if the log level for an unregistered module + needs to be set. This can be done by providing the fully-qualified module + name as the key, with the log level as the value. Default: ``None`` + + + Example:: + + >>> # xdoctest: +SKIP + >>> import logging + + # The following changes the "dynamo" component to emit DEBUG-level + # logs, and to emit "graph_code" artifacts. + + >>> torch._logging.set_logs(dynamo=logging.DEBUG, graph_code=True) + + # The following enables the logs for a different module + + >>> torch._logging.set_logs(modules={"unregistered.module.name": logging.DEBUG}) + """ + # ignore if env var is set + if LOG_ENV_VAR in os.environ: + log.warning( + "Using TORCH_LOGS environment variable for log settings, ignoring call to set_logs" + ) + return + + log_state.clear() + + modules = modules or {} + + def _set_logs(**kwargs): + for alias, val in itertools.chain(kwargs.items(), modules.items()): # type: ignore[union-attr] + if val is None: + continue + + if log_registry.is_artifact(alias): + if not isinstance(val, bool): + raise ValueError( + f"Expected bool to enable artifact {alias}, received {val}" + ) + + if val: + log_state.enable_artifact(alias) + elif log_registry.is_log(alias) or alias in log_registry.child_log_qnames: + if val not in logging._levelToName: + raise ValueError( + f"Unrecognized log level for log {alias}: {val}, valid level values " + f"are: {','.join([str(k) for k in logging._levelToName.keys()])}" + ) + + log_state.enable_log( + log_registry.log_alias_to_log_qnames.get(alias, alias), val + ) + else: + raise ValueError( + f"Unrecognized log or artifact name passed to set_logs: {alias}" + ) + + _init_logs() + + _set_logs( + torch=all, + dynamo=dynamo, + aot=aot, + inductor=inductor, + dynamic=dynamic, + bytecode=bytecode, + aot_graphs=aot_graphs, + aot_joint_graph=aot_joint_graph, + ddp_graphs=ddp_graphs, + distributed=distributed, + graph=graph, + graph_code=graph_code, + graph_breaks=graph_breaks, + graph_sizes=graph_sizes, + guards=guards, + recompiles=recompiles, + recompiles_verbose=recompiles_verbose, + trace_source=trace_source, + trace_call=trace_call, + output_code=output_code, + schedule=schedule, + perf_hints=perf_hints, + post_grad_graphs=post_grad_graphs, + onnx=onnx, + onnx_diagnostics=onnx_diagnostics, + fusion=fusion, + overlap=overlap, + ) + + +def get_loggers(): + """ + Returns: a list of all registered loggers + """ + return [logging.getLogger(qname) for qname in log_registry.get_log_qnames()] + + +def 
register_log(setting_name, log_name): + """ + Enables a log to be controlled by the env var and user API with the setting_name + Args: + setting_name: the shorthand name used in the env var and user API + log_name: the log name that the setting_name is associated with + """ + log_registry.register_log(setting_name, log_name) + + +def register_artifact( + setting_name, description, visible=False, off_by_default=False, log_format=None +): + """ + Enables an artifact to be controlled by the env var and user API with name + Args: + setting_name: the shorthand name used in the env var and user API + description: A description of what this outputs + visible: Whether it gets suggested to users by default + off_by_default: whether this artifact should be logged when the ancestor loggers + are enabled at level DEBUG + """ + log_registry.register_artifact_name( + setting_name, description, visible, off_by_default, log_format + ) + + +def getArtifactLogger(module_qname, artifact_name): + if artifact_name not in log_registry.artifact_names: + raise ValueError( + f"Artifact name: {repr(artifact_name)} not registered," + f"please call register_artifact({repr(artifact_name)}) in torch._logging.registrations." + ) + qname = module_qname + f".__{artifact_name}" + log = logging.getLogger(qname) + log.artifact_name = artifact_name # type: ignore[attr-defined] + log_registry.register_artifact_log(qname) + configure_artifact_log(log) + return log + + +INCR_VERBOSITY_CHAR = "+" +DECR_VERBOSITY_CHAR = "-" +VERBOSITY_REGEX = ( + "(" + + "|".join([re.escape(INCR_VERBOSITY_CHAR), re.escape(DECR_VERBOSITY_CHAR)]) + + "?)" +) + + +def configure_artifact_log(log): + # If the artifact is off by default, then it should only be logged when explicitly + # enabled; set propagate to False so that this artifact is not propagated + # to its ancestor logger + if log_registry.is_off_by_default(log.artifact_name): + log.propagate = False + + # enable artifact logging when explicitly enabled + if log_state.is_artifact_enabled(log.artifact_name): + log.setLevel(logging.DEBUG) + log.propagate = True + + +# match a comma separated list of loggable names (whitespace allowed after commas) +def _gen_settings_regex(): + return re.compile(r"((\+|-)?[\w\.]+,\s*)*(\+|-)?[\w\.]+?") + + +def _validate_settings(settings): + return re.fullmatch(_gen_settings_regex(), settings) is not None + + +def help_message(verbose=False): + def pad_to(s, length=30): + assert len(s) <= length + return s + " " * (length - len(s)) + + if verbose: + printed_artifacts = log_registry.artifact_names + else: + printed_artifacts = log_registry.visible_artifacts + + if verbose: + heading = "All registered names" + else: + heading = "Visible registered names (use TORCH_LOGS='+help' for full list)" + lines = ( + ["all"] + + sorted(log_registry.log_alias_to_log_qnames.keys()) + + sorted( + [ + f"{pad_to(name)}\t{log_registry.artifact_descriptions[name]}" + for name in printed_artifacts + ] + ) + ) + setting_info = " " + "\n ".join(lines) + examples = """ +Examples: + TORCH_LOGS="+dynamo,aot" will set the log level of TorchDynamo to + logging.DEBUG and AOT to logging.INFO + + TORCH_LOGS="-dynamo,+inductor" will set the log level of TorchDynamo to + logging.ERROR and TorchInductor to logging.DEBUG + + TORCH_LOGS="aot_graphs" will enable the aot_graphs artifact + + TORCH_LOGS="+dynamo,schedule" will enable set the log level of TorchDynamo + to logging.DEBUG and enable the schedule artifact + + TORCH_LOGS="+some.random.module,schedule" will set the log level of + 
some.random.module to logging.DEBUG and enable the schedule artifact + + TORCH_LOGS_FORMAT="%(levelname)s: %(message)s" or any provided format + string will set the output format + Valid keys are "levelname", "message", "pathname", "levelno", "lineno", + "filename" and "name". +""" # flake8: noqa: B950 + msg = f""" +TORCH_LOGS Info +{examples} + +{heading} +{setting_info} +""" + return msg + + +def _invalid_settings_err_msg(settings, verbose=False): + valid_settings = ", ".join( + ["all"] + + list(log_registry.log_alias_to_log_qnames.keys()) + + list(log_registry.artifact_names) + ) + msg = f""" +Invalid log settings: {settings}, must be a comma separated list of fully +qualified module names, registered log names or registered artifact names. +For more info on various settings, try TORCH_LOGS="help" +Valid settings: +{valid_settings} +""" + return msg + + +@functools.lru_cache +def _parse_log_settings(settings): + if settings == "": + return dict() + + if settings == "help": + raise ValueError(help_message(verbose=False)) + elif settings == "+help": + raise ValueError(help_message(verbose=True)) + if not _validate_settings(settings): + raise ValueError(_invalid_settings_err_msg(settings)) + + settings = re.sub(r"\s+", "", settings) + log_names = settings.split(",") + + def get_name_level_pair(name): + clean_name = name.replace(INCR_VERBOSITY_CHAR, "") + clean_name = clean_name.replace(DECR_VERBOSITY_CHAR, "") + + if name[0] == INCR_VERBOSITY_CHAR: + level = logging.DEBUG + elif name[0] == DECR_VERBOSITY_CHAR: + level = logging.ERROR + else: + level = logging.INFO + + return clean_name, level + + log_state = LogState() + + for name in log_names: + name, level = get_name_level_pair(name) + + if name == "all": + name = "torch" + + if log_registry.is_log(name): + assert level is not None + log_qnames = log_registry.log_alias_to_log_qnames[name] + log_state.enable_log(log_qnames, level) + elif log_registry.is_artifact(name): + log_state.enable_artifact(name) + elif _is_valid_module(name): + if not _has_registered_parent(name): + log_registry.register_log(name, name) + else: + log_registry.register_child_log(name) + log_state.enable_log(name, level) + else: + raise ValueError(_invalid_settings_err_msg(settings)) + + return log_state + + +def _is_valid_module(qname): + try: + __import__(qname) + return True + except ImportError: + return False + + +def _update_log_state_from_env(): + global log_state + log_setting = os.environ.get(LOG_ENV_VAR, None) + if log_setting is not None: + log_state = _parse_log_settings(log_setting) + + +def _has_registered_parent(log_qname): + cur_log = logging.getLogger(log_qname) + + registered_log_qnames = log_registry.get_log_qnames() + + while cur_log.parent: + if cur_log.name in registered_log_qnames: + return True + cur_log = cur_log.parent + + return False + + +# apply custom formats to artifacts when necessary +class TorchLogsFormatter(logging.Formatter): + def format(self, record): + artifact_name = getattr(logging.getLogger(record.name), "artifact_name", None) + if artifact_name is not None: + artifact_formatter = log_registry.artifact_log_formatters.get( + artifact_name, None + ) + if artifact_formatter is not None: + return artifact_formatter.format(record) + + record.message = record.getMessage() + record.asctime = self.formatTime(record, self.datefmt) + + # exception handling - copied from logging.Formatter.format + s = record.message + if record.exc_info: + # Cache the traceback text to avoid converting it multiple times + # (it's constant anyway) + if 
not record.exc_text: + record.exc_text = self.formatException(record.exc_info) + if record.exc_text: + if s[-1:] != "\n": + s = s + "\n" + s = s + record.exc_text + if record.stack_info: + if s[-1:] != "\n": + s = s + "\n" + s = s + self.formatStack(record.stack_info) + + lines = s.split("\n") + record.rankprefix = "" + if dist.is_available() and dist.is_initialized(): + record.rankprefix = f"[rank{dist.get_rank()}]:" + + record.traceid = "" + if (trace_id := torch._guards.CompileContext.current_trace_id()) is not None: + record.traceid = f" [{trace_id}]" + + prefix = f"{record.rankprefix}[{record.asctime}]{record.traceid} {record.name}: [{record.levelname}]" + return "\n".join(f"{prefix} {l}" for l in lines) + + +def _default_formatter(): + fmt = os.environ.get(LOG_FORMAT_ENV_VAR, None) + if fmt is None: + return TorchLogsFormatter() + else: + return logging.Formatter(fmt) + + +DEFAULT_FORMATTER = _default_formatter() + + +def _setup_handlers(create_handler_fn, log): + debug_handler = _track_handler(create_handler_fn()) + debug_handler.setFormatter(DEFAULT_FORMATTER) + debug_handler.setLevel(logging.DEBUG) + log.addHandler(debug_handler) + + +handlers = WeakSet() # type: ignore[var-annotated] + + +# mark handlers that we've created +# so we don't modify user handlers +def _track_handler(handler): + handlers.add(handler) + return handler + + +def _is_torch_handler(handler): + return handler in handlers + + +# clears all torch handlers on specified loggers +def _clear_handlers(log): + to_remove = [handler for handler in log.handlers if _is_torch_handler(handler)] + for handler in to_remove: + log.removeHandler(handler) + + +def _reset_logs(): + # reset all registered logs + for log_qname in log_registry.get_log_qnames(): + log = logging.getLogger(log_qname) + log.setLevel(logging.WARNING) + log.propagate = False + _clear_handlers(log) + + # reset all artifact and child logs + for artifact_log_qname in itertools.chain( + log_registry.get_artifact_log_qnames(), log_registry.get_child_log_qnames() + ): + log = logging.getLogger(artifact_log_qname) + log.setLevel(logging.NOTSET) + log.propagate = True + + +def _get_log_state(): + return log_state + + +def _set_log_state(state): + global log_state + log_state = state + + +def _init_logs(log_file_name=None): + _reset_logs() + _update_log_state_from_env() + + # First, reset all known (registered) loggers to NOTSET, so that they + # respect their parent log level + for log_qname in log_registry.get_log_qnames(): + # But not the top level torch level: this defaults to WARNING so + # that our log messages don't leak to the lower levels + if log_qname == "torch": + continue + log = logging.getLogger(log_qname) + log.setLevel(logging.NOTSET) + + # Now, for all loggers which the user requested to have non-standard + # logging behavior, modify their log levels + for log_qname, level in log_state.get_log_level_pairs(): + log = logging.getLogger(log_qname) + log.setLevel(level) + + # Finally, setup handlers for all registered loggers + for log_qname in log_registry.get_log_qnames(): + log = logging.getLogger(log_qname) + _setup_handlers( + logging.StreamHandler, + log, + ) + + if log_file_name is not None: + _setup_handlers( + lambda: logging.FileHandler(log_file_name), + log, + ) + + # configure artifact loggers, note: this must happen last + # since the levels of ancestor loggers are taken into account + for artifact_log_qname in log_registry.get_artifact_log_qnames(): + log = logging.getLogger(artifact_log_qname) + configure_artifact_log(log) + + 
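The handler bookkeeping above (`_track_handler`, `_is_torch_handler`, `_clear_handlers`) keeps every handler this module creates in a `WeakSet`, so a reset removes only torch-created handlers and never ones the user attached. A minimal standalone sketch of that pattern, using illustrative `demo_*` names that are not part of this module:

import logging
from weakref import WeakSet

demo_tracked = WeakSet()  # analogous to the module-level `handlers` WeakSet above

def demo_track(handler):
    # Remember handlers we create so we can tell them apart from user handlers.
    demo_tracked.add(handler)
    return handler

def demo_clear(logger):
    # Remove only handlers we created; user-attached handlers stay put.
    for h in [h for h in logger.handlers if h in demo_tracked]:
        logger.removeHandler(h)

logger = logging.getLogger("demo")
user_handler = logging.StreamHandler()   # pretend the user attached this one
logger.addHandler(user_handler)
logger.addHandler(demo_track(logging.StreamHandler()))

demo_clear(logger)
assert logger.handlers == [user_handler]

Tracking with a `WeakSet` rather than a list also means a handler that is dropped elsewhere does not keep the registry growing.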
+@functools.lru_cache(None) +def warning_once(logger_obj, *args, **kwargs): + """ + This function is similar to `logger.warning()`, but will emit the warning with the same message only once + Note: The cache is for the function arguments, so 2 different callers using the same arguments will hit the cache. + The assumption here is that all warning messages are unique across the code. If they aren't then need to switch to + another type of cache that includes the caller frame information in the hashing function. + """ + logger_obj.warning(*args, **kwargs) + + +class LazyString: + def __init__(self, func, *args, **kwargs): + self.func = func + self.args = args + self.kwargs = kwargs + + def __str__(self): + return self.func(*self.args, **self.kwargs) + + +import torch._guards +import torch.distributed as dist diff --git a/env-llmeval/lib/python3.10/site-packages/torch/_logging/_registrations.py b/env-llmeval/lib/python3.10/site-packages/torch/_logging/_registrations.py new file mode 100644 index 0000000000000000000000000000000000000000..0430e1175f5d0b8f552a2c6a4332445b8f25d74f --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torch/_logging/_registrations.py @@ -0,0 +1,110 @@ +# flake8: noqa: B950 +from ._internal import register_artifact, register_log + +DYNAMIC = ["torch.fx.experimental.symbolic_shapes", "torch.fx.experimental.sym_node"] +DISTRIBUTED = ["torch.distributed", "torch._dynamo.backends.distributed"] + +register_log("dynamo", ["torch._dynamo", *DYNAMIC]) +register_log("aot", ["torch._functorch.aot_autograd", "torch._functorch._aot_autograd"]) +register_log("inductor", "torch._inductor") +register_log("dynamic", DYNAMIC) +register_log("torch", "torch") +register_log("distributed", DISTRIBUTED) +register_log("onnx", "torch.onnx") + +register_artifact( + "guards", + "This prints the guards for every compiled Dynamo frame. It does not tell you where the guards come from.", + visible=True, +) +register_artifact("verbose_guards", "", off_by_default=True) +register_artifact( + "bytecode", + "Prints the original and modified bytecode from Dynamo. Mostly useful if you're debugging our bytecode generation in Dynamo.", + off_by_default=True, +) +register_artifact( + "graph", + "Prints the dynamo traced graph (prior to AOTDispatch) in a table. If you prefer python code use `graph_code` instead. ", +) +register_artifact("graph_code", "Like `graph`, but gives you the Python code instead.") +register_artifact( + "graph_sizes", "Prints the sizes of all FX nodes in the dynamo graph." +) +register_artifact( + "trace_source", + "As we execute bytecode, prints the file name / line number we are processing and the actual source code. Useful with `bytecode`", +) +register_artifact( + "trace_call", + "Like trace_source, but it will give you the per-expression blow-by-blow if your Python is recent enough.", +) +register_artifact( + "aot_graphs", + "Prints the FX forward and backward graph generated by AOTDispatch, after partitioning. Useful to understand what's being given to Inductor", + visible=True, +) +register_artifact( + "aot_joint_graph", + "Print FX joint graph from AOTAutograd, prior to partitioning. Useful for debugging partitioning", +) +register_artifact( + "post_grad_graphs", + "Prints the FX graph generated by post grad passes. Useful to understand what's being given to Inductor after post grad passes", +) +register_artifact( + "compiled_autograd", + "Prints various logs in compiled_autograd, including but not limited to the graphs. 
Useful for debugging compiled_autograd.", + visible=True, +) +register_artifact( + "ddp_graphs", + "Only relevant for compiling DDP. DDP splits into multiple graphs to trigger comms early. This will print each individual graph here.", +) +register_artifact( + "recompiles", + "Prints the reason why we recompiled a graph. Very, very useful.", + visible=True, +) +register_artifact( + "recompiles_verbose", + "Prints all guard checks that fail during a recompilation. " + "At runtime, Dynamo will stop at the first failed check for each failing guard. " + "So not all logged failing checks are actually ran by Dynamo.", + visible=True, + off_by_default=True, +) +register_artifact( + "graph_breaks", + "Prints whenever Dynamo decides that it needs to graph break (i.e. create a new graph). Useful for debugging why torch.compile has poor performance", + visible=True, +) +register_artifact( + "not_implemented", + "Prints log messages whenever we return NotImplemented in a multi-dispatch, letting you trace through each object we attempted to dispatch to", +) +register_artifact( + "output_code", + "Prints the code that Inductor generates (either Triton or C++)", + off_by_default=True, + visible=True, +) +register_artifact( + "schedule", + "Inductor scheduler information. Useful if working on Inductor fusion algo", + off_by_default=True, +) +register_artifact("perf_hints", "", off_by_default=True) +register_artifact("onnx_diagnostics", "", off_by_default=True) +register_artifact( + "fusion", + "Detailed Inductor fusion decisions. More detailed than 'schedule'", + off_by_default=True, +) +register_artifact( + "overlap", + "Detailed Inductor compute/comm overlap decisions", + off_by_default=True, +) + +register_artifact("custom_format_test_artifact", "Testing only", log_format="") diff --git a/env-llmeval/lib/python3.10/site-packages/torch/ao/ns/__pycache__/__init__.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/torch/ao/ns/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..e14de74fefea5bc36828f6955ea1b3d749b1cb32 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/torch/ao/ns/__pycache__/__init__.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/torch/ao/ns/__pycache__/_numeric_suite.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/torch/ao/ns/__pycache__/_numeric_suite.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..39debbdf4b72cf14ce994a82130d84512d94504b Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/torch/ao/ns/__pycache__/_numeric_suite.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/torch/ao/ns/__pycache__/_numeric_suite_fx.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/torch/ao/ns/__pycache__/_numeric_suite_fx.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..ce6ce12dcee637c126f4a3a46fe251d9aa05774a Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/torch/ao/ns/__pycache__/_numeric_suite_fx.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/torch/ao/ns/fx/__pycache__/__init__.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/torch/ao/ns/fx/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..26ceb7f61b6435f74d51b79a73b46d4f0a26c41d Binary files /dev/null and 
b/env-llmeval/lib/python3.10/site-packages/torch/ao/ns/fx/__pycache__/__init__.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/torch/ao/ns/fx/__pycache__/n_shadows_utils.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/torch/ao/ns/fx/__pycache__/n_shadows_utils.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..d1b0af5f08beb0202733944fc5861f1288466af3 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/torch/ao/ns/fx/__pycache__/n_shadows_utils.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/torch/ao/ns/fx/__pycache__/ns_types.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/torch/ao/ns/fx/__pycache__/ns_types.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..2969ec26b2ce33561159afe73c6ba21eecaa7bc6 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/torch/ao/ns/fx/__pycache__/ns_types.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/torch/ao/ns/fx/__pycache__/qconfig_multi_mapping.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/torch/ao/ns/fx/__pycache__/qconfig_multi_mapping.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..a8e4abe9599be8bfabcc8636ae8c102707d7eb9e Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/torch/ao/ns/fx/__pycache__/qconfig_multi_mapping.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/torch/ao/ns/fx/__pycache__/utils.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/torch/ao/ns/fx/__pycache__/utils.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..4d4cac6c2c90d4e1f73aeb0c0c29a897ef929f8f Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/torch/ao/ns/fx/__pycache__/utils.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/torch/profiler/__init__.py b/env-llmeval/lib/python3.10/site-packages/torch/profiler/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e3c4145fd91f619a1f7ff9c4dca6fae5c1c16abe --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torch/profiler/__init__.py @@ -0,0 +1,48 @@ +r""" +PyTorch Profiler is a tool that allows the collection of performance metrics during training and inference. +Profiler's context manager API can be used to better understand what model operators are the most expensive, +examine their input shapes and stack traces, study device kernel activity and visualize the execution trace. + +.. note:: + An earlier version of the API in :mod:`torch.autograd` module is considered legacy and will be deprecated. + +""" +import os + +from torch._C._autograd import _supported_activities, DeviceType, kineto_available +from torch._C._profiler import _ExperimentalConfig, ProfilerActivity, RecordScope +from torch.autograd.profiler import KinetoStepTracker, record_function +from torch.optim.optimizer import register_optimizer_step_post_hook + +from .profiler import ( + _KinetoProfile, + ExecutionTraceObserver, + profile, + ProfilerAction, + schedule, + supported_activities, + tensorboard_trace_handler, +) + +__all__ = [ + "profile", + "schedule", + "supported_activities", + "tensorboard_trace_handler", + "ProfilerAction", + "ProfilerActivity", + "kineto_available", + "DeviceType", + "record_function", + "ExecutionTraceObserver", +] + +from . 
import itt + + +def _optimizer_post_hook(optimizer, args, kwargs): + KinetoStepTracker.increment_step("Optimizer") + + +if os.environ.get("KINETO_USE_DAEMON", None): + _ = register_optimizer_step_post_hook(_optimizer_post_hook) diff --git a/env-llmeval/lib/python3.10/site-packages/torch/profiler/__pycache__/__init__.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/torch/profiler/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..7b1c84414a40ae4686c6a913c9b08d0e1a11a998 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/torch/profiler/__pycache__/__init__.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/torch/profiler/__pycache__/_memory_profiler.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/torch/profiler/__pycache__/_memory_profiler.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..d483d43bbf7033c30a1bb0409ba12cfdf8172031 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/torch/profiler/__pycache__/_memory_profiler.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/torch/profiler/__pycache__/_pattern_matcher.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/torch/profiler/__pycache__/_pattern_matcher.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..50fbe738edee28d6ed4c8b214bf6770bfb94fce2 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/torch/profiler/__pycache__/_pattern_matcher.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/torch/profiler/__pycache__/_utils.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/torch/profiler/__pycache__/_utils.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..01a4a0f3e8e354fd5ca434cbc8328a66da6d30c8 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/torch/profiler/__pycache__/_utils.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/torch/profiler/__pycache__/itt.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/torch/profiler/__pycache__/itt.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..d7acae92a96ad22be6b6d7e05d4348f37ea9aba3 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/torch/profiler/__pycache__/itt.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/torch/profiler/__pycache__/profiler.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/torch/profiler/__pycache__/profiler.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..fe63e921ab6a58193dea6b9652d8d73f6de1b659 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/torch/profiler/__pycache__/profiler.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/torch/profiler/__pycache__/python_tracer.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/torch/profiler/__pycache__/python_tracer.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..5b63d4d7ae183e258abd361749e2b340ebfc3dc6 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/torch/profiler/__pycache__/python_tracer.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/torch/profiler/_memory_profiler.py b/env-llmeval/lib/python3.10/site-packages/torch/profiler/_memory_profiler.py new file mode 100644 index 
0000000000000000000000000000000000000000..dcbbe151a647469ecee313e30c1d6c363a535aa9 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torch/profiler/_memory_profiler.py @@ -0,0 +1,1201 @@ +import collections +import dataclasses +import enum +import itertools as it +import logging +from typing import ( + Any, + cast, + DefaultDict, + Dict, + Iterator, + List, + Optional, + Set, + Tuple, + Union, +) + +from typing_extensions import Literal + +import torch +from torch._C import FunctionSchema +from torch._C._autograd import _ProfilerResult +from torch._C._profiler import ( + _EventType, + _ExtraFields_Allocation, + _ExtraFields_TorchOp, + _ProfilerEvent, + _TensorMetadata, + RecordScope, +) +from torch._utils import _element_size +from torch.profiler import _utils + +KeyAndID = Tuple["Key", int] +TensorAndID = Tuple["TensorKey", int] + +log = logging.getLogger(__name__) + + +class Category(enum.Enum): + INPUT = enum.auto() + TEMPORARY = enum.auto() + ACTIVATION = enum.auto() + GRADIENT = enum.auto() + AUTOGRAD_DETAIL = enum.auto() + PARAMETER = enum.auto() + OPTIMIZER_STATE = enum.auto() + + +_CATEGORY_TO_COLORS = { + Category.PARAMETER: "darkgreen", + Category.OPTIMIZER_STATE: "goldenrod", + Category.INPUT: "black", + Category.TEMPORARY: "mediumpurple", + Category.ACTIVATION: "red", + Category.GRADIENT: "mediumblue", + Category.AUTOGRAD_DETAIL: "royalblue", + None: "grey", +} + +_CATEGORY_TO_INDEX = {c: i for i, c in enumerate(_CATEGORY_TO_COLORS)} + + +class Action(enum.Enum): + PREEXISTING = enum.auto() + CREATE = enum.auto() + INCREMENT_VERSION = enum.auto() + DESTROY = enum.auto() + + +_ACTION_TO_INDEX = {i: i.value for i in Action} + + +@dataclasses.dataclass(eq=True, unsafe_hash=False, frozen=True) +class Key: + device: torch.device + + +@dataclasses.dataclass +class _Storage: + """Bundle storage pointer and id. + + All profiling logic should use `allocation_id`, however it is useful to + print storage pointers for debugging and unit tests sometimes look up + values using the storage data pointer of a live Tensor.""" + + ptr: int + allocation_id: int + + def __repr__(self) -> str: + return f"{hex(self.ptr):>18} ({self.allocation_id})" + + def __eq__(self, other: object) -> bool: + return isinstance(other, _Storage) and self.allocation_id == other.allocation_id + + def __hash__(self) -> int: + return hash(self.allocation_id) + + +@dataclasses.dataclass(eq=True, unsafe_hash=True, frozen=True) +class TensorKey(Key): + """Hashable identifier for a storage which has been asigned an ID. + + A detailed description of Tensor IDs and why they are needed is given in + `torch/csrc/profiler/collection.h` when `TensorID` is declared. To + summarize, multiple Storage buffers can map to the same logical Tensor. + This dataclass is used to refer to a concrete in-memory StorageImpl of + a Tensor. 
+ """ + + id: int + storage: _Storage + + def __repr__(self) -> str: + return f"id={self.id}: {repr(self.storage):<24} ({self.device})" + + def __lt__(self, other: "TensorKey") -> bool: + return self._as_sortable < other._as_sortable + + @staticmethod + def _make( + tensor_id: Optional[int], + storage_ptr: Optional[int], + allocation_id: Optional[int], + device: torch.device, + ) -> Optional["TensorKey"]: + if ( + tensor_id is not None + and storage_ptr is not None + and allocation_id is not None + ): + return TensorKey(device, tensor_id, _Storage(storage_ptr, allocation_id)) + return None + + @classmethod + def from_allocation(cls, alloc: _ExtraFields_Allocation) -> Optional["TensorKey"]: + return cls._make(alloc.id, alloc.ptr, alloc.allocation_id, alloc.device) + + @classmethod + def from_tensor(cls, t: Optional[_TensorMetadata]) -> Optional["TensorKey"]: + if t is not None: + return cls._make(t.id, t.storage_data_ptr, t.allocation_id, t.device) + return None + + @property + def _as_sortable(self) -> Tuple[int, int, str, int]: + return self.id, self.storage.allocation_id, self.device.type, self.device.index + + +def _extract_parameters_and_gradients( + node: _ProfilerEvent, +) -> Iterator[Tuple[Optional[TensorKey], Optional[TensorKey]]]: + children = node.children + + # AccumulateGrad is used in the Autograd engine to handle gradient updates. + # There are two possible cases: + # 1) This is a newly created gradient Tensor. In that case there is nothing + # to accumulate, so autograd simply detaches the Tensor. + # + # 2) There is a preexisting gradient Tensor and we need to add the newly + # computed update. This is done with an in-place add (aten::add_) op. + # (The underscore suffix denotes "in-place".) + if ( + node.typed[0] == _EventType.TorchOp + and node.typed[1].scope == RecordScope.BACKWARD_FUNCTION + # TODO(robieta): Move away from load bearing names + and node.name == "torch::autograd::AccumulateGrad" + and children + and children[0].typed[0] == _EventType.TorchOp + and children[0].name in ("aten::detach", "aten::add_") + and children[0].typed[1].inputs + and isinstance(children[0].typed[1].inputs[0], _TensorMetadata) + ): + yield None, TensorKey.from_tensor(children[0].typed[1].inputs[0]) + + # We directly instrument `torch.nn.Module` and `torch.optim.Optimizer` + # NOTE: The values captured by the python tracer are cached; they can be + # used to build up labels but do not imply that a Tensor was live at + # a particular time. 
+ elif node.typed[0] == _EventType.PyCall: + typed_fields = node.typed[1] + assert typed_fields.module is None or typed_fields.optimizer is None + if typed_fields.module is not None: + for _, p, p_grad in typed_fields.module.parameters: + yield TensorKey.from_tensor(p), TensorKey.from_tensor(p_grad) + + if typed_fields.optimizer is not None: + for p, p_grad, _ in typed_fields.optimizer.parameters: + yield TensorKey.from_tensor(p), TensorKey.from_tensor(p_grad) + + +def extract_parameters(node: _ProfilerEvent) -> Iterator[TensorKey]: + for p, p_grad in _extract_parameters_and_gradients(node): + if p is not None: + yield p + + +def extract_gradients( + node: _ProfilerEvent, +) -> Iterator[Tuple[Optional[TensorKey], TensorKey]]: + for p, p_grad in _extract_parameters_and_gradients(node): + if p_grad is not None: + yield p, p_grad + + +def get_scopes(event: Optional[_ProfilerEvent]) -> Tuple[RecordScope, ...]: + scopes = [] + while event: + if event.typed[0] == _EventType.TorchOp: + scopes.append(event.typed[1].scope) + event = event.parent + return tuple(scopes) + + +class SchemaMatcher: + """Lookup operator schema based on profiled name. + + When profiling we record the operator's name but not the schema. However + some analysis requires that information. Fortunately we can look up + registered schema from the recorded name. We do not, however, record the + overload and so we must compare the profiled arguments with all overloads + to determine viable matches. + + Note: Once https://github.com/pytorch/pytorch/issues/78871 is completed + this code will be obsolete. + """ + + @classmethod + def inputs_are_mutable(cls, t: _ExtraFields_TorchOp) -> Tuple[Optional[bool], ...]: + """Determine which inputs may have mutated based on function schema. + + Note that we don't need to resolve down to a single schema to perform + this analysis. An input is mutable if it is mutable in any overload. In + practice, however, it is overwhelmingly common to match a single + overload. If we cannot find any valid schema then we must be + conservative and assume all inputs are mutable. + """ + mutable: Optional[List[bool]] = None + for schema in cls.match_schemas(t): + mutable = mutable or [False for _ in schema.arguments] + for i, arg in enumerate(schema.arguments): + mutable[i] |= getattr(arg.alias_info, "is_write", False) + + return tuple(mutable or (None for _ in t.inputs)) + + @classmethod + def match_schemas(cls, t: _ExtraFields_TorchOp) -> Tuple[FunctionSchema, ...]: + signature = tuple( + # Tensor + TensorKey.from_tensor(i) if isinstance(i, _TensorMetadata) + # + # TensorList + else [TensorKey.from_tensor(j) for j in i] if isinstance(i, list) + # + # Scalar and uncaptured inputs. + else i + for i in t.inputs + ) + + def matches(schema) -> bool: + return len(schema.arguments) == len(signature) and all( + cls._types_match(observed, schema_arg.type) + for observed, schema_arg in zip(signature, schema.arguments) + ) + + return tuple(s for s in cls.lookup_schemas(t.name) or () if matches(s)) + + @classmethod + def _types_match(cls, observed, schema_type) -> bool: + if isinstance(schema_type, torch._C.OptionalType): + schema_type = schema_type.getElementType() + return observed is None or cls._types_match(observed, schema_type) + + if isinstance(schema_type, torch._C.AnyType): + return True + + if schema_type.isSubtypeOf(torch._C.ListType.ofTensors()): + return isinstance(observed, list) and all( + isinstance(i, TensorKey) for i in observed + ) + + type_map: Tuple[Tuple[Any, Union[type, Tuple[type, ...]]], ...] 
= ( + (torch._C.TensorType, TensorKey), + (torch._C.NoneType, type(None)), + (torch._C.BoolType, bool), + (torch._C.IntType, int), + (torch._C.FloatType, float), + (torch._C.ComplexType, complex), + (torch._C.NumberType, (bool, int, float, complex)), + ) + + for jit_type, py_types in type_map: + if isinstance(schema_type, jit_type): + return isinstance(observed, py_types) + + # Profiler only records a subset of possible argument types. If we + # reach this point then the schema must call for a type that profiler + # does not record. Thus, the schema can only be a match if `observed` + # is also None. + return observed is None + + @staticmethod + def lookup_schemas(name: str) -> Optional[Tuple[FunctionSchema, ...]]: + # TODO(robieta): + # _jit_get_schemas_for_operator is quite expensive. (~100us / call) + # Consider adding `functools.lru_cache` if that becomes an issue. + + try: + # Schema lookup will throw if `name` is malformed. (For example, + # schemas must be namespaced and schema lookup will fail if name + # does not include "::".) We simply catch the exception and return + # `None` to denote that `name` cannot be an operator name. + # + # Note that record_function annotations also go through this path, + # so it is expected that some names will not correspond to PyTorch + # operators. + if "::" not in name: + return None + return tuple(torch._C._jit_get_schemas_for_operator(name)) + except RuntimeError: + return None + + +class OpTree: + def __init__(self, result: _ProfilerResult) -> None: + self._root_nodes = result.experimental_event_tree() + self._sorted_nodes = tuple(sorted(self.dfs(), key=lambda x: x.start_time_ns)) + + def dfs(self, *args, **kwargs) -> Iterator[_ProfilerEvent]: + yield from _utils.traverse_dfs(self._root_nodes, *args, **kwargs) + + @property + def sorted_nodes(self) -> Tuple[_ProfilerEvent, ...]: + return self._sorted_nodes + + +class SizeMap: + def __init__(self, op_tree: OpTree) -> None: + self._values: Dict[TensorKey, int] = {} + + for node in op_tree.sorted_nodes: + if node.typed[0] == _EventType.TorchOp: + for t in self._flat_tensor_inputs(node.typed[1]): + self._update_values(t) + + elif node.typed[0] == _EventType.PyCall: + typed_fields = node.typed[1] + assert typed_fields.module is None or typed_fields.optimizer is None + if typed_fields.module is not None: + for _, p, p_grad in typed_fields.module.parameters: + self._update_values(p) + self._update_values(p_grad) + + if typed_fields.optimizer is not None: + for p, p_grad, state in typed_fields.optimizer.parameters: + self._update_values(p) + self._update_values(p_grad) + for _, t in state: + self._update_values(t) + + allocations: Dict[TensorKey, int] = {} + for node in op_tree.sorted_nodes: + if node.typed[0] == _EventType.Allocation: + alloc_fields = node.typed[1] + key = TensorKey.from_allocation(alloc_fields) + if key: + new_size = abs(alloc_fields.alloc_size) + prior_size = allocations.setdefault(key, new_size) + + # It is possible to resize Storage in PyTorch, however we + # key on data pointer so most resizes will be treated as a + # change in storage. The one corner case that cannot be + # handled is `realloc` which successfully resizes the + # storage. At time of writing this is not done anywhere in + # the core PyTorch codebase. + if prior_size != new_size: + delta = f"{prior_size} vs. 
{new_size}" + log.warning("Mismatch between allocation and free: %s", delta) + + self._values.update(allocations) + + def _update_values(self, t: Optional[_TensorMetadata]) -> None: + key = TensorKey.from_tensor(t) + if key is not None and t is not None and t.layout == torch.strided: + # Scalars are represented as zero dim Tensors + n = max(i[0] * i[1] for i in zip(t.sizes or [1], t.strides or [1])) + + num_bytes = n * _element_size(t.dtype) + assert num_bytes >= 0, f"{num_bytes}" + self._values[key] = max(self._values.get(key, 0), num_bytes) + + @staticmethod + def _flat_tensor_inputs(op: _ExtraFields_TorchOp) -> Iterator[_TensorMetadata]: + for i in op.inputs: + if isinstance(i, _TensorMetadata): + yield i + elif isinstance(i, list): + yield from i + + def __getitem__(self, key: TensorKey): + return self._values[key] + + +@dataclasses.dataclass() +class DataFlowEdge: + input_version: Optional[int] = None + mutated: Optional[bool] = False + + @property + def is_allocation(self) -> bool: + return self.input_version is None + + @property + def is_deletion(self) -> bool: + return self.mutated is None + + +class DataFlowNode: + def __init__(self, event: _ProfilerEvent, graph: "DataFlowGraph") -> None: + self._event = event + self._graph = graph + self._edges: Dict[TensorKey, DataFlowEdge] = self._determine_edges() + + for key, edge in self._edges.items(): + if edge.mutated and not edge.is_allocation: + self._graph.bump(key) + + # Make sure the version bumping behavior matches what we expect. + versions = {k: (v, self._graph.lookup(k)) for k, v in self.outputs.items()} + assert all(i == j for i, j in versions.values()), f"{versions}, {self._edges}" + + def _determine_edges(self) -> Dict[TensorKey, DataFlowEdge]: + subtree = tuple(_utils.traverse_dfs([self._event])) + + # Start by populating edges from op inputs and outputs. + mutable_by_key: Dict[Optional[TensorKey], Set[Optional[bool]]] = {} + for op in (i.typed[1] for i in subtree if i.typed[0] == _EventType.TorchOp): + for op_input, mutable in zip( + op.inputs, SchemaMatcher.inputs_are_mutable(op) + ): + # Tensor + if isinstance(op_input, _TensorMetadata): + key = TensorKey.from_tensor(op_input) + mutable_by_key.setdefault(key, set()).add(mutable) + + # TensorList + elif isinstance(op_input, list): + for op_input_i in op_input: + key = TensorKey.from_tensor(op_input_i) + mutable_by_key.setdefault(key, set()).add(mutable) + + edges: DefaultDict[Optional[TensorKey], DataFlowEdge] + edges = collections.defaultdict(DataFlowEdge) + for key, mutable_set in mutable_by_key.items(): + if key is not None: + edges[key].input_version = self._graph.lookup(key) if key else -1 + + # We consider an op to be mutated if we encounter a schema where it + # is a mutable argument OR if it is ambiguous. (We never explicitly + # see it in any schema.) + mutated = (True in mutable_set) or (tuple(mutable_set) == (None,)) + edges[key].mutated = mutated + + # Then handle deletions. Note that deleting a Tensor implicitly adds + # it as an input edge. + for i in subtree: + if i.typed[0] == _EventType.Allocation and i.typed[1].alloc_size < 0: + key = TensorKey.from_allocation(i.typed[1]) + edge = edges[key] + assert key is None or edge.mutated is not None, f"Double delete: {key}" + edge.mutated = None + edge.input_version = self._graph.lookup(key) if key else -1 + + # And finally handle allocations. This step must be last, because the + # previous two steps optimistically add input edges. 
+ for i in subtree: + if i.typed[0] == _EventType.Allocation and i.typed[1].alloc_size > 0: + edges[TensorKey.from_allocation(i.typed[1])].input_version = None + + # We don't need to sort the inputs, but it makes debugging and unit tests nicer. + return dict(sorted((k, v) for k, v in edges.items() if k is not None)) + + @property + def inputs(self) -> Dict[TensorKey, Tuple[bool, int]]: + return { + # MyPy can't see through `is_allocation` to know that + # `v.input_version` is not None. + k: (bool(v.mutated), cast(int, v.input_version)) + for k, v in self._edges.items() + if not v.is_allocation + } + + @property + def outputs(self) -> Dict[TensorKey, int]: + return { + k: 0 if v.input_version is None else v.input_version + 1 + for k, v in self._edges.items() + if (v.is_allocation and not v.is_deletion) or v.mutated + } + + @property + def intermediates(self) -> Tuple[TensorKey, ...]: + return tuple( + k for k, v in self._edges.items() if v.is_allocation and v.is_deletion + ) + + @property + def start_time(self) -> int: + return self._event.start_time_ns + + +class DataFlowGraph: + def __init__(self, op_tree: OpTree) -> None: + self._op_tree = op_tree + self._leaf_events = self._extract_leaf_events(op_tree) + self._active_version: Dict[TensorKey, Optional[int]] = {} + self._flow_nodes = [DataFlowNode(e, self) for e in self.leaf_events] + self._flow_nodes.sort(key=lambda x: x.start_time) + self.validate() + + @property + def flow_nodes(self) -> Tuple[DataFlowNode, ...]: + return tuple(self._flow_nodes) + + def validate(self): + # Check that each (Tensor, version) pair has a unique creation node + outputs: Set[Tuple[TensorKey, int]] = set() + for node in self.flow_nodes: + node_outputs = set(node.outputs.items()) + duplicates = outputs & node_outputs + assert not duplicates, f"{node._event.name} {node._edges} {duplicates}" + outputs |= node_outputs + + # And check that `self._nodes` forms a valid topologically sorted DAG. + tensor_versions: Dict[TensorKey, int] = {} + for node in self.flow_nodes: + for key, (_, version) in node.inputs.items(): + expected = tensor_versions.get(key, 0) + assert expected == version, (expected, version) + + for key, version in node.outputs.items(): + prior_version = tensor_versions.get(key, version) + assert version >= prior_version, (version, prior_version) + tensor_versions[key] = version + + @property + def leaf_events(self) -> Tuple[_ProfilerEvent, ...]: + return self._leaf_events + + @staticmethod + def _extract_leaf_events(op_tree: OpTree) -> Tuple[_ProfilerEvent, ...]: + """Partially traverse the op tree and extract top level ops. + + Consider the following code: + ``` + with record_function("My annotation"): + x.zero_() + y.zero_() + ``` + + The op tree (assuming no Autograd) will look like: + + TorchOp: "My annotation" + TorchOp: zero_ + TorchOp: fill_ + TorchOp: zero_ + TorchOp: fill_ + + The recursive structure of operator calls makes data flow unwieldy. + In order to simplify analysis we would like to select the highest level + ops to represent in the graph. In this case those are the `zero_` ops; + the fact that `fill_` is called is an implementation detail. We also + do not want to group everything under "My annotation" as this could + create overly coarse bundles and lose critical semantics. + + To address this issue we walk over the graph and select the topmost + torch ops ** which match at least one operator schema **. These form + the leaves of the first pass through the op tree. 
(As well as any + allocations or frees which do are not part of a kernel.) These events + form the logical nodes in our data flow graph. + """ + + leaf_events: List[_ProfilerEvent] = [] + + def leaf_op(e: _ProfilerEvent) -> bool: + return e.typed[0] == _EventType.TorchOp and ( + e.typed[1].scope == RecordScope.BACKWARD_FUNCTION + or bool(SchemaMatcher.match_schemas(e.typed[1])) + ) + + def children_fn(e: _ProfilerEvent): + if leaf_op(e) or e.tag == _EventType.Allocation: + leaf_events.append(e) + return [] + + return e.children + + for _ in op_tree.dfs(children_fn=children_fn): + pass + + return tuple(sorted(leaf_events, key=lambda x: x.start_time_ns)) + + def lookup(self, key: TensorKey) -> int: + version = self._active_version.setdefault(key, 0) + assert version is not None + return version + + def bump(self, key: TensorKey) -> None: + prior_version = self._active_version.get(key, None) + assert prior_version is not None + self._active_version[key] = prior_version + 1 + + def delete(self, key: TensorKey) -> None: + assert self._active_version.setdefault(key, 0) is not None + self._active_version[key] = None + + +@dataclasses.dataclass +class CategoryElement: + by_id: Optional[Category] = None + by_key: Dict[TensorKey, Category] = dataclasses.field(default_factory=dict) + by_version: Dict[TensorAndID, Category] = dataclasses.field(default_factory=dict) + + # Used by unit tests to check internals. (And consequently by + # MemoryProfile.lookup) This should not be used in any other capacity. + _by_id_keyset: Set[TensorKey] = dataclasses.field(default_factory=set) + + +@dataclasses.dataclass +class CategoryDict: + _values: DefaultDict[int, CategoryElement] = dataclasses.field( + default_factory=lambda: collections.defaultdict(CategoryElement) + ) + + def set_by_id(self, key: TensorKey, category: Category) -> None: + self._values[key.id].by_id = category + self._values[key.id]._by_id_keyset.add(key) + + def set_by_key(self, key: TensorKey, category: Category) -> None: + self._values[key.id].by_key[key] = category + + def set_by_version(self, key: TensorKey, version: int, category: Category) -> None: + self._values[key.id].by_version[(key, version)] = category + + def setdefault_by_version( + self, key: TensorKey, version: int, category: Category + ) -> None: + self._values[key.id].by_version.setdefault((key, version), category) + + def get(self, key: Key, version: int) -> Optional[Category]: + if isinstance(key, Key) and not isinstance(key, TensorKey): + return None + element = self._values[key.id] + return ( + element.by_id + or element.by_key.get(key, None) + or element.by_version.get((key, version), None) + ) + + +class MemoryProfile: + def __init__(self, result: _ProfilerResult) -> None: + self._op_tree = OpTree(result) + self._data_flow_graph = DataFlowGraph(self._op_tree) + self._size_map = SizeMap(self._op_tree) + self._categories = CategoryDict() + + self._set_gradients_and_temporaries() + self._set_parameters_using_python_tracer() + self._set_inputs() + self._set_parameters_using_data_flow() + self._set_activations() + self._set_optimizer_state() + self._set_autograd_detail() + + @property + def timeline(self) -> Tuple[Tuple[int, Action, KeyAndID, int], ...]: + output: List[Tuple[int, Action, KeyAndID, int]] = [] + allocation_times: Dict[Tuple[TensorKey, bool], int] = {} + live_unknown: Dict[Tuple[int, torch.device], Literal[True]] = {} + for event in self._op_tree.dfs(): + if event.typed[0] == _EventType.Allocation: + alloc_fields = event.typed[1] + alloc_size = 
alloc_fields.alloc_size + is_allocation = alloc_size > 0 + t = event.start_time_ns + + tkey = TensorKey.from_allocation(alloc_fields) + if tkey is not None: + allocation_times[(tkey, is_allocation)] = t + + else: + key = Key(alloc_fields.device) + ptr_and_device = (alloc_fields.ptr, key.device) + if is_allocation: + if ptr_and_device in live_unknown: + output.append( + (t, Action.INCREMENT_VERSION, (key, 0), alloc_size) + ) + else: + live_unknown[ptr_and_device] = True + output.append((t, Action.CREATE, (key, 0), alloc_size)) + else: + output.append((t, Action.DESTROY, (key, 0), -alloc_size)) + if not live_unknown.pop(ptr_and_device, False): + output.append( + (-1, Action.PREEXISTING, (key, 0), -alloc_size) + ) + + snapshot = self._category_snapshot() + last_version = dict(sorted(snapshot.keys())) + + events: List[Tuple[int, Action, TensorAndID]] = [ + (-1, Action.PREEXISTING, (key, version)) + for key, version in snapshot.keys() + if (key, True) not in allocation_times and version == 0 + ] + + for node in self._data_flow_graph.flow_nodes: + for key, edge in node._edges.items(): + if edge.is_allocation: + t = allocation_times[(key, True)] + events.append((t, Action.CREATE, (key, 0))) + + elif edge.mutated: + t = node._event.start_time_ns + version = edge.input_version + assert version is not None + events.append((t, Action.INCREMENT_VERSION, (key, version))) + + if edge.is_deletion: + t = allocation_times[(key, False)] + events.append((t, Action.DESTROY, (key, last_version[key]))) + + output.extend( + (time, action, (key, version), self._size_map[key]) + for time, action, (key, version) in events + ) + + output.sort(key=lambda x: (x[0], x[1].value)) + return tuple(output) + + def _is_gradient(self, *args, **kwargs) -> bool: + return self._categories.get(*args, **kwargs) == Category.GRADIENT + + def _category_snapshot(self) -> Dict[TensorAndID, Optional[Category]]: + all_tensor_versions: Set[TensorAndID] = set() + + for node in self._data_flow_graph.flow_nodes: + all_tensor_versions.update(((k, v) for k, (_, v) in node.inputs.items())) + all_tensor_versions.update((key, 0) for key in node.intermediates) + all_tensor_versions.update(node.outputs.items()) + + for i in self._categories._values.values(): + all_tensor_versions.update((key, 0) for key in i._by_id_keyset) + + return { + (key, version): self._categories.get(key, version) + for key, version in sorted(all_tensor_versions) + } + + def _any_version_depends_on_gradient(self) -> Set[int]: + """Extract IDs of Tensors which depend or will depend on a gradient. + + Note that this weakened definition of "depends" requires us to loop + over the data flow graph multiple times because it allows dependency + information to flow backward through edges and removes the guarantee + that nodes are topologically sorted. (Or indeed, even that a valid + topological order exists.) Put another way, we have converted an + acyclic data flow graph into a cyclic graph and we are attempting to + partition cycles involving a gradient from the rest of the graph. 
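        As a minimal illustration of the fixed-point iteration implemented
        below (toy sets of IDs standing in for real DataFlowNode objects,
        not the actual data structures):

        ```
        def propagate(nodes, seeds):
            # nodes: iterable of (input_ids, output_ids); seeds: tainted IDs.
            tainted = set(seeds)
            while True:
                before = len(tainted)
                for inputs, outputs in nodes:
                    if tainted & set(inputs):
                        tainted |= set(outputs)
                if len(tainted) == before:
                    return tainted

        # param_v1 is produced from a gradient, and the activation from param_v1.
        propagate([({"grad", "param_v0"}, {"param_v1"}),
                   ({"param_v1", "x"}, {"act"})], {"grad"})
        # -> {"grad", "param_v1", "act"}
        ```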
+ """ + depends_on_gradient: Set[int] = set() + while True: + start_size = len(depends_on_gradient) + for node in self._data_flow_graph.flow_nodes: + ids = tuple( + key.id + for key, (_, version) in node.inputs.items() + if self._categories.get(key, version) + in (Category.GRADIENT, Category.PARAMETER) + or key.id in depends_on_gradient + ) + + if ids: + depends_on_gradient.update(ids) + depends_on_gradient.update(key.id for key in node.outputs) + + # We are guaranteed to exit because there is a finite set of + # TensorAndID pairs. In practice we do not expect to loop more than + # three times: once to identify the core parameter update loop, + # once to fold the first step into that loop, and a third time + # where no new elements are added. + if len(depends_on_gradient) == start_size: + return depends_on_gradient + + def _set_gradients_and_temporaries(self) -> None: + """Mark Tensors which are unambiguous and simple to reason about.""" + + # Gradients are straightforward to detect. We directly check the + # `.grad` property in the Python tracer, and we can detect any new + # gradient Tensors from `AccumulateGrad` ops. + for event in self._op_tree.dfs(): + for _, p_grad in extract_gradients(event): + self._categories.set_by_id(p_grad, Category.GRADIENT) + + # Similarly, temporary Tensors are easy to identify and are useful to + # flag since they can make memory use "spikier" than one would + # otherwise expect. + for node in self._data_flow_graph.flow_nodes: + for i in node.intermediates: + self._categories.set_by_key(i, Category.TEMPORARY) + + def _set_parameters_using_python_tracer(self) -> None: + for event in self._op_tree.dfs(): + for p in extract_parameters(event): + if p is not None: + self._categories.set_by_id(p, Category.PARAMETER) + + def _set_inputs(self) -> None: + """Mark inputs based on which Tensors are updated using gradients. + + The process for differentiating between inputs and activations is more + involved. Most Tensors in a training loop depend on at least one + gradient: parameters depend on them through updates, and activations + and optimizer state depend on them transitively through parameters. + Critically, we do not need to know which Tensors are parameters to + apply this method; we can simply walk the data flow graph to build the + set of all values which depend on a gradient and then obtain the set + of inputs from the conjugate set. + + There is, however, one hiccup. The first time we see a parameter is + generally on the forward pass of the first step. We know from + inspection of the data flow graph that v1 of that Tensor depends on + a gradient (provided we profile an optimizer step), but not v0. To + address this problem we weaken the definition of "depends on a + gradient" to "any version of this Tensor depends on a gradient", + which in turn strengthens the criteria for the input set enough to + filter the activations in the forward pass of the first step.""" + + # All of this analysis is predicated on using at least one training + # step (or parameters from the python tracer) to partition the graph. + # Absent that we cannot determine which Tensors are inputs and which + # ones are part of the model. + depends_on_gradient = self._any_version_depends_on_gradient() + + # We only want to annotate Tensors which actually contribute to the + # model calculation. 
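        # (Editorial annotation) The reverse traversal below collects,
        # roughly, every (Tensor, version) that is upstream of a node touching
        # a gradient or parameter; only members of this set can become INPUT
        # candidates further down.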
+ produces_gradient: Set[TensorAndID] = set() + for node in reversed(self._data_flow_graph.flow_nodes): + tensors = {(key, version) for key, (_, version) in node.inputs.items()} + tensors |= node.outputs.items() + if any( + self._categories.get(*i) in (Category.GRADIENT, Category.PARAMETER) + or i in produces_gradient + for i in tensors + ): + produces_gradient |= tensors + + # Don't include Tensors created in the backward pass, as these are + # generally Autograd implementation details rather than proper inputs. + input_candidates = produces_gradient.copy() + for node in self._data_flow_graph.flow_nodes: + if RecordScope.BACKWARD_FUNCTION in get_scopes(node._event): + input_candidates -= set(node.outputs.items()) + + for key, version in input_candidates: + if key.id not in depends_on_gradient: + self._categories.setdefault_by_version(key, version, Category.INPUT) + + def _set_parameters_using_data_flow(self) -> None: + """Deduce which Tensors are parameters. + + Consider the following code for the step of SGD with momentum + (nesterov=False), where `d_p` is the gradient of `param` and `buf` is + the momentum buffer. + ``` + buf.mul_(momentum).add_(d_p, alpha=1 - dampening) + d_p = buf + param.add_(d_p, alpha=-lr) + ``` + Both `param` and `buf` take a gradient and perform an in-place update. + + The python tracer will inspect calls to `nn.Module.forward` and + `optim.Optimizer.step` to extract parameter and optimizer state + respectively (including parameters), so this is generally a non-issue. + + However as a fallback we can also exploit several properties of + parameters to distinguish them from other model state. + + First, they are directly used in the forward pass. (At this point we + haven't established which parts of the graph correspond to the forward + pass but we can deduce enough to suffice.) Some mutable state such as + batch norm moving averages also contribute to the forward pass, but + optimizer state does not. + + Second, a parameter is by definition used to compute at least one + gradient and depends on at least one gradient. + """ + snapshot = self._category_snapshot() + + # Determine which Tensors might be parameters based on forward pass + # data flow. Note this these are only candidates; we filter nodes that + # we know are part of the backward pass but that doesn't guarantee that + # they are part of the forward pass. + candidate_parameters: Set[TensorAndID] = set() + candidate_fwd_tensors: Set[TensorAndID] = { + i for i, category in snapshot.items() if category == Category.INPUT + } + + for node in self._data_flow_graph.flow_nodes: + inputs = {(key, value) for key, (_, value) in node.inputs.items()} + if ( + # Don't check nodes in the backward pass. + RecordScope.BACKWARD_FUNCTION not in get_scopes(node._event) + and not any(self._is_gradient(*i) for i in inputs) + and not any(self._is_gradient(*i) for i in node.outputs.items()) + # + # and only check nodes which depend on an input. 
+ and candidate_fwd_tensors.intersection(inputs) + ): + candidate_fwd_tensors |= node.outputs.items() + candidate_parameters |= inputs.difference(candidate_fwd_tensors) + + # Require that each parameter eventually contributes to the value of a gradient + used_for_gradient: Set[TensorAndID] = set() + for node in reversed(self._data_flow_graph.flow_nodes): + if any( + self._is_gradient(*i) or i in used_for_gradient + for i in node.outputs.items() + ): + for key, (_, version) in node.inputs.items(): + used_for_gradient.add((key, version)) + candidate_parameters.intersection_update(used_for_gradient) + + # and depends on a gradient. + parameter_keys = {key.id for key, _ in candidate_parameters} + parameter_keys &= self._any_version_depends_on_gradient() + + for key, _ in snapshot.keys(): + if key.id in parameter_keys: + self._categories.set_by_id(key, Category.PARAMETER) + + def _set_activations(self) -> None: + """Flood the graph to identify activations.""" + + required = {Category.INPUT, Category.ACTIVATION} + also_allowed = {Category.PARAMETER, Category.TEMPORARY} + for node in self._data_flow_graph.flow_nodes: + inputs = {(key, value) for key, (_, value) in node.inputs.items()} + input_categories = {self._categories.get(*i) for i in inputs} + + if ( + (input_categories & required) + and not (input_categories - (required | also_allowed)) + # + # Stop filling when we reach the backward pass. + and RecordScope.BACKWARD_FUNCTION not in get_scopes(node._event) + ): + for i in node.outputs.items(): + self._categories.setdefault_by_version(*i, Category.ACTIVATION) + + def _set_optimizer_state(self) -> None: + for event in self._op_tree.dfs(): + if event.typed[0] == _EventType.PyCall and event.typed[1].optimizer: + parameters = event.typed[1].optimizer.parameters + for _, t in it.chain(*[state for _, _, state in parameters]): + key = TensorKey.from_tensor(t) + if key is not None: + self._categories.set_by_id(key, Category.OPTIMIZER_STATE) + + def _set_autograd_detail(self): + prior = {None, Category.AUTOGRAD_DETAIL} + for node in self._data_flow_graph.flow_nodes: + if RecordScope.BACKWARD_FUNCTION in get_scopes(node._event): + for key, version in node.outputs.items(): + if version == 0 or self._categories.get(key, version - 1) in prior: + self._categories.setdefault_by_version( + key, version, Category.AUTOGRAD_DETAIL + ) + + +class MemoryProfileTimeline: + def __init__(self, memory_profile): + """The minimum representation of the memory profile timeline + includes the memory timeline and categories. The timeline + consists of [timestamp, action, (TensorKey, version), numbytes] + elements, to denote any actions (pre-existing, create, destroy, + or increment_version) that occurred to a specific Tensor for a + chunk of memory. The categories help map each (TensorKey, + version) pair into a category.""" + self.timeline = memory_profile.timeline + self.categories = memory_profile._categories + + def _coalesce_timeline(self, device_str): + """Convert the memory timeline and categories into a memory plot + consisting of timestamps and their respective sizes by category + for a given device. 
+ + Input: device + Output: [timestamps, sizes by category] + """ + device = torch.device(device_str) + times: List[int] = [] + sizes: List[List[int]] = [] + + def update(key, version, delta): + category = ( + self.categories.get(key, version) + if isinstance(key, TensorKey) + else None + ) + index = _CATEGORY_TO_INDEX[category] + 1 + sizes[-1][index] += int(delta) + + t_min = -1 + for t, action, (key, version), numbytes in self.timeline: + if key.device != device: + continue + + # Convert timestamps from ns to us, to match trace events. + if t != -1: + t = int(t / 1000) + + # Save the smallest timestamp to populate pre-existing allocs. + if t_min == -1 or (t < t_min and t > 0): + t_min = t + + # Handle timestep + if len(times) == 0: + times.append(t) + sizes.append([0] + [0 for _ in _CATEGORY_TO_INDEX]) + + elif t != times[-1]: + times.append(t) + sizes.append(sizes[-1].copy()) + + # Handle memory and categories + if action in (Action.PREEXISTING, Action.CREATE): + update(key, version, numbytes) + + elif action == Action.INCREMENT_VERSION: + update(key, version, -numbytes) + update(key, version + 1, numbytes) + + elif action == Action.DESTROY: + update(key, version, -numbytes) + + else: + raise ValueError(f"Unknown action: {action}") + + times = [t_min if t < 0 else t for t in times] + return times, sizes + + def export_memory_timeline(self, path, device) -> None: + """Saves the memory timeline as [times, sizes by category] + as a JSON formatted file to the given path for the given + device.""" + times, sizes = self._coalesce_timeline(device) + # TODO: Write a faster serialize (orjson not available in CI) + import json + + with open(path, "w") as f: + json.dump([times, sizes], f) + + def export_memory_timeline_raw(self, path, device_str) -> None: + """Saves the memory timeline as raw memory event tuples in the + form of (timestamp, action, numbytes, category) + as a JSON formatted file to the given path for the given + device.""" + device = torch.device(device_str) + raw_events: List[Tuple[int, int, int, int]] = [] + + def get_category_index(key, version): + category = ( + self.categories.get(key, version) + if isinstance(key, TensorKey) + else None + ) + return _CATEGORY_TO_INDEX[category] + + for t, action, (key, version), numbytes in self.timeline: + if key.device != device: + continue + + if action in (Action.PREEXISTING, Action.CREATE): + raw_events.append( + ( + t, + _ACTION_TO_INDEX[action], + numbytes, + get_category_index(key, version), + ) + ) + + elif action == Action.INCREMENT_VERSION: + raw_events.append( + ( + t, + _ACTION_TO_INDEX[action], + -numbytes, + get_category_index(key, version), + ) + ) + raw_events.append( + ( + t, + _ACTION_TO_INDEX[action], + numbytes, + get_category_index(key, version + 1), + ) + ) + + elif action == Action.DESTROY: + raw_events.append( + ( + t, + _ACTION_TO_INDEX[action], + -numbytes, + get_category_index(key, version), + ) + ) + + else: + raise ValueError(f"Unknown action: {action}") + + import json + + with open(path, "w") as f: + json.dump(raw_events, f) + + def export_memory_timeline_html( + self, path, device, figsize=(20, 12), title=None + ) -> None: + """Exports the memory timeline as an HTML file which contains + the memory timeline plot embedded as a PNG file.""" + # Check if user has matplotlib installed, return gracefully if not. + import importlib.util + + matplotlib_spec = importlib.util.find_spec("matplotlib") + if matplotlib_spec is None: + print( + "export_memory_timeline_html failed because matplotlib was not found." 
+ ) + return + + from base64 import b64encode + from os import remove + from tempfile import NamedTemporaryFile + + import matplotlib.pyplot as plt + import numpy as np + + mt = self._coalesce_timeline(device) + times, sizes = np.array(mt[0]), np.array(mt[1]) + # For this timeline, start at 0 to match Chrome traces. + t_min = min(times) + times -= t_min + stacked = np.cumsum(sizes, axis=1) / 1024**3 + max_memory_allocated = torch.cuda.max_memory_allocated() + max_memory_reserved = torch.cuda.max_memory_reserved() + + # Plot memory timeline as stacked data + fig = plt.figure(figsize=figsize, dpi=80) + axes = fig.gca() + for category, color in _CATEGORY_TO_COLORS.items(): + i = _CATEGORY_TO_INDEX[category] + axes.fill_between( + times / 1e3, stacked[:, i], stacked[:, i + 1], color=color, alpha=0.7 + ) + fig.legend(["Unknown" if i is None else i.name for i in _CATEGORY_TO_COLORS]) + # Usually training steps are in magnitude of ms. + axes.set_xlabel("Time (ms)") + axes.set_ylabel("Memory (GB)") + title = "\n\n".join( + ([title] if title else []) + + [ + f"Max memory allocated: {max_memory_allocated/(10**9):.2f} GB \n" + f"Max memory reserved: {max_memory_reserved/(10**9):.2f} GB" + ] + ) + axes.set_title(title) + + # Embed the memory timeline image into the HTML file + tmpfile = NamedTemporaryFile("wb", suffix=".png", delete=False) + tmpfile.close() + fig.savefig(tmpfile.name, format="png") + + with open(tmpfile.name, "rb") as tmp: + encoded = b64encode(tmp.read()).decode("utf-8") + html = f""" +GPU Memory Timeline HTML + + + +""" + + with open(path, "w") as f: + f.write(html) + remove(tmpfile.name) diff --git a/env-llmeval/lib/python3.10/site-packages/torch/profiler/_pattern_matcher.py b/env-llmeval/lib/python3.10/site-packages/torch/profiler/_pattern_matcher.py new file mode 100644 index 0000000000000000000000000000000000000000..02e9b014d3080ad584374db9fad7cdae97bb496e --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torch/profiler/_pattern_matcher.py @@ -0,0 +1,662 @@ +import json +import math +import os +import re +from typing import Dict, List, Optional, Set + +import torch +import torch.utils.benchmark as benchmark +from torch._C._profiler import ( + _EventType, + _ExtraFields_PyCall, + _ExtraFields_PyCCall, + _ExtraFields_TorchOp, + _ProfilerEvent, +) +from torch.profiler import profile +from torch.profiler._utils import index_of_first_match, traverse_bfs, traverse_dfs + + +class Pattern: + """ + Base class for all patterns, subclass this class and implement match() + to define custom patterns. + + In subclass, define description and skip property. + """ + + def __init__(self, prof: profile, should_benchmark: bool = False): + self.prof = prof + self.should_benchmark = should_benchmark + self.name = "Please specify a name for pattern" + self.description = "Please specify a description for pattern" + self.url = "" + assert prof.profiler is not None and prof.profiler.kineto_results is not None + self.event_tree = prof.profiler.kineto_results.experimental_event_tree() + self.tid_root: Dict[int, List[_ProfilerEvent]] = {} + for event in self.event_tree: + self.tid_root.setdefault(event.start_tid, []).append(event) + + @property + def skip(self): + return False + + def report(self, event: _ProfilerEvent): + msg = ( + f"{self.description}\n[Source Code Location] {source_code_location(event)}" + ) + return msg + + def eventTreeTraversal(self): + """ + Traverse the event tree and yield all events. + Override this method in subclass to customize the traversal. 
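        For example, a minimal custom pattern (hypothetical name and rule;
        ForLoopIndexingPattern below overrides this method to use BFS
        instead):

            class CopyPattern(Pattern):
                def __init__(self, prof, should_benchmark=False):
                    super().__init__(prof, should_benchmark)
                    self.name = "Copy Pattern"
                    self.description = "Found aten::copy_ calls."

                def match(self, event):
                    return event.name == "aten::copy_"

            # events = CopyPattern(prof).matched_events()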
+ """ + yield from traverse_dfs(self.event_tree) + + def summary(self, events: List[_ProfilerEvent]): + default_summary = f"{self.name}: {len(events)} events matched." + if self.should_benchmark: + # If benchmark summary is not empty, use it. + return ( + self.benchmark_summary(events) + if hasattr(self, "benchmark") # type: ignore[attr-defined] + else default_summary + ) + return default_summary + + def benchmark_summary(self, events: List[_ProfilerEvent]): + def format_time(time_ns: int): + unit_lst = ["ns", "us", "ms"] + for unit in unit_lst: + if time_ns < 1000: + return f"{time_ns:.2f} {unit}" + time_ns //= 1000 + return f"{time_ns:.2f} s" + + assert hasattr(self, "benchmark"), "Please implement benchmark()" + shapes_factor_map = self.benchmark(events) # type: ignore[attr-defined] + original_time = sum(event.duration_time_ns for event in events) + new_time = sum( + shapes_factor_map[input_shapes(event)] * event.duration_time_ns + for event in events + ) + return ( + f"{self.name}: {len(events)} events matched. " + f"Total Estimated Speedup: {format_time(original_time - new_time)} ({round(original_time/new_time, 2)}X)" + ) + + def match(self, event: _ProfilerEvent): + """ + Return True if the event matches the pattern. + This method should be overriden in subclass. + """ + raise NotImplementedError + + def matched_events(self): + if self.skip: + return [] + matched_events = [] + for event in self.eventTreeTraversal(): + if self.match(event): + matched_events.append(event) + return matched_events + + def root_of(self, event: _ProfilerEvent): + while event.parent: + event = event.parent + return event + + def siblings_of(self, event: _ProfilerEvent): + if event.parent: + children = event.parent.children + else: + children = self.tid_root[event.start_tid] + index = children.index(event) + return children[:index], children[index + 1 :] + + def next_of(self, event: _ProfilerEvent): + _, next_events = self.siblings_of(event) + return next_events[0] if next_events else None + + def prev_of(self, event: _ProfilerEvent): + prev_events, _ = self.siblings_of(event) + return prev_events[-1] if prev_events else None + + def go_up_until(self, event: _ProfilerEvent, predicate): + if not event: + return None + while event.parent and not predicate(event): + event = event.parent + return event + + +# Patterns + + +class NamePattern(Pattern): + def __init__(self, prof: profile, name: str, should_benchmark: bool = False): + super().__init__(prof, should_benchmark) + self.description = f"Matched Name Event: {name}" + self.name = name + + def match(self, event: _ProfilerEvent): + return re.search(self.name, event.name) is not None + + +class ExtraCUDACopyPattern(Pattern): + """ + This pattern identifies if we creates a constant tensor on CPU and immediately moves it to GPU. + example: torch.zeros((100, 100)).to("cuda") + + Pattern: + build-in method |build-in method + ... | aten::to + aten::fill_/aten::zero_ | aten::_to_copy + + Algorithm: + We start at node aten::to, go parent events' previous events, + and check if we have a aten::fill_/aten::zero_ as we keep going down the tree. + We always select the last child in the children list when we go down the tree. + If at any step we failed, it is not a match. + """ + + def __init__(self, prof: profile, should_benchmark: bool = False): + super().__init__(prof, should_benchmark) + self.name = "Extra CUDA Copy Pattern" + self.description = "Filled a CPU tensor and immediately moved it to GPU. Please initialize it on GPU." 
+ self.url = "https://pytorch.org/tutorials/recipes/recipes/tuning_guide.html#create-tensors-directly-on-the-target-device" + self.init_ops = { + "aten::fill_", + "aten::zero_", + "aten::normal_", + "aten::uniform_", + } + + @property + def skip(self): + return not self.prof.with_stack or not self.prof.record_shapes + + def match(self, event): + # TODO: We should also check tensor identities + if event.name != "aten::to": + return False + to_event = event + if not event.children: + return False + event = event.children[-1] + if event.name != "aten::_to_copy": + return False + if not event.children: + return False + event = event.children[-1] + if event.name != "aten::copy_": + return False + # aten::copy_ should have the first 2 args dtype the same + dtypes = input_dtypes(event) + if len(dtypes) < 2: + return False + if dtypes[0] is None or dtypes[0] != dtypes[1]: + return False + event = to_event + # Up one level + event = event.parent + if event is None: + return False + # Check if we have a aten::fill_ in previous leaf + event = self.prev_of(event) + if event is None: + return False + while event.children: + event = event.children[-1] + # aten::zero_ is a special optimzation case where fill_ is not called + if event.name in self.init_ops: + return True + return event.name in self.init_ops + # TODO: Check if tensor is reused + + def benchmark(self, events: List[_ProfilerEvent]): + shapes_factor_map = {input_shapes(event): 0.0 for event in events} + for shape in shapes_factor_map: + size = shape[0] + to_timer = benchmark.Timer( + stmt='torch.ones(size).to("cuda")', globals={"size": size} + ) + de_timer = benchmark.Timer( + stmt='torch.ones(size, device="cuda")', globals={"size": size} + ) + to_time = to_timer.timeit(10).mean + de_time = de_timer.timeit(10).mean + shapes_factor_map[shape] = de_time / to_time + return shapes_factor_map + + +class ForLoopIndexingPattern(Pattern): + """ + This pattern identifies if we use a for loop to index a tensor that + can be vectorized. + example: + tensor = torch.empty((100, 100)) + for i in range(100): + tensor[i] = i + + Pattern: + aten::select | ... | aten::select | ... (Repeat) + + Algorithm: + We start at node aten::select, and we check if we can find this alternating patterns. + We also keep a dictionary to avoid duplicate match in the for loop. + """ + + def __init__(self, prof: profile, should_benchmark: bool = False): + super().__init__(prof, should_benchmark) + self.name = "For Loop Indexing Pattern" + self.description = "For loop indexing detected. Vectorization recommended." + self.visited: Set[int] = set() + + def eventTreeTraversal(self): + """ + We need to use BFS traversal order to avoid duplicate match. 
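        For reference, the loop-indexing idiom this class flags and a
        vectorized equivalent (illustrative only):

            t = torch.empty(100, 100)
            for i in range(100):   # flagged: one aten::select per iteration
                t[i] = i

            # vectorized, no per-row indexing:
            t = torch.arange(100, dtype=torch.float32).unsqueeze(1).expand(100, 100).clone()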
+ """ + yield from traverse_bfs(self.event_tree) + + def match(self, event: _ProfilerEvent): + if event.name != "aten::select": + return False + if event.id in self.visited: + return False + repeat_count = 1 + _, next = self.siblings_of(event) + if len(next) <= 1: + return False + + # Custom event list matching + def same_ops(list1, list2): + if len(list1) != len(list2): + return False + for op1, op2 in zip(list1, list2): + if op1.name != op2.name: + return False + return True + + # Record the ops between two aten::select + next_select_idx = index_of_first_match(next, lambda e: e.name == "aten::select") + if next_select_idx is None: + return False + indexing_ops = [event] + next[:next_select_idx] + next = next[len(indexing_ops) - 1 :] + for i in range(0, len(next), len(indexing_ops)): + if same_ops(indexing_ops, next[i : i + len(indexing_ops)]): + repeat_count += 1 + self.visited.add(next[i].id) + else: + break + return repeat_count >= 10 + + +class FP32MatMulPattern(Pattern): + def __init__(self, prof: profile, should_benchmark: bool = False): + super().__init__(prof, should_benchmark) + self.name = "FP32 MatMul Pattern" + self.description = ( + "You are currently using GPU that supports TF32. " + "Please enable TF32 by setting 'torch.backends.cuda.matmul.allow_tf32 = True'" + ) + self.url = "https://pytorch.org/docs/stable/notes/cuda.html#tensorfloat-32-tf32-on-ampere-devices" + + @property + def skip(self): + if torch.version.hip is not None: + has_tf32 = False + else: + # Anything less than sm_80 is not Ampere which doesn't support TF32 + has_tf32 = all(int(arch[3:]) >= 80 for arch in torch.cuda.get_arch_list()) + return has_tf32 is False or super().skip or not self.prof.record_shapes + + def match(self, event: _ProfilerEvent): + # If we saw this pattern once, we don't need to match it again + if event.tag != _EventType.TorchOp: + return False + assert isinstance(event.extra_fields, _ExtraFields_TorchOp) + if event.name == "aten::mm": + if event.extra_fields.allow_tf32_cublas is False: + return True + return False + + def report(self, event: _ProfilerEvent): + return self.description + + def benchmark(self, events: List[_ProfilerEvent]): + shapes_factor_map = {input_shapes(event): 0.0 for event in events} + for shape in shapes_factor_map: + matrixA = torch.randn(shape[0], device="cuda", dtype=torch.float32) + matrixB = torch.randn(shape[1], device="cuda", dtype=torch.float32) + fp32_timer = benchmark.Timer( + stmt="torch.mm(matrixA, matrixB)", + globals={"matrixA": matrixA, "matrixB": matrixB}, + ) + tf32_timer = benchmark.Timer( + stmt="torch.mm(matrixA, matrixB)", + setup="torch.backends.cuda.matmul.allow_tf32 = True", + globals={"matrixA": matrixA, "matrixB": matrixB}, + ) + torch.backends.cuda.matmul.allow_tf32 = False + fp32_time = fp32_timer.timeit(10).mean + tf32_time = tf32_timer.timeit(10).mean + shapes_factor_map[shape] = tf32_time / fp32_time + return shapes_factor_map + + +class OptimizerSingleTensorPattern(Pattern): + """ + This pattern identifies if we are using the single-tensor version of an optimizer. + example: + optimizer = torch.optim.SGD(model.parameters(), lr=0.1) + By adding foreach=True to enable multi-tensor optimizer, we can gain speedup when + the kernels are relatively small. 
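    example of the multi-tensor form suggested by this pattern (illustrative):
        optimizer = torch.optim.SGD(model.parameters(), lr=0.1, foreach=True)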
+ + Pattern: + XXXXX: _single_tenser_ + + Algorithm: + String match + """ + + def __init__(self, prof: profile, should_benchmark: bool = False): + super().__init__(prof, should_benchmark) + self.name = "Optimizer Single Tensor Pattern" + self.optimizers_with_foreach = ["adam", "sgd", "adamw"] + self.description = ( + "Deteced optimizer running with single tensor implementation. " + "Please enable multi tensor implementation by passing 'foreach=True' into optimizer." + ) + self.url = "" + + def match(self, event: _ProfilerEvent): + for optimizer in self.optimizers_with_foreach: + if event.name.endswith(f"_single_tensor_{optimizer}"): + return True + return False + + +class SynchronizedDataLoaderPattern(Pattern): + """ + This pattern identifies if we are using num_workers=0 in DataLoader. + example: + torch.utils.data.DataLoader(dataset, batch_size=batch_size) + Add num_workers=N to the arguments. N depends on system configuration. + + Pattern: + dataloader.py(...): __iter__ + dataloader.py(...): _get_iterator + NOT dataloader.py(...): check_worker_number_rationality + + Algorithm: + If we don't see check_worker_number_rationality call in the dataloader __iter__, + It is not an asynchronous dataloader. + + """ + + def __init__(self, prof: profile, should_benchmark: bool = False): + super().__init__(prof, should_benchmark) + self.name = "Synchronized DataLoader Pattern" + self.description = ( + "Detected DataLoader running with synchronized implementation. " + "Please enable asynchronous dataloading by setting num_workers > 0 when initializing DataLoader." + ) + self.url = ( + "https://pytorch.org/tutorials/recipes/recipes/tuning_guide.html" + "#enable-async-data-loading-and-augmentation" + ) + + def match(self, event: _ProfilerEvent): + def is_dataloader_function(name: str, function_name: str): + return name.startswith( + os.path.join("torch", "utils", "data", "dataloader.py") + ) and name.endswith(function_name) + + # TODO: fixme! Due to lifetime issues of the function name, this field might + # actually point to an already freed string when the even is a PyCall. + # Just silently skip this to unblock testing. + try: + event.name + except UnicodeDecodeError: + return False + + if not is_dataloader_function(event.name, "__iter__"): + return False + if not event.children: + return False + event = event.children[0] + if not is_dataloader_function(event.name, "_get_iterator"): + return False + if not event.children: + return False + event = event.children[0] + return not is_dataloader_function(event.name, "check_worker_number_rationality") + # TODO: We should also check if the loader is bottleneck. + + +class GradNotSetToNonePattern(Pattern): + """ + This pattern identifies if we are not setting grad to None in zero_grad. + example: + optimizer.zero_grad() + By setting set_to_none=True, we can gain speedup + + Pattern: + XXXXX: _zero_grad + NOT aten::zeros + aten::zero_ + + aten::zero_ is called on each parameter in the model. + We also want to make sure it is not called by aten::zeros. + + Algorithm: + String match + """ + + def __init__(self, prof: profile, should_benchmark: bool = False): + super().__init__(prof, should_benchmark) + self.name = "Gradient Set To Zero Instead of None Pattern" + self.description = ( + "Detected gradient set to zero instead of None. " + "Please add 'set_to_none=True' when calling zero_grad()." 
+ ) + self.url = ( + "https://pytorch.org/tutorials/recipes/recipes/tuning_guide.html" + "#disable-gradient-calculation-for-validation-or-inference" + ) + + def match(self, event: _ProfilerEvent): + if not event.name.endswith(": zero_grad"): + return False + if not event.children: + return False + + for sub_event in traverse_dfs(event.children): + if ( + sub_event.name == "aten::zero_" + and sub_event.parent.name != "aten::zeros" + ): + return True + # TODO: We should also check if the optimizer's numerical behavior will change. + return False + + +class Conv2dBiasFollowedByBatchNorm2dPattern(Pattern): + """ + This pattern identifies if we are enabling bias in Conv2d which is followed by BatchNorm2d. + Bias doesn't do anything when followed by batchnorm. + Pattern: + nn.Module: Conv2d | nn.Module: BatchNorm2d + ... + aten::conv2d AND dtype of third argument is not null + The third argument is the bias + Algorithm: + String match + """ + + def __init__(self, prof: profile, should_benchmark: bool = False): + super().__init__(prof, should_benchmark) + self.name = "Enabling Bias in Conv2d Followed By BatchNorm Pattern" + self.description = "Detected bias enabled in Conv2d that is followed by BatchNorm2d. Please set 'bias=False' in Conv2d." + self.url = ( + "https://pytorch.org/tutorials/recipes/recipes/tuning_guide.html" + "#disable-bias-for-convolutions-directly-followed-by-a-batch-norm" + ) + + @property + def skip(self): + return self.prof.record_shapes is False or super().skip + + def match(self, event: _ProfilerEvent): + if event.name != "aten::conv2d": + return False + if len(input_dtypes(event)) < 3 or input_dtypes(event)[2] is None: + return False + # This means bias=True + event = self.go_up_until( + event, lambda e: e.name.startswith("nn.Module: Conv2d") + ) + if not event: + return False + event = self.next_of(event) + if not event: + return False + return event.name.startswith("nn.Module: BatchNorm2d") + + +class MatMulDimInFP16Pattern(Pattern): + def __init__(self, prof: profile, should_benchmark: bool = False): + super().__init__(prof, should_benchmark) + self.name = "Matrix Multiplication Dimension Not Aligned Pattern" + self.description = "Detected matmul with dimension not aligned. Please use matmul with aligned dimension." 
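        # (Editorial annotation) match() below flags aten::mm / aten::bmm /
        # aten::addmm calls whose first input is torch.half or torch.bfloat16
        # and whose trailing two dimensions are not all multiples of 8, the
        # alignment that allows efficient Tensor Core execution.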
+ self.url = "https://pytorch.org/tutorials/recipes/recipes/tuning_guide.html#use-mixed-precision-and-amp" + + @property + def skip(self): + return not self.prof.with_stack or not self.prof.record_shapes + + def match(self, event: _ProfilerEvent): + def mutiple_of(shapes, multiple): + return all(dim % multiple == 0 for shape in shapes for dim in shape[-2:]) + + if event.name not in ("aten::mm", "aten::bmm", "aten::addmm"): + return False + if not input_dtypes(event): + return False + arg_dtype = input_dtypes(event)[0] + if arg_dtype in (torch.bfloat16, torch.half) and not mutiple_of( + input_shapes(event), 8 + ): + return True + return False + + def benchmark(self, events: List[_ProfilerEvent]): + def closest_multiple(shapes, multiple): + return [multiple * math.ceil(shape / multiple) for shape in shapes] + + shapes_factor_map = {input_shapes(event): 0.0 for event in events} + for shape in shapes_factor_map: + matrixA = torch.randn(shape[0], device="cuda", dtype=torch.float16) + matrixB = torch.randn(shape[1], device="cuda", dtype=torch.float16) + not_aligned_dim_timer = benchmark.Timer( + stmt="torch.mm(matrixA, matrixB)", + globals={"matrixA": matrixA, "matrixB": matrixB}, + ) + matrixA = torch.randn( + closest_multiple(shape[0], 8), device="cuda", dtype=torch.float16 + ) + matrixB = torch.randn( + closest_multiple(shape[1], 8), device="cuda", dtype=torch.float16 + ) + aligned_dim_timer = benchmark.Timer( + stmt="torch.mm(matrixA, matrixB)", + globals={"matrixA": matrixA, "matrixB": matrixB}, + ) + not_aligned_dim_time = not_aligned_dim_timer.timeit(10).mean + aligned_dim_time = aligned_dim_timer.timeit(10).mean + shapes_factor_map[shape] = aligned_dim_time / not_aligned_dim_time + return shapes_factor_map + + +def source_code_location(event: Optional[_ProfilerEvent]): + while event: + if event.tag == _EventType.PyCall or event.tag == _EventType.PyCCall: + assert isinstance( + event.extra_fields, (_ExtraFields_PyCall, _ExtraFields_PyCCall) + ) + if not event.extra_fields.caller.file_name.startswith("torch" + os.sep): + return f"{event.extra_fields.caller.file_name}:{event.extra_fields.caller.line_number}" + event = event.parent + return "No source code location found" + + +def input_shapes(event: _ProfilerEvent): + assert isinstance(event.extra_fields, _ExtraFields_TorchOp) + return tuple(tuple(getattr(i, "sizes", ())) for i in event.extra_fields.inputs) + + +def input_dtypes(event: _ProfilerEvent): + assert isinstance(event.extra_fields, _ExtraFields_TorchOp) + return tuple(getattr(i, "dtype", None) for i in event.extra_fields.inputs) + + +def report_all_anti_patterns( + prof, + should_benchmark: bool = False, + print_enable: bool = True, + json_report_dir: Optional[str] = None, +): + report_dict: Dict = {} + anti_patterns = [ + ExtraCUDACopyPattern(prof, should_benchmark), + # ForLoopIndexingPattern(prof, should_benchmark), + FP32MatMulPattern(prof, should_benchmark), + OptimizerSingleTensorPattern(prof, should_benchmark), + SynchronizedDataLoaderPattern(prof, should_benchmark), + GradNotSetToNonePattern(prof, should_benchmark), + Conv2dBiasFollowedByBatchNorm2dPattern(prof, should_benchmark), + MatMulDimInFP16Pattern(prof, should_benchmark), + ] + reported = set() + summaries = [] + message_list = [f"{'-'*40}TorchTidy Report{'-'*40}"] + message_list.append("Matched Events:") + + for anti_pattern in anti_patterns: + matched_events = anti_pattern.matched_events() + if not matched_events: + continue + summaries.append(anti_pattern.summary(matched_events)) + for event in matched_events: + 
report_msg = anti_pattern.report(event) + if report_msg not in reported: + message_list.append(report_msg) + reported.add(report_msg) + src_location, line_no = source_code_location(event).split(":") + report_dict.setdefault(src_location, []).append( + { + "line_number": int(line_no), + "name": anti_pattern.name, + "url": anti_pattern.url, + "message": anti_pattern.description, + } + ) + + if json_report_dir is not None: + json_report_path = os.path.join(json_report_dir, "torchtidy_report.json") + if os.path.exists(json_report_path): + with open(json_report_path) as f: + exisiting_report = json.load(f) + exisiting_report.update(report_dict) + report_dict = exisiting_report + with open(json_report_path, "w") as f: + json.dump(report_dict, f, indent=4) + + message_list.append("Summary:") + message_list += summaries + message_list.append(f"{'-'*40}TorchTidy Report{'-'*40}") + if print_enable: + print("\n".join(message_list)) diff --git a/env-llmeval/lib/python3.10/site-packages/torch/profiler/_utils.py b/env-llmeval/lib/python3.10/site-packages/torch/profiler/_utils.py new file mode 100644 index 0000000000000000000000000000000000000000..cb9469e4c983ae9c0a64650b70a30c94ea0caabd --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torch/profiler/_utils.py @@ -0,0 +1,373 @@ +import functools +import re +from collections import deque +from dataclasses import dataclass +from typing import Dict, List + +from torch.autograd import _KinetoEvent +from torch.autograd.profiler import profile + +from torch.profiler import DeviceType + + +def _traverse(tree, next_fn, children_fn=lambda x: x.children, reverse: bool = False): + order = reversed if reverse else lambda x: x + remaining = deque(order(tree)) + while remaining: + curr_event = next_fn(remaining) + yield curr_event + for child_event in order(children_fn(curr_event)): + remaining.append(child_event) + + +traverse_dfs = functools.partial(_traverse, next_fn=lambda x: x.pop(), reverse=True) +traverse_bfs = functools.partial( + _traverse, next_fn=lambda x: x.popleft(), reverse=False +) + + +@dataclass +class EventMetrics: + duration_time_ns: int = 0 + self_time_ns: int = 0 + idle_time_ns: int = 0 + queue_depth: int = 0 + + @property + def fraction_idle_time(self): + if self.duration_time_ns == 0: + return 0.0 + return self.idle_time_ns / self.duration_time_ns + + +@dataclass +class Interval: + start: int + end: int + queue_depth: int = 0 + + +class EventKey: + def __init__(self, event): + self.event = event + + def __hash__(self): + return hash(self.event.id) + + def __eq__(self, other): + return self.event.id == other.event.id + + def __repr__(self): + return f"{self.event.name}" + + def intervals_overlap(self, intervals: List[Interval]): + overlap_time = 0 + intervals = sorted(intervals, key=lambda x: x.start) + + if intervals: + overlap_start = max(self.event.start_time_ns, intervals[0].start) + overlap_end = min(self.event.end_time_ns, intervals[0].end) + + if overlap_start < overlap_end: + overlap_time += overlap_end - overlap_start + + i, j = 0, 1 + while j < len(intervals): + prev_interval = intervals[i] + curr_interval = intervals[j] + j += 1 + if prev_interval.end > curr_interval.start: + # Completely subsumed by previous interval + if prev_interval.end > curr_interval.end: + j += 1 + continue + else: + curr_interval.start = prev_interval.end + i = j + + overlap_start = max(self.event.start_time_ns, curr_interval.start) + overlap_end = min(self.event.end_time_ns, curr_interval.end) + if overlap_start < overlap_end: + overlap_time += 
overlap_end - overlap_start + + return overlap_time + + +class BasicEvaluation: + def __init__(self, prof: profile): + self.profile = prof + self.metrics: Dict[EventKey, EventMetrics] = {} + self.compute_self_time() + self.event_keys = sorted( + (e for e in self.metrics.keys()), key=lambda x: x.event.start_time_ns + ) + self.events = [e.event for e in self.event_keys] + self.cuda_events: List[_KinetoEvent] = [] + self.queue_depth_list = self.compute_queue_depth() + self.compute_idle_time() + + def compute_self_time(self): + """ + Computes event's self time(total time - time in child ops). + """ + assert self.profile.kineto_results is not None + stack = deque(self.profile.kineto_results.experimental_event_tree()) + + # standard iterating dfs + while stack: + curr_event = stack.pop() + self_time = curr_event.duration_time_ns + for child_event in curr_event.children: + self_time -= child_event.duration_time_ns + stack.append(child_event) + assert ( + EventKey(curr_event) not in self.metrics + ), f"Duplicate id: {curr_event.id}, {curr_event.name}" + self.metrics[EventKey(curr_event)] = EventMetrics(self_time_ns=self_time) + self.metrics[ + EventKey(curr_event) + ].duration_time_ns = curr_event.duration_time_ns + + def compute_queue_depth(self): + """ + Computes queue_depth at each event. This will calculate the queue depth data for + All the events in the tree. + This will return a list of Interval of queue depth data of cuda launch and kernels. + """ + assert self.profile.kineto_results is not None + cuda_event_list = self.profile.kineto_results.events() + + def is_cuda_launch_kernel(e): + # TODO: find a better way to identify cudaLaunchKernel + return e.name == "cudaLaunchKernel" + + def is_cuda_kernel(e): + # TODO: find a better way to identify CUDA Kernel + return e.device_type() == DeviceType.CUDA and "mem" not in e.name.lower() + + cuda_launch_events = sorted( + (e for e in cuda_event_list if is_cuda_launch_kernel(e)), + key=lambda x: x.start_us(), + ) + cuda_kernel_events = sorted( + (e for e in cuda_event_list if is_cuda_kernel(e)), + key=lambda x: x.start_us(), + ) + + self.cuda_events = sorted( + cuda_launch_events + cuda_kernel_events, key=lambda x: x.start_us() + ) + + kernel_mapping: Dict[_KinetoEvent, int] = {} + last_mapped_kernel = 0 + for cuda_launch_event in cuda_launch_events: + index = index_of_first_match( + cuda_kernel_events, + lambda x: x.linked_correlation_id() + == cuda_launch_event.linked_correlation_id(), + start=last_mapped_kernel, + ) + kernel_mapping[cuda_launch_event] = index + last_mapped_kernel = index if index is not None else last_mapped_kernel + + current_kernel_index = 0 + spawned_kernel_index = -1 + + all_events = cuda_launch_events + cuda_kernel_events + self.events + + def new_old_event_comparator(event): + if hasattr(event, "start_us"): + return event.start_us() * 1000 + if hasattr(event, "start_time_ns"): + return event.start_time_ns + raise Exception("Unknown Event Type") + + queue_depth_list: List[Interval] = [] + all_events.sort(key=new_old_event_comparator) + for event in all_events: + # Find latest cuda kernel event + if hasattr(event, "start_us"): + start_time = event.start_us() * 1000 + end_time = (event.start_us() + event.duration_us()) * 1000 + # Find current spawned cuda kernel event + if event in kernel_mapping and kernel_mapping[event] is not None: + spawned_kernel_index = kernel_mapping[event] + elif hasattr(event, "start_time_ns"): + start_time = event.start_time_ns # type: ignore[attr-defined] + end_time = event.end_time_ns # type: 
ignore[attr-defined] + + while ( + current_kernel_index < len(cuda_kernel_events) + and (cuda_kernel_events[current_kernel_index].start_us()) * 1000 + <= start_time + ): + current_kernel_index += 1 + current_queue_depth = spawned_kernel_index - current_kernel_index + 1 + current_queue_depth = max(current_queue_depth, 0) + + if hasattr(event, "start_us"): + queue_depth_list.append( + Interval(start_time, end_time, current_queue_depth) + ) + elif hasattr(event, "start_time_ns"): + self.metrics[EventKey(event)].queue_depth = current_queue_depth + + return queue_depth_list + + def compute_idle_time(self): + """ + Computes idle time of the profile. + """ + # Based on queue_depth_list, we can calculate idle time for all the events + idle = False + idle_start = 0 + idle_intervals: List[Interval] = [] + if self.queue_depth_list and self.events: + idle_intervals += [ + Interval(self.events[0].start_time_ns, self.queue_depth_list[0].start), + Interval(self.queue_depth_list[-1].end, self.events[-1].end_time_ns), + ] + + for data_point in self.queue_depth_list: + if data_point.queue_depth == 0 and not idle: + idle_start = data_point.end + idle = True + if data_point.queue_depth > 0 and idle: + idle_intervals.append(Interval(idle_start, data_point.start)) + idle = False + + event_list = [e.event for e in self.metrics.keys()] + for event in event_list: + self.metrics[EventKey(event)].idle_time_ns = EventKey( + event + ).intervals_overlap(idle_intervals) + + def rank_events(self, length): + """ + Filter and Rank the events based on some heuristics: + 1) Events that are in the falling phase of the queue depth. + 2) Events that have a high idle_time, self_time difference. + + Parameters: + length: The number of events to return. + """ + + # Find the interval when qd is falling to 0 + import torch + + queue_depth_list = list(reversed(self.queue_depth_list)) + qd_values = [e.queue_depth for e in queue_depth_list] + + bottom_threashold = 0 + top_threashold = 4 + decrease_interval = [] + i = 0 + while i < len(qd_values): + if qd_values[i] > bottom_threashold: + i += 1 + continue + for j in range(i + 1, len(qd_values)): + # Find next zero and if the max value between them exceeds + # the threshold, then we have a falling interval + next_minimum_idx = index_of_first_match( + qd_values, lambda x: x <= bottom_threashold, start=j + ) + peak_idx = argmax(qd_values, start=j, end=next_minimum_idx) + + # if is a valid peak, we add to list and continue + if peak_idx is not None and qd_values[peak_idx] >= top_threashold: + decrease_interval.append( + Interval( + queue_depth_list[peak_idx].start, queue_depth_list[i].start + ) + ) + i = next_minimum_idx if next_minimum_idx is not None else i + break + i += 1 + # Filter out events that are not in the decrease interval + event_list = [ + event + for event in self.metrics.keys() + if event.intervals_overlap(decrease_interval) + ] + if event_list: + self_time = torch.tensor( + [self.metrics[event].self_time_ns for event in event_list], + dtype=torch.float32, + ) + idle_time = torch.tensor( + [self.metrics[event].fraction_idle_time for event in event_list], + dtype=torch.float32, + ) + normalized_gain = (idle_time - torch.mean(idle_time)) / torch.std(idle_time) + normalized_self = (self_time - torch.mean(self_time)) / torch.std(self_time) + heuristic_score_list = normalized_gain + 0.6 * normalized_self + + # Sort events by heuristic + event_list = [ + event + for _, event in sorted( + zip(heuristic_score_list, event_list), + key=lambda x: x[0], + reverse=True, + ) + ] + 
event_list = event_list[:length] + return event_list + + def get_optimizable_events(self, length: int = 1, print_enable: bool = True): + event_list = self.rank_events(length) + if not print_enable: + return event_list + output = "Optimizable events:\n" if event_list else "No events to optimize\n" + + output += "\n".join( + [ + f"""{'-'*80} +Event: {event} +Source code location: {source_code_location(event.event)} +Percentage idle time: {self.metrics[event].fraction_idle_time * 100:.2f}% +{'-'*80}""" + for event in event_list + ] + ) + if print_enable: + print(output) + return event_list + + +def index_of_first_match(seq, predicate, start=0, end=None): + if end is None or end >= len(seq): + end = len(seq) + for i in range(start, end): + if predicate(seq[i]): + return i + return None + + +def argmax(seq, key=lambda x: x, start=0, end=None): + seq = seq[start:end] + if len(seq) == 0: + return None + return seq.index(max(seq, key=key)) + start + + +def source_code_location(event): + while event is not None: + match = re.search(r"\.py\(.*\)", event.name) + if match is None: + event = event.parent + continue + return event.name + return "No source code location found" + + +# Provide an OSS workaround for cudagraphs + CUPTI issue +# https://github.com/pytorch/pytorch/issues/75504 +# TODO(dberard) - deprecate / remove workaround for CUDA >= 12, when +# we stop supporting older CUDA versions. +def _init_for_cuda_graphs(): + from torch.autograd.profiler import profile + + with profile(): + pass diff --git a/env-llmeval/lib/python3.10/site-packages/torch/profiler/itt.py b/env-llmeval/lib/python3.10/site-packages/torch/profiler/itt.py new file mode 100644 index 0000000000000000000000000000000000000000..4d072957d6fe4e611619495c98bb2ee1629e3219 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torch/profiler/itt.py @@ -0,0 +1,78 @@ +from contextlib import contextmanager + +try: + from torch._C import _itt +except ImportError: + + class _ITTStub: + @staticmethod + def _fail(*args, **kwargs): + raise RuntimeError( + "ITT functions not installed. Are you sure you have a ITT build?" + ) + + @staticmethod + def is_available(): + return False + + rangePush = _fail + rangePop = _fail + mark = _fail + + _itt = _ITTStub() # type: ignore[assignment] + + +__all__ = ["is_available", "range_push", "range_pop", "mark", "range"] + + +def is_available(): + """ + Check if ITT feature is available or not + """ + return _itt.is_available() + + +def range_push(msg): + """ + Pushes a range onto a stack of nested range span. Returns zero-based + depth of the range that is started. + + Arguments: + msg (str): ASCII message to associate with range + """ + return _itt.rangePush(msg) + + +def range_pop(): + """ + Pops a range off of a stack of nested range spans. Returns the + zero-based depth of the range that is ended. + """ + return _itt.rangePop() + + +def mark(msg): + """ + Describe an instantaneous event that occurred at some point. + + Arguments: + msg (str): ASCII message to associate with the event. + """ + return _itt.mark(msg) + + +@contextmanager +def range(msg, *args, **kwargs): + """ + Context manager / decorator that pushes an ITT range at the beginning + of its scope, and pops it at the end. If extra arguments are given, + they are passed as arguments to msg.format(). 
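    Example (illustrative; requires an ITT-enabled build, otherwise the
    underlying calls raise RuntimeError):

        from torch.profiler import itt

        if itt.is_available():
            with itt.range("train_step {}", 3):   # pushed as "train_step 3"
                ...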
+ + Args: + msg (str): message to associate with the range + """ + range_push(msg.format(*args, **kwargs)) + try: + yield + finally: + range_pop() diff --git a/env-llmeval/lib/python3.10/site-packages/torch/profiler/profiler.py b/env-llmeval/lib/python3.10/site-packages/torch/profiler/profiler.py new file mode 100644 index 0000000000000000000000000000000000000000..c7d3bad02572c1c2c94230803469fb38499142c2 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torch/profiler/profiler.py @@ -0,0 +1,754 @@ +import gzip +import json +import os +import tempfile +from enum import Enum +from functools import partial +from typing import Any, Callable, Dict, Iterable, List, Optional, Tuple +from warnings import warn + +import torch +import torch.autograd.profiler as prof +from torch._C import _get_privateuse1_backend_name +from torch._C._profiler import ( + _add_execution_trace_observer, + _disable_execution_trace_observer, + _enable_execution_trace_observer, + _ExperimentalConfig, + _remove_execution_trace_observer, +) +from torch.autograd import kineto_available, ProfilerActivity +from torch.profiler._memory_profiler import MemoryProfile, MemoryProfileTimeline + + +__all__ = [ + "supported_activities", + "ProfilerAction", + "schedule", + "tensorboard_trace_handler", + "profile", + "ExecutionTraceObserver", +] +PROFILER_STEP_NAME = "ProfilerStep" + + +def supported_activities(): + """ + Returns a set of supported profiler tracing activities. + + Note: profiler uses CUPTI library to trace on-device CUDA kernels. + In case when CUDA is enabled but CUPTI is not available, passing + ``ProfilerActivity.CUDA`` to profiler results in using the legacy CUDA + profiling code (same as in the legacy ``torch.autograd.profiler``). + This, in turn, results in including CUDA time in the profiler table output, + but not in the JSON trace. + """ + return torch.autograd._supported_activities() + + +class _KinetoProfile: + """Low-level profiler wrap the autograd profile + + Args: + activities (iterable): list of activity groups (CPU, CUDA) to use in profiling, supported values: + ``torch.profiler.ProfilerActivity.CPU``, ``torch.profiler.ProfilerActivity.CUDA``. + Default value: ProfilerActivity.CPU and (when available) ProfilerActivity.CUDA. + record_shapes (bool): save information about operator's input shapes. + profile_memory (bool): track tensor memory allocation/deallocation (see ``export_memory_timeline`` + for more details). + with_stack (bool): record source information (file and line number) for the ops. + with_flops (bool): use formula to estimate the FLOPS of specific operators + (matrix multiplication and 2D convolution). + with_modules (bool): record module hierarchy (including function names) + corresponding to the callstack of the op. e.g. If module A's forward call's + module B's forward which contains an aten::add op, + then aten::add's module hierarchy is A.B + Note that this support exist, at the moment, only for TorchScript models + and not eager mode models. + + experimental_config (_ExperimentalConfig) : A set of experimental options + used by profiler libraries like Kineto. Note, backward compatibility is not guaranteed. + + .. note:: + This API is experimental and subject to change in the future. + + Enabling shape and stack tracing results in additional overhead. + When record_shapes=True is specified, profiler will temporarily hold references to the tensors; + that may further prevent certain optimizations that depend on the reference count and introduce + extra tensor copies. 
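    Example (illustrative, using the higher-level ``profile`` API defined
    later in this module, which builds on this class)::

        import torch
        from torch.profiler import profile, ProfilerActivity

        with profile(
            activities=[ProfilerActivity.CPU],
            record_shapes=True,
            profile_memory=True,
            with_stack=True,
        ) as prof:
            torch.randn(128, 128) @ torch.randn(128, 128)

        print(prof.key_averages().table(sort_by="self_cpu_time_total"))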
+ """ + + def __init__( + self, + *, + activities: Optional[Iterable[ProfilerActivity]] = None, + record_shapes: bool = False, + profile_memory: bool = False, + with_stack: bool = False, + with_flops: bool = False, + with_modules: bool = False, + experimental_config: Optional[_ExperimentalConfig] = None, + ): + self.activities = set(activities) if activities else supported_activities() + self.record_shapes = record_shapes + self.with_flops = with_flops + self.profile_memory = profile_memory + self.with_stack = with_stack + self.with_modules = with_modules + self.experimental_config = experimental_config + self.profiler: Optional[prof.profile] = None + self.mem_tl: Optional[MemoryProfileTimeline] = None + self.use_device = None + privateuse1_backend = _get_privateuse1_backend_name() + if privateuse1_backend != "privateuseone": + self.use_device = privateuse1_backend + + def start(self): + self.prepare_trace() + self.start_trace() + + def stop(self): + self.stop_trace() + + def prepare_trace(self): + self.profiler = prof.profile( + use_cuda=(ProfilerActivity.CUDA in self.activities), + use_cpu=(ProfilerActivity.CPU in self.activities), + use_mtia=(ProfilerActivity.MTIA in self.activities), + use_device=None, + record_shapes=self.record_shapes, + with_flops=self.with_flops, + profile_memory=self.profile_memory, + with_stack=self.with_stack, + with_modules=self.with_modules, + use_kineto=True, + experimental_config=self.experimental_config, + ) + self.profiler._prepare_trace() + + def start_trace(self): + assert self.profiler is not None + self.profiler._start_trace() + + if self.profile_memory: + self.add_metadata_json("profile_memory", "1") + if self.with_stack: + self.add_metadata_json("with_stack", "1") + if self.record_shapes: + self.add_metadata_json("record_shapes", "1") + if self.with_modules: + self.add_metadata_json("with_modules", "1") + if self.with_flops: + self.add_metadata_json("with_flops", "1") + + if kineto_available(): + dist_info = self._get_distributed_info() + if dist_info: + self.add_metadata_json("distributedInfo", json.dumps(dist_info)) + + if hasattr(torch, "_inductor"): + import torch._inductor.config as inductor_config + + if inductor_config.triton.cudagraphs: + os.environ["DISABLE_CUPTI_LAZY_REINIT"] = "1" + self.add_metadata_json("DISABLE_CUPTI_LAZY_REINIT", "1") + # FIXME: CUDA Graph does not work well with CUPTI teardown. + # 1) crashes on 1st lazy CUPTI re-init after teardown (CUDA 11) + # 2) crashes on 2nd non-lazy CUPTI re-init after teardown (CUDA 12) + # Workaround: turn off CUPTI teardown when using CUDA Graphs. + os.environ["TEARDOWN_CUPTI"] = "0" + + def stop_trace(self): + assert self.profiler is not None + self.profiler.__exit__(None, None, None) + + def export_chrome_trace(self, path: str): + """ + Exports the collected trace in Chrome JSON format. + """ + assert self.profiler + if path.endswith(".gz"): + fp = tempfile.NamedTemporaryFile("w+t", suffix=".json", delete=False) + fp.close() + retvalue = self.profiler.export_chrome_trace(fp.name) + with open(fp.name) as fin: + with gzip.open(path, "wt") as fout: + fout.writelines(fin) + os.remove(fp.name) + return retvalue + else: + return self.profiler.export_chrome_trace(path) + + def export_stacks(self, path: str, metric: str = "self_cpu_time_total"): + """Save stack traces in a file in a format suitable for visualization. + + Args: + path (str): save stacks file to this location; + metric (str): metric to use: "self_cpu_time_total" or "self_cuda_time_total" + + .. 
note:: + Example of using FlameGraph tool: + + - git clone https://github.com/brendangregg/FlameGraph + - cd FlameGraph + - ./flamegraph.pl --title "CPU time" --countname "us." profiler.stacks > perf_viz.svg + """ + assert self.profiler + return self.profiler.export_stacks(path, metric) + + def key_averages( + self, group_by_input_shape: bool = False, group_by_stack_n: int = 0 + ): + """Averages events, grouping them by operator name and (optionally) input shapes and + stack. + + .. note:: + To use shape/stack functionality make sure to set record_shapes/with_stack + when creating profiler context manager. + """ + assert self.profiler + return self.profiler.key_averages(group_by_input_shape, group_by_stack_n) + + def events(self): + """ + Returns the list of unaggregated profiler events, + to be used in the trace callback or after the profiling is finished + """ + assert self.profiler + return self.profiler.function_events + + def add_metadata(self, key: str, value: str): + """ + Adds a user defined metadata with a string key and a string value + into the trace file + """ + wrapped_value = '"' + value.replace('"', '\\"') + '"' + torch.autograd._add_metadata_json(key, wrapped_value) + + def add_metadata_json(self, key: str, value: str): + """ + Adds a user defined metadata with a string key and a valid json value + into the trace file + """ + torch.autograd._add_metadata_json(key, value) + + def _get_distributed_info(self): + import torch.distributed as dist + + if not dist.is_available() or not dist.is_initialized(): + return None + + return { + "backend": dist.get_backend(), + "rank": dist.get_rank(), + "world_size": dist.get_world_size(), + } + + def _memory_profile(self) -> MemoryProfile: + required = ("record_shapes", "profile_memory", "with_stack") + missing = [f"{i}=True" for i in required if not getattr(self, i)] + if missing: + raise ValueError(f"{', '.join(missing)} required for memory profiling.") + + assert self.profiler is not None and self.profiler.kineto_results is not None + return MemoryProfile(self.profiler.kineto_results) + + def export_memory_timeline(self, path: str, device: Optional[str] = None) -> None: + """Export memory event information from the profiler collected + tree for a given device, and export a timeline plot. There are 3 + exportable files using ``export_memory_timeline``, each controlled by the + ``path``'s suffix. + + - For an HTML compatible plot, use the suffix ``.html``, and a memory timeline + plot will be embedded as a PNG file in the HTML file. + + - For plot points consisting of ``[times, [sizes by category]]``, where + ``times`` are timestamps and ``sizes`` are memory usage for each category. + The memory timeline plot will be saved a JSON (``.json``) or gzipped JSON + (``.json.gz``) depending on the suffix. + + - For raw memory points, use the suffix ``.raw.json.gz``. Each raw memory + event will consist of ``(timestamp, action, numbytes, category)``, where + ``action`` is one of ``[PREEXISTING, CREATE, INCREMENT_VERSION, DESTROY]``, + and ``category`` is one of the enums from + ``torch.profiler._memory_profiler.Category``. + + Output: Memory timeline written as gzipped JSON, JSON, or HTML. + """ + # Default to device 0, if unset. Fallback on cpu. 
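+        # Illustrative resolution: with a registered private backend named "foo"
+        # (hypothetical), this becomes "foo:0"; otherwise "cuda:0" when CUDA is
+        # available, else "cpu".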
+ if device is None and self.use_device and self.use_device != "cuda": + device = self.use_device + ":0" + + if device is None: + device = "cuda:0" if torch.cuda.is_available() else "cpu" + + # Construct the memory timeline plot data + self.mem_tl = MemoryProfileTimeline(self._memory_profile()) + + # Depending on the file suffix, save the data as json.gz or json. + # For html, we can embed the image into an HTML file. + if path.endswith(".html"): + self.mem_tl.export_memory_timeline_html(path, device) + elif path.endswith(".gz"): + fp = tempfile.NamedTemporaryFile("w+t", suffix=".json", delete=False) + fp.close() + if path.endswith("raw.json.gz"): + self.mem_tl.export_memory_timeline_raw(fp.name, device) + else: + self.mem_tl.export_memory_timeline(fp.name, device) + with open(fp.name) as fin: + with gzip.open(path, "wt") as fout: + fout.writelines(fin) + os.remove(fp.name) + else: + self.mem_tl.export_memory_timeline(path, device) + + +class ProfilerAction(Enum): + """ + Profiler actions that can be taken at the specified intervals + """ + + NONE = 0 + WARMUP = 1 + RECORD = 2 + RECORD_AND_SAVE = 3 + + +def schedule( + *, wait: int, warmup: int, active: int, repeat: int = 0, skip_first: int = 0 +) -> Callable: + """ + Returns a callable that can be used as profiler ``schedule`` argument. The profiler will skip + the first ``skip_first`` steps, then wait for ``wait`` steps, then do the warmup for the next ``warmup`` steps, + then do the active recording for the next ``active`` steps and then repeat the cycle starting with ``wait`` steps. + The optional number of cycles is specified with the ``repeat`` parameter, the zero value means that + the cycles will continue until the profiling is finished. + """ + + def schedule_fn(step: int) -> ProfilerAction: + assert step >= 0 + if step < skip_first: + return ProfilerAction.NONE + else: + step -= skip_first + num_steps = wait + warmup + active + if repeat > 0 and step / num_steps >= repeat: + return ProfilerAction.NONE + mod_step = step % num_steps + if mod_step < wait: + return ProfilerAction.NONE + elif mod_step < wait + warmup: + return ProfilerAction.WARMUP + else: + return ( + ProfilerAction.RECORD + if mod_step < num_steps - 1 + else ProfilerAction.RECORD_AND_SAVE + ) + + assert ( + wait >= 0 and warmup >= 0 and active > 0 and repeat >= 0 and skip_first >= 0 + ), "Invalid profiler schedule arguments" + if warmup == 0: + warn("Profiler won't be using warmup, this can skew profiler results") + return schedule_fn + + +def _default_schedule_fn(_: int) -> ProfilerAction: + """ + Default profiler behavior - immediately starts recording the events, + keeps doing it on every profiler step. + """ + return ProfilerAction.RECORD + + +def tensorboard_trace_handler( + dir_name: str, worker_name: Optional[str] = None, use_gzip: bool = False +): + """ + Outputs tracing files to directory of ``dir_name``, then that directory can be + directly delivered to tensorboard as logdir. + ``worker_name`` should be unique for each worker in distributed scenario, + it will be set to '[hostname]_[pid]' by default. 
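+
+    Typical wiring (illustrative; the directory name is arbitrary)::
+
+        prof = torch.profiler.profile(
+            on_trace_ready=torch.profiler.tensorboard_trace_handler("./log"),
+        )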
+ """ + import os + import socket + import time + + def handler_fn(prof) -> None: + nonlocal worker_name + if not os.path.isdir(dir_name): + try: + os.makedirs(dir_name, exist_ok=True) + except Exception as e: + raise RuntimeError("Can't create directory: " + dir_name) from e + if not worker_name: + worker_name = f"{socket.gethostname()}_{os.getpid()}" + # Use nanosecond here to avoid naming clash when exporting the trace + file_name = f"{worker_name}.{time.time_ns()}.pt.trace.json" + if use_gzip: + file_name = file_name + ".gz" + prof.export_chrome_trace(os.path.join(dir_name, file_name)) + + return handler_fn + + +class profile(_KinetoProfile): + """Profiler context manager. + + Args: + activities (iterable): list of activity groups (CPU, CUDA) to use in profiling, supported values: + ``torch.profiler.ProfilerActivity.CPU``, ``torch.profiler.ProfilerActivity.CUDA``. + Default value: ProfilerActivity.CPU and (when available) ProfilerActivity.CUDA. + schedule (Callable): callable that takes step (int) as a single parameter and returns + ``ProfilerAction`` value that specifies the profiler action to perform at each step. + on_trace_ready (Callable): callable that is called at each step when ``schedule`` + returns ``ProfilerAction.RECORD_AND_SAVE`` during the profiling. + record_shapes (bool): save information about operator's input shapes. + profile_memory (bool): track tensor memory allocation/deallocation. + with_stack (bool): record source information (file and line number) for the ops. + with_flops (bool): use formula to estimate the FLOPs (floating point operations) of specific operators + (matrix multiplication and 2D convolution). + with_modules (bool): record module hierarchy (including function names) + corresponding to the callstack of the op. e.g. If module A's forward call's + module B's forward which contains an aten::add op, + then aten::add's module hierarchy is A.B + Note that this support exist, at the moment, only for TorchScript models + and not eager mode models. + experimental_config (_ExperimentalConfig) : A set of experimental options + used for Kineto library features. Note, backward compatibility is not guaranteed. + + use_cuda (bool): + .. deprecated:: 1.8.1 + use ``activities`` instead. + + .. note:: + Use :func:`~torch.profiler.schedule` to generate the callable schedule. + Non-default schedules are useful when profiling long training jobs + and allow the user to obtain multiple traces at the different iterations + of the training process. + The default schedule simply records all the events continuously for the + duration of the context manager. + + .. note:: + Use :func:`~torch.profiler.tensorboard_trace_handler` to generate result files for TensorBoard: + + ``on_trace_ready=torch.profiler.tensorboard_trace_handler(dir_name)`` + + After profiling, result files can be found in the specified directory. Use the command: + + ``tensorboard --logdir dir_name`` + + to see the results in TensorBoard. + For more information, see + `PyTorch Profiler TensorBoard Plugin `__ + + .. note:: + Enabling shape and stack tracing results in additional overhead. + When record_shapes=True is specified, profiler will temporarily hold references to the tensors; + that may further prevent certain optimizations that depend on the reference count and introduce + extra tensor copies. + + Examples: + + .. 
code-block:: python + + with torch.profiler.profile( + activities=[ + torch.profiler.ProfilerActivity.CPU, + torch.profiler.ProfilerActivity.CUDA, + ] + ) as p: + code_to_profile() + print(p.key_averages().table( + sort_by="self_cuda_time_total", row_limit=-1)) + + Using the profiler's ``schedule``, ``on_trace_ready`` and ``step`` functions: + + .. code-block:: python + + # Non-default profiler schedule allows user to turn profiler on and off + # on different iterations of the training loop; + # trace_handler is called every time a new trace becomes available + def trace_handler(prof): + print(prof.key_averages().table( + sort_by="self_cuda_time_total", row_limit=-1)) + # prof.export_chrome_trace("/tmp/test_trace_" + str(prof.step_num) + ".json") + + with torch.profiler.profile( + activities=[ + torch.profiler.ProfilerActivity.CPU, + torch.profiler.ProfilerActivity.CUDA, + ], + + # In this example with wait=1, warmup=1, active=2, repeat=1, + # profiler will skip the first step/iteration, + # start warming up on the second, record + # the third and the forth iterations, + # after which the trace will become available + # and on_trace_ready (when set) is called; + # the cycle repeats starting with the next step + + schedule=torch.profiler.schedule( + wait=1, + warmup=1, + active=2, + repeat=1), + on_trace_ready=trace_handler + # on_trace_ready=torch.profiler.tensorboard_trace_handler('./log') + # used when outputting for tensorboard + ) as p: + for iter in range(N): + code_iteration_to_profile(iter) + # send a signal to the profiler that the next iteration has started + p.step() + """ + + def __init__( + self, + *, + activities: Optional[Iterable[ProfilerActivity]] = None, + schedule: Optional[Callable[[int], ProfilerAction]] = None, + on_trace_ready: Optional[Callable[..., Any]] = None, + record_shapes: bool = False, + profile_memory: bool = False, + with_stack: bool = False, + with_flops: bool = False, + with_modules: bool = False, + experimental_config: Optional[_ExperimentalConfig] = None, + # deprecated: + use_cuda: Optional[bool] = None, + ): + activities_set = set(activities) if activities else supported_activities() + if use_cuda is not None: + warn("use_cuda is deprecated, use activities argument instead") + if use_cuda: + activities_set.add(ProfilerActivity.CUDA) + elif ProfilerActivity.CUDA in activities_set: + activities_set.remove(ProfilerActivity.CUDA) + assert len(activities_set) > 0, "No valid profiler activities found" + + super().__init__( + activities=activities, + record_shapes=record_shapes, + profile_memory=profile_memory, + with_stack=with_stack, + with_flops=with_flops, + with_modules=with_modules, + experimental_config=experimental_config, + ) + + if schedule: + self.schedule = schedule + # add step markers into the trace and table view + self.record_steps = True + else: + self.schedule = _default_schedule_fn + self.record_steps = False + self.on_trace_ready = on_trace_ready + self.step_num = 0 + self.current_action = self.schedule(self.step_num) + self.step_rec_fn: Optional[prof.record_function] = None + + self.action_map: Dict[ + Tuple[ProfilerAction, Optional[ProfilerAction]], List[Any] + ] = { + # key is (prev_action, current_action), value is action list corresponding to the state pair. 
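+            # For example, with schedule(wait=1, warmup=1, active=1) the per-step
+            # actions are NONE, WARMUP, RECORD_AND_SAVE, NONE, ...; stepping through
+            # them exercises the (NONE, WARMUP), (WARMUP, RECORD_AND_SAVE) and
+            # (RECORD_AND_SAVE, NONE) entries below.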
+ (ProfilerAction.NONE, ProfilerAction.NONE): [], + (ProfilerAction.NONE, ProfilerAction.WARMUP): [self.prepare_trace], + (ProfilerAction.NONE, ProfilerAction.RECORD): [ + self.prepare_trace, + self.start_trace, + ], + (ProfilerAction.NONE, ProfilerAction.RECORD_AND_SAVE): [ + self.prepare_trace, + self.start_trace, + ], + (ProfilerAction.WARMUP, ProfilerAction.NONE): [ + partial(warn, "Incorrect schedule: WARMUP followed by NONE"), + self.start_trace, + self.stop_trace, + ], + (ProfilerAction.WARMUP, ProfilerAction.WARMUP): [], + (ProfilerAction.WARMUP, ProfilerAction.RECORD): [self.start_trace], + (ProfilerAction.WARMUP, ProfilerAction.RECORD_AND_SAVE): [self.start_trace], + (ProfilerAction.RECORD, ProfilerAction.NONE): [ + partial(warn, "Incorrect schedule: RECORD followed by NONE"), + self.stop_trace, + ], + (ProfilerAction.RECORD, ProfilerAction.WARMUP): [ + partial(warn, "Incorrect schedule: RECORD followed by WARMUP"), + self.stop_trace, + ], + (ProfilerAction.RECORD, ProfilerAction.RECORD): [], + (ProfilerAction.RECORD, ProfilerAction.RECORD_AND_SAVE): [], + (ProfilerAction.RECORD_AND_SAVE, ProfilerAction.NONE): [ + self.stop_trace, + self._trace_ready, + ], + (ProfilerAction.RECORD_AND_SAVE, ProfilerAction.WARMUP): [ + self.stop_trace, + self._trace_ready, + self.prepare_trace, + ], + (ProfilerAction.RECORD_AND_SAVE, ProfilerAction.RECORD): [ + self.stop_trace, + self._trace_ready, + self.prepare_trace, + self.start_trace, + ], + (ProfilerAction.RECORD_AND_SAVE, ProfilerAction.RECORD_AND_SAVE): [ + self.stop_trace, + self._trace_ready, + self.prepare_trace, + self.start_trace, + ], + # used for exit action + (ProfilerAction.WARMUP, None): [self.start_trace, self.stop_trace], + (ProfilerAction.RECORD, None): [self.stop_trace, self._trace_ready], + (ProfilerAction.RECORD_AND_SAVE, None): [ + self.stop_trace, + self._trace_ready, + ], + } + # Start tracking increments to profiler step, this will be used + # by Kineto + prof.KinetoStepTracker.init_step_count(PROFILER_STEP_NAME) + + def __enter__(self): + self.start() + return self + + def __exit__(self, exc_type, exc_val, exc_tb): + self.stop() + prof.KinetoStepTracker.erase_step_count(PROFILER_STEP_NAME) + + def start(self): + self._transit_action(ProfilerAction.NONE, self.current_action) + if self.record_steps: + self.step_rec_fn = prof.record_function( + "ProfilerStep#" + str(self.step_num) + ) + self.step_rec_fn.__enter__() + + def stop(self): + if self.record_steps and self.step_rec_fn: + self.step_rec_fn.__exit__(None, None, None) + self._transit_action(self.current_action, None) + + def step(self): + """ + Signals the profiler that the next profiling step has started. 
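+
+        A typical training-loop sketch (illustrative; ``loader`` and ``train_step``
+        are placeholders)::
+
+            with torch.profiler.profile(
+                schedule=torch.profiler.schedule(wait=1, warmup=1, active=2)
+            ) as prof:
+                for batch in loader:
+                    train_step(batch)
+                    prof.step()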
+ """ + if self.record_steps and self.step_rec_fn: + self.step_rec_fn.__exit__(None, None, None) + prev_action = self.current_action + cur_step = self.step_num + self.step_num += 1 + self.current_action = self.schedule(self.step_num) + + self._transit_action(prev_action, self.current_action) + prof.KinetoStepTracker.increment_step(PROFILER_STEP_NAME) + + if self.record_steps: + self.step_rec_fn = prof.record_function("ProfilerStep#" + str(cur_step)) + self.step_rec_fn.__enter__() + + def _trace_ready(self): + if self.on_trace_ready: + self.on_trace_ready(self) + + def _transit_action(self, prev_action, current_action): + action_list = self.action_map.get((prev_action, current_action)) + if action_list: + for action in action_list: + action() + + +class ExecutionTraceObserver: + """Execution Trace Observer + + Each process can have a single ExecutionTraceObserver instance. The observer + can be added to record function callbacks via calling register_callback() + explicitly. Without calling unregister_callback(), repeated calls to + register_callback() will not add additional observers to record function + callbacks. Once an ExecutionTraceObserver is created, the start() and stop() + methods control when the event data is recorded. + + Deleting or calling unregister_callback() will remove the observer from the + record function callbacks, finalize the output file, and will stop + incurring any overheads. + """ + + def __init__(self): + """ + Initializes the default states. + """ + self._registered = False + self._execution_trace_running = False + + def __del__(self): + """ + Calls unregister_callback() to make sure to finalize outputs. + """ + self.unregister_callback() + + def register_callback(self, output_file_path: str): + """ + Adds ET observer to record function callbacks. The data will be + written to output_file_path. + """ + if not self._registered: + self._output_file_path = output_file_path + self._registered = _add_execution_trace_observer(output_file_path) + + def unregister_callback(self): + """ + Removes ET observer from record function callbacks. + """ + if self._registered: + self.stop() + _remove_execution_trace_observer() + self._registered = False + + @property + def is_registered(self): + """ + Returns True if the execution trace observer is registered, otherwise False. + """ + return self._registered + + def is_running(self): + """ + Returns True if the observer is running, otherwise False. + """ + return self._execution_trace_running + + def start(self): + """ + Starts to capture. + """ + if self._registered and not self._execution_trace_running: + _enable_execution_trace_observer() + self._execution_trace_running = True + + def stop(self): + """ + Stops to capture. + """ + if self._execution_trace_running: + _disable_execution_trace_observer() + self._execution_trace_running = False + + def get_output_file_path(self) -> str: + """ + Returns the output file name. 
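+
+        Illustrative: after ``register_callback("et_trace.json")`` (a hypothetical
+        path) this returns ``"et_trace.json"``; calling it before registration
+        raises ``RuntimeError``.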
+ """ + if self.is_registered: + return self._output_file_path + else: + raise RuntimeError( + "A callback to the ET profiler needs to be registered " + "first before getting the output file path" + ) diff --git a/env-llmeval/lib/python3.10/site-packages/torch/profiler/python_tracer.py b/env-llmeval/lib/python3.10/site-packages/torch/profiler/python_tracer.py new file mode 100644 index 0000000000000000000000000000000000000000..b3e624911f95812a523d4dd927a74eec7fe5171b --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torch/profiler/python_tracer.py @@ -0,0 +1,20 @@ +import os +import site +import sys +import typing + +import torch + + +def _prefix_regex() -> typing.List[str]: + raw_paths = ( + site.getsitepackages() + + sys.path + + [site.getuserbase()] + + [site.getusersitepackages()] + + [os.path.dirname(os.path.dirname(torch.__file__))] + ) + + path_prefixes = sorted({os.path.abspath(i) for i in raw_paths}, reverse=True) + assert all(isinstance(i, str) for i in path_prefixes) + return [i + os.sep for i in path_prefixes] diff --git a/env-llmeval/lib/python3.10/site-packages/torch/special/__init__.py b/env-llmeval/lib/python3.10/site-packages/torch/special/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..a25f0f7c03682ed1e11a869be6551fafdad40f34 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torch/special/__init__.py @@ -0,0 +1,1283 @@ +import torch +from torch._C import _add_docstr, _special # type: ignore[attr-defined] +from torch._torch_docs import common_args, multi_dim_common + +__all__ = [ + 'airy_ai', + 'bessel_j0', + 'bessel_j1', + 'bessel_y0', + 'bessel_y1', + 'chebyshev_polynomial_t', + 'chebyshev_polynomial_u', + 'chebyshev_polynomial_v', + 'chebyshev_polynomial_w', + 'digamma', + 'entr', + 'erf', + 'erfc', + 'erfcx', + 'erfinv', + 'exp2', + 'expit', + 'expm1', + 'gammainc', + 'gammaincc', + 'gammaln', + 'hermite_polynomial_h', + 'hermite_polynomial_he', + 'i0', + 'i0e', + 'i1', + 'i1e', + 'laguerre_polynomial_l', + 'legendre_polynomial_p', + 'log1p', + 'log_ndtr', + 'log_softmax', + 'logit', + 'logsumexp', + 'modified_bessel_i0', + 'modified_bessel_i1', + 'modified_bessel_k0', + 'modified_bessel_k1', + 'multigammaln', + 'ndtr', + 'ndtri', + 'polygamma', + 'psi', + 'round', + 'shifted_chebyshev_polynomial_t', + 'shifted_chebyshev_polynomial_u', + 'shifted_chebyshev_polynomial_v', + 'shifted_chebyshev_polynomial_w', + 'scaled_modified_bessel_k0', + 'scaled_modified_bessel_k1', + 'sinc', + 'softmax', + 'spherical_bessel_j0', + 'xlog1py', + 'xlogy', + 'zeta', +] + +Tensor = torch.Tensor + +entr = _add_docstr(_special.special_entr, + r""" +entr(input, *, out=None) -> Tensor +Computes the entropy on :attr:`input` (as defined below), elementwise. + +.. math:: + \begin{align} + \text{entr(x)} = \begin{cases} + -x * \ln(x) & x > 0 \\ + 0 & x = 0.0 \\ + -\infty & x < 0 + \end{cases} + \end{align} +""" + """ + +Args: + input (Tensor): the input tensor. + +Keyword args: + out (Tensor, optional): the output tensor. + +Example:: + >>> a = torch.arange(-0.5, 1, 0.5) + >>> a + tensor([-0.5000, 0.0000, 0.5000]) + >>> torch.special.entr(a) + tensor([ -inf, 0.0000, 0.3466]) +""") + +psi = _add_docstr(_special.special_psi, + r""" +psi(input, *, out=None) -> Tensor + +Alias for :func:`torch.special.digamma`. +""") + +digamma = _add_docstr(_special.special_digamma, + r""" +digamma(input, *, out=None) -> Tensor + +Computes the logarithmic derivative of the gamma function on `input`. + +.. 
math:: + \digamma(x) = \frac{d}{dx} \ln\left(\Gamma\left(x\right)\right) = \frac{\Gamma'(x)}{\Gamma(x)} +""" + r""" +Args: + input (Tensor): the tensor to compute the digamma function on + +Keyword args: + {out} + +.. note:: This function is similar to SciPy's `scipy.special.digamma`. + +.. note:: From PyTorch 1.8 onwards, the digamma function returns `-Inf` for `0`. + Previously it returned `NaN` for `0`. + +Example:: + + >>> a = torch.tensor([1, 0.5]) + >>> torch.special.digamma(a) + tensor([-0.5772, -1.9635]) + +""".format(**common_args)) + +gammaln = _add_docstr(_special.special_gammaln, + r""" +gammaln(input, *, out=None) -> Tensor + +Computes the natural logarithm of the absolute value of the gamma function on :attr:`input`. + +.. math:: + \text{out}_{i} = \ln \Gamma(|\text{input}_{i}|) +""" + """ +Args: + {input} + +Keyword args: + {out} + +Example:: + + >>> a = torch.arange(0.5, 2, 0.5) + >>> torch.special.gammaln(a) + tensor([ 0.5724, 0.0000, -0.1208]) + +""".format(**common_args)) + +polygamma = _add_docstr(_special.special_polygamma, + r""" +polygamma(n, input, *, out=None) -> Tensor + +Computes the :math:`n^{th}` derivative of the digamma function on :attr:`input`. +:math:`n \geq 0` is called the order of the polygamma function. + +.. math:: + \psi^{(n)}(x) = \frac{d^{(n)}}{dx^{(n)}} \psi(x) + +.. note:: + This function is implemented only for nonnegative integers :math:`n \geq 0`. +""" + """ +Args: + n (int): the order of the polygamma function + {input} + +Keyword args: + {out} + +Example:: + >>> a = torch.tensor([1, 0.5]) + >>> torch.special.polygamma(1, a) + tensor([1.64493, 4.9348]) + >>> torch.special.polygamma(2, a) + tensor([ -2.4041, -16.8288]) + >>> torch.special.polygamma(3, a) + tensor([ 6.4939, 97.4091]) + >>> torch.special.polygamma(4, a) + tensor([ -24.8863, -771.4742]) +""".format(**common_args)) + +erf = _add_docstr(_special.special_erf, + r""" +erf(input, *, out=None) -> Tensor + +Computes the error function of :attr:`input`. The error function is defined as follows: + +.. math:: + \mathrm{erf}(x) = \frac{2}{\sqrt{\pi}} \int_{0}^{x} e^{-t^2} dt +""" + r""" +Args: + {input} + +Keyword args: + {out} + +Example:: + + >>> torch.special.erf(torch.tensor([0, -1., 10.])) + tensor([ 0.0000, -0.8427, 1.0000]) +""".format(**common_args)) + +erfc = _add_docstr(_special.special_erfc, + r""" +erfc(input, *, out=None) -> Tensor + +Computes the complementary error function of :attr:`input`. +The complementary error function is defined as follows: + +.. math:: + \mathrm{erfc}(x) = 1 - \frac{2}{\sqrt{\pi}} \int_{0}^{x} e^{-t^2} dt +""" + r""" +Args: + {input} + +Keyword args: + {out} + +Example:: + + >>> torch.special.erfc(torch.tensor([0, -1., 10.])) + tensor([ 1.0000, 1.8427, 0.0000]) +""".format(**common_args)) + +erfcx = _add_docstr(_special.special_erfcx, + r""" +erfcx(input, *, out=None) -> Tensor + +Computes the scaled complementary error function for each element of :attr:`input`. +The scaled complementary error function is defined as follows: + +.. math:: + \mathrm{erfcx}(x) = e^{x^2} \mathrm{erfc}(x) +""" + r""" + +""" + r""" +Args: + {input} + +Keyword args: + {out} + +Example:: + + >>> torch.special.erfcx(torch.tensor([0, -1., 10.])) + tensor([ 1.0000, 5.0090, 0.0561]) +""".format(**common_args)) + +erfinv = _add_docstr(_special.special_erfinv, + r""" +erfinv(input, *, out=None) -> Tensor + +Computes the inverse error function of :attr:`input`. +The inverse error function is defined in the range :math:`(-1, 1)` as: + +.. 
math:: + \mathrm{erfinv}(\mathrm{erf}(x)) = x +""" + r""" + +Args: + {input} + +Keyword args: + {out} + +Example:: + + >>> torch.special.erfinv(torch.tensor([0, 0.5, -1.])) + tensor([ 0.0000, 0.4769, -inf]) +""".format(**common_args)) + +logit = _add_docstr(_special.special_logit, + r""" +logit(input, eps=None, *, out=None) -> Tensor + +Returns a new tensor with the logit of the elements of :attr:`input`. +:attr:`input` is clamped to [eps, 1 - eps] when eps is not None. +When eps is None and :attr:`input` < 0 or :attr:`input` > 1, the function will yields NaN. + +.. math:: + \begin{align} + y_{i} &= \ln(\frac{z_{i}}{1 - z_{i}}) \\ + z_{i} &= \begin{cases} + x_{i} & \text{if eps is None} \\ + \text{eps} & \text{if } x_{i} < \text{eps} \\ + x_{i} & \text{if } \text{eps} \leq x_{i} \leq 1 - \text{eps} \\ + 1 - \text{eps} & \text{if } x_{i} > 1 - \text{eps} + \end{cases} + \end{align} +""" + r""" +Args: + {input} + eps (float, optional): the epsilon for input clamp bound. Default: ``None`` + +Keyword args: + {out} + +Example:: + + >>> a = torch.rand(5) + >>> a + tensor([0.2796, 0.9331, 0.6486, 0.1523, 0.6516]) + >>> torch.special.logit(a, eps=1e-6) + tensor([-0.9466, 2.6352, 0.6131, -1.7169, 0.6261]) +""".format(**common_args)) + +logsumexp = _add_docstr(_special.special_logsumexp, + r""" +logsumexp(input, dim, keepdim=False, *, out=None) + +Alias for :func:`torch.logsumexp`. +""".format(**multi_dim_common)) + +expit = _add_docstr(_special.special_expit, + r""" +expit(input, *, out=None) -> Tensor + +Computes the expit (also known as the logistic sigmoid function) of the elements of :attr:`input`. + +.. math:: + \text{out}_{i} = \frac{1}{1 + e^{-\text{input}_{i}}} +""" + r""" +Args: + {input} + +Keyword args: + {out} + +Example:: + + >>> t = torch.randn(4) + >>> t + tensor([ 0.9213, 1.0887, -0.8858, -1.7683]) + >>> torch.special.expit(t) + tensor([ 0.7153, 0.7481, 0.2920, 0.1458]) +""".format(**common_args)) + +exp2 = _add_docstr(_special.special_exp2, + r""" +exp2(input, *, out=None) -> Tensor + +Computes the base two exponential function of :attr:`input`. + +.. math:: + y_{i} = 2^{x_{i}} + +""" + r""" +Args: + {input} + +Keyword args: + {out} + +Example:: + + >>> torch.special.exp2(torch.tensor([0, math.log2(2.), 3, 4])) + tensor([ 1., 2., 8., 16.]) +""".format(**common_args)) + +expm1 = _add_docstr(_special.special_expm1, + r""" +expm1(input, *, out=None) -> Tensor + +Computes the exponential of the elements minus 1 +of :attr:`input`. + +.. math:: + y_{i} = e^{x_{i}} - 1 + +.. note:: This function provides greater precision than exp(x) - 1 for small values of x. + +""" + r""" +Args: + {input} + +Keyword args: + {out} + +Example:: + + >>> torch.special.expm1(torch.tensor([0, math.log(2.)])) + tensor([ 0., 1.]) +""".format(**common_args)) + +xlog1py = _add_docstr(_special.special_xlog1py, + r""" +xlog1py(input, other, *, out=None) -> Tensor + +Computes ``input * log1p(other)`` with the following cases. + +.. math:: + \text{out}_{i} = \begin{cases} + \text{NaN} & \text{if } \text{other}_{i} = \text{NaN} \\ + 0 & \text{if } \text{input}_{i} = 0.0 \text{ and } \text{other}_{i} != \text{NaN} \\ + \text{input}_{i} * \text{log1p}(\text{other}_{i})& \text{otherwise} + \end{cases} + +Similar to SciPy's `scipy.special.xlog1py`. + +""" + r""" + +Args: + input (Number or Tensor) : Multiplier + other (Number or Tensor) : Argument + +.. note:: At least one of :attr:`input` or :attr:`other` must be a tensor. 
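+
+.. note:: Away from the special cases above this matches ``input * torch.log1p(other)``;
+   for instance ``torch.special.xlog1py(torch.tensor(2.), torch.tensor(3.))`` equals
+   ``2 * torch.log1p(torch.tensor(3.))`` (about ``2.7726``).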
+ +Keyword args: + {out} + +Example:: + + >>> x = torch.zeros(5,) + >>> y = torch.tensor([-1, 0, 1, float('inf'), float('nan')]) + >>> torch.special.xlog1py(x, y) + tensor([0., 0., 0., 0., nan]) + >>> x = torch.tensor([1, 2, 3]) + >>> y = torch.tensor([3, 2, 1]) + >>> torch.special.xlog1py(x, y) + tensor([1.3863, 2.1972, 2.0794]) + >>> torch.special.xlog1py(x, 4) + tensor([1.6094, 3.2189, 4.8283]) + >>> torch.special.xlog1py(2, y) + tensor([2.7726, 2.1972, 1.3863]) +""".format(**common_args)) + +xlogy = _add_docstr(_special.special_xlogy, + r""" +xlogy(input, other, *, out=None) -> Tensor + +Computes ``input * log(other)`` with the following cases. + +.. math:: + \text{out}_{i} = \begin{cases} + \text{NaN} & \text{if } \text{other}_{i} = \text{NaN} \\ + 0 & \text{if } \text{input}_{i} = 0.0 \\ + \text{input}_{i} * \log{(\text{other}_{i})} & \text{otherwise} + \end{cases} + +Similar to SciPy's `scipy.special.xlogy`. + +""" + r""" + +Args: + input (Number or Tensor) : Multiplier + other (Number or Tensor) : Argument + +.. note:: At least one of :attr:`input` or :attr:`other` must be a tensor. + +Keyword args: + {out} + +Example:: + + >>> x = torch.zeros(5,) + >>> y = torch.tensor([-1, 0, 1, float('inf'), float('nan')]) + >>> torch.special.xlogy(x, y) + tensor([0., 0., 0., 0., nan]) + >>> x = torch.tensor([1, 2, 3]) + >>> y = torch.tensor([3, 2, 1]) + >>> torch.special.xlogy(x, y) + tensor([1.0986, 1.3863, 0.0000]) + >>> torch.special.xlogy(x, 4) + tensor([1.3863, 2.7726, 4.1589]) + >>> torch.special.xlogy(2, y) + tensor([2.1972, 1.3863, 0.0000]) +""".format(**common_args)) + +i0 = _add_docstr(_special.special_i0, + r""" +i0(input, *, out=None) -> Tensor + +Computes the zeroth order modified Bessel function of the first kind for each element of :attr:`input`. + +.. math:: + \text{out}_{i} = I_0(\text{input}_{i}) = \sum_{k=0}^{\infty} \frac{(\text{input}_{i}^2/4)^k}{(k!)^2} + +""" + r""" +Args: + input (Tensor): the input tensor + +Keyword args: + {out} + +Example:: + + >>> torch.i0(torch.arange(5, dtype=torch.float32)) + tensor([ 1.0000, 1.2661, 2.2796, 4.8808, 11.3019]) + +""".format(**common_args)) + +i0e = _add_docstr(_special.special_i0e, + r""" +i0e(input, *, out=None) -> Tensor +Computes the exponentially scaled zeroth order modified Bessel function of the first kind (as defined below) +for each element of :attr:`input`. + +.. math:: + \text{out}_{i} = \exp(-|x|) * i0(x) = \exp(-|x|) * \sum_{k=0}^{\infty} \frac{(\text{input}_{i}^2/4)^k}{(k!)^2} + +""" + r""" +Args: + {input} + +Keyword args: + {out} + +Example:: + >>> torch.special.i0e(torch.arange(5, dtype=torch.float32)) + tensor([1.0000, 0.4658, 0.3085, 0.2430, 0.2070]) +""".format(**common_args)) + +i1 = _add_docstr(_special.special_i1, + r""" +i1(input, *, out=None) -> Tensor +Computes the first order modified Bessel function of the first kind (as defined below) +for each element of :attr:`input`. + +.. math:: + \text{out}_{i} = \frac{(\text{input}_{i})}{2} * \sum_{k=0}^{\infty} \frac{(\text{input}_{i}^2/4)^k}{(k!) * (k+1)!} + +""" + r""" +Args: + {input} + +Keyword args: + {out} + +Example:: + >>> torch.special.i1(torch.arange(5, dtype=torch.float32)) + tensor([0.0000, 0.5652, 1.5906, 3.9534, 9.7595]) +""".format(**common_args)) + +i1e = _add_docstr(_special.special_i1e, + r""" +i1e(input, *, out=None) -> Tensor +Computes the exponentially scaled first order modified Bessel function of the first kind (as defined below) +for each element of :attr:`input`. + +.. 
math:: + \text{out}_{i} = \exp(-|x|) * i1(x) = + \exp(-|x|) * \frac{(\text{input}_{i})}{2} * \sum_{k=0}^{\infty} \frac{(\text{input}_{i}^2/4)^k}{(k!) * (k+1)!} + +""" + r""" +Args: + {input} + +Keyword args: + {out} + +Example:: + >>> torch.special.i1e(torch.arange(5, dtype=torch.float32)) + tensor([0.0000, 0.2079, 0.2153, 0.1968, 0.1788]) +""".format(**common_args)) + +ndtr = _add_docstr(_special.special_ndtr, + r""" +ndtr(input, *, out=None) -> Tensor +Computes the area under the standard Gaussian probability density function, +integrated from minus infinity to :attr:`input`, elementwise. + +.. math:: + \text{ndtr}(x) = \frac{1}{\sqrt{2 \pi}}\int_{-\infty}^{x} e^{-\frac{1}{2}t^2} dt + +""" + r""" +Args: + {input} + +Keyword args: + {out} + +Example:: + >>> torch.special.ndtr(torch.tensor([-3., -2, -1, 0, 1, 2, 3])) + tensor([0.0013, 0.0228, 0.1587, 0.5000, 0.8413, 0.9772, 0.9987]) +""".format(**common_args)) + +ndtri = _add_docstr(_special.special_ndtri, + r""" +ndtri(input, *, out=None) -> Tensor +Computes the argument, x, for which the area under the Gaussian probability density function +(integrated from minus infinity to x) is equal to :attr:`input`, elementwise. + +.. math:: + \text{ndtri}(p) = \sqrt{2}\text{erf}^{-1}(2p - 1) + +.. note:: + Also known as quantile function for Normal Distribution. + +""" + r""" +Args: + {input} + +Keyword args: + {out} + +Example:: + >>> torch.special.ndtri(torch.tensor([0, 0.25, 0.5, 0.75, 1])) + tensor([ -inf, -0.6745, 0.0000, 0.6745, inf]) +""".format(**common_args)) + +log_ndtr = _add_docstr(_special.special_log_ndtr, + r""" +log_ndtr(input, *, out=None) -> Tensor +Computes the log of the area under the standard Gaussian probability density function, +integrated from minus infinity to :attr:`input`, elementwise. + +.. math:: + \text{log\_ndtr}(x) = \log\left(\frac{1}{\sqrt{2 \pi}}\int_{-\infty}^{x} e^{-\frac{1}{2}t^2} dt \right) + +""" + r""" +Args: + {input} + +Keyword args: + {out} + +Example:: + >>> torch.special.log_ndtr(torch.tensor([-3., -2, -1, 0, 1, 2, 3])) + tensor([-6.6077 -3.7832 -1.841 -0.6931 -0.1728 -0.023 -0.0014]) +""".format(**common_args)) + +log1p = _add_docstr(_special.special_log1p, + r""" +log1p(input, *, out=None) -> Tensor + +Alias for :func:`torch.log1p`. +""") + +sinc = _add_docstr(_special.special_sinc, + r""" +sinc(input, *, out=None) -> Tensor + +Computes the normalized sinc of :attr:`input.` + +.. math:: + \text{out}_{i} = + \begin{cases} + 1, & \text{if}\ \text{input}_{i}=0 \\ + \sin(\pi \text{input}_{i}) / (\pi \text{input}_{i}), & \text{otherwise} + \end{cases} +""" + r""" + +Args: + {input} + +Keyword args: + {out} + +Example:: + >>> t = torch.randn(4) + >>> t + tensor([ 0.2252, -0.2948, 1.0267, -1.1566]) + >>> torch.special.sinc(t) + tensor([ 0.9186, 0.8631, -0.0259, -0.1300]) +""".format(**common_args)) + +round = _add_docstr(_special.special_round, + r""" +round(input, *, out=None) -> Tensor + +Alias for :func:`torch.round`. +""") + +softmax = _add_docstr(_special.special_softmax, + r""" +softmax(input, dim, *, dtype=None) -> Tensor + +Computes the softmax function. + +Softmax is defined as: + +:math:`\text{Softmax}(x_{i}) = \frac{\exp(x_i)}{\sum_j \exp(x_j)}` + +It is applied to all slices along dim, and will re-scale them so that the elements +lie in the range `[0, 1]` and sum to 1. + +Args: + input (Tensor): input + dim (int): A dimension along which softmax will be computed. + dtype (:class:`torch.dtype`, optional): the desired data type of returned tensor. 
+ If specified, the input tensor is cast to :attr:`dtype` before the operation + is performed. This is useful for preventing data type overflows. Default: None. + +Examples:: + >>> t = torch.ones(2, 2) + >>> torch.special.softmax(t, 0) + tensor([[0.5000, 0.5000], + [0.5000, 0.5000]]) + +""") + +log_softmax = _add_docstr(_special.special_log_softmax, + r""" +log_softmax(input, dim, *, dtype=None) -> Tensor + +Computes softmax followed by a logarithm. + +While mathematically equivalent to log(softmax(x)), doing these two +operations separately is slower and numerically unstable. This function +is computed as: + +.. math:: + \text{log\_softmax}(x_{i}) = \log\left(\frac{\exp(x_i) }{ \sum_j \exp(x_j)} \right) +""" + r""" + +Args: + input (Tensor): input + dim (int): A dimension along which log_softmax will be computed. + dtype (:class:`torch.dtype`, optional): the desired data type of returned tensor. + If specified, the input tensor is cast to :attr:`dtype` before the operation + is performed. This is useful for preventing data type overflows. Default: None. + +Example:: + >>> t = torch.ones(2, 2) + >>> torch.special.log_softmax(t, 0) + tensor([[-0.6931, -0.6931], + [-0.6931, -0.6931]]) +""") + +zeta = _add_docstr(_special.special_zeta, + r""" +zeta(input, other, *, out=None) -> Tensor + +Computes the Hurwitz zeta function, elementwise. + +.. math:: + \zeta(x, q) = \sum_{k=0}^{\infty} \frac{1}{(k + q)^x} + +""" + r""" +Args: + input (Tensor): the input tensor corresponding to `x`. + other (Tensor): the input tensor corresponding to `q`. + +.. note:: + The Riemann zeta function corresponds to the case when `q = 1` + +Keyword args: + {out} + +Example:: + >>> x = torch.tensor([2., 4.]) + >>> torch.special.zeta(x, 1) + tensor([1.6449, 1.0823]) + >>> torch.special.zeta(x, torch.tensor([1., 2.])) + tensor([1.6449, 0.0823]) + >>> torch.special.zeta(2, torch.tensor([1., 2.])) + tensor([1.6449, 0.6449]) +""".format(**common_args)) + +multigammaln = _add_docstr(_special.special_multigammaln, + r""" +multigammaln(input, p, *, out=None) -> Tensor + +Computes the `multivariate log-gamma function +`_ with dimension +:math:`p` element-wise, given by + +.. math:: + \log(\Gamma_{p}(a)) = C + \displaystyle \sum_{i=1}^{p} \log\left(\Gamma\left(a - \frac{i - 1}{2}\right)\right) + +where :math:`C = \log(\pi) \cdot \frac{p (p - 1)}{4}` and :math:`\Gamma(-)` is the Gamma function. + +All elements must be greater than :math:`\frac{p - 1}{2}`, otherwise the behavior is undefiend. +""" + """ + +Args: + input (Tensor): the tensor to compute the multivariate log-gamma function + p (int): the number of dimensions + +Keyword args: + {out} + +Example:: + + >>> a = torch.empty(2, 3).uniform_(1, 2) + >>> a + tensor([[1.6835, 1.8474, 1.1929], + [1.0475, 1.7162, 1.4180]]) + >>> torch.special.multigammaln(a, 2) + tensor([[0.3928, 0.4007, 0.7586], + [1.0311, 0.3901, 0.5049]]) +""".format(**common_args)) + +gammainc = _add_docstr(_special.special_gammainc, + r""" +gammainc(input, other, *, out=None) -> Tensor + +Computes the regularized lower incomplete gamma function: + +.. math:: + \text{out}_{i} = \frac{1}{\Gamma(\text{input}_i)} \int_0^{\text{other}_i} t^{\text{input}_i-1} e^{-t} dt + +where both :math:`\text{input}_i` and :math:`\text{other}_i` are weakly positive +and at least one is strictly positive. +If both are zero or either is negative then :math:`\text{out}_i=\text{nan}`. +:math:`\Gamma(\cdot)` in the equation above is the gamma function, + +.. 
math:: + \Gamma(\text{input}_i) = \int_0^\infty t^{(\text{input}_i-1)} e^{-t} dt. + +See :func:`torch.special.gammaincc` and :func:`torch.special.gammaln` for related functions. + +Supports :ref:`broadcasting to a common shape ` +and float inputs. + +.. note:: + The backward pass with respect to :attr:`input` is not yet supported. + Please open an issue on PyTorch's Github to request it. + +""" + r""" +Args: + input (Tensor): the first non-negative input tensor + other (Tensor): the second non-negative input tensor + +Keyword args: + {out} + +Example:: + + >>> a1 = torch.tensor([4.0]) + >>> a2 = torch.tensor([3.0, 4.0, 5.0]) + >>> a = torch.special.gammaincc(a1, a2) + tensor([0.3528, 0.5665, 0.7350]) + tensor([0.3528, 0.5665, 0.7350]) + >>> b = torch.special.gammainc(a1, a2) + torch.special.gammaincc(a1, a2) + tensor([1., 1., 1.]) + +""".format(**common_args)) + +gammaincc = _add_docstr(_special.special_gammaincc, + r""" +gammaincc(input, other, *, out=None) -> Tensor + +Computes the regularized upper incomplete gamma function: + +.. math:: + \text{out}_{i} = \frac{1}{\Gamma(\text{input}_i)} \int_{\text{other}_i}^{\infty} t^{\text{input}_i-1} e^{-t} dt + +where both :math:`\text{input}_i` and :math:`\text{other}_i` are weakly positive +and at least one is strictly positive. +If both are zero or either is negative then :math:`\text{out}_i=\text{nan}`. +:math:`\Gamma(\cdot)` in the equation above is the gamma function, + +.. math:: + \Gamma(\text{input}_i) = \int_0^\infty t^{(\text{input}_i-1)} e^{-t} dt. + +See :func:`torch.special.gammainc` and :func:`torch.special.gammaln` for related functions. + +Supports :ref:`broadcasting to a common shape ` +and float inputs. + +.. note:: + The backward pass with respect to :attr:`input` is not yet supported. + Please open an issue on PyTorch's Github to request it. + +""" + r""" +Args: + input (Tensor): the first non-negative input tensor + other (Tensor): the second non-negative input tensor + +Keyword args: + {out} + +Example:: + + >>> a1 = torch.tensor([4.0]) + >>> a2 = torch.tensor([3.0, 4.0, 5.0]) + >>> a = torch.special.gammaincc(a1, a2) + tensor([0.6472, 0.4335, 0.2650]) + >>> b = torch.special.gammainc(a1, a2) + torch.special.gammaincc(a1, a2) + tensor([1., 1., 1.]) + +""".format(**common_args)) + +airy_ai = _add_docstr(_special.special_airy_ai, + r""" +airy_ai(input, *, out=None) -> Tensor + +Airy function :math:`\text{Ai}\left(\text{input}\right)`. + +""" + r""" +Args: + {input} + +Keyword args: + {out} +""".format(**common_args)) + +bessel_j0 = _add_docstr(_special.special_bessel_j0, + r""" +bessel_j0(input, *, out=None) -> Tensor + +Bessel function of the first kind of order :math:`0`. + +""" + r""" +Args: + {input} + +Keyword args: + {out} +""".format(**common_args)) + +bessel_j1 = _add_docstr(_special.special_bessel_j1, + r""" +bessel_j1(input, *, out=None) -> Tensor + +Bessel function of the first kind of order :math:`1`. + +""" + r""" +Args: + {input} + +Keyword args: + {out} +""".format(**common_args)) + +bessel_y0 = _add_docstr(_special.special_bessel_y0, + r""" +bessel_y0(input, *, out=None) -> Tensor + +Bessel function of the second kind of order :math:`0`. + +""" + r""" +Args: + {input} + +Keyword args: + {out} +""".format(**common_args)) + +bessel_y1 = _add_docstr(_special.special_bessel_y1, + r""" +bessel_y1(input, *, out=None) -> Tensor + +Bessel function of the second kind of order :math:`1`. 
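+
+Illustrative: ``torch.special.bessel_y1(torch.tensor([1.0]))`` evaluates :math:`Y_1`
+at ``1.0`` (approximately ``-0.7812``).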
+ +""" + r""" +Args: + {input} + +Keyword args: + {out} +""".format(**common_args)) + +chebyshev_polynomial_t = _add_docstr(_special.special_chebyshev_polynomial_t, + r""" +chebyshev_polynomial_t(input, n, *, out=None) -> Tensor + +Chebyshev polynomial of the first kind :math:`T_{n}(\text{input})`. + +If :math:`n = 0`, :math:`1` is returned. If :math:`n = 1`, :math:`\text{input}` +is returned. If :math:`n < 6` or :math:`|\text{input}| > 1` the recursion: + +.. math:: + T_{n + 1}(\text{input}) = 2 \times \text{input} \times T_{n}(\text{input}) - T_{n - 1}(\text{input}) + +is evaluated. Otherwise, the explicit trigonometric formula: + +.. math:: + T_{n}(\text{input}) = \text{cos}(n \times \text{arccos}(x)) + +is evaluated. + +""" + r""" +Args: + {input} + n (Tensor): Degree of the polynomial. + +Keyword args: + {out} +""".format(**common_args)) + +chebyshev_polynomial_u = _add_docstr(_special.special_chebyshev_polynomial_u, + r""" +chebyshev_polynomial_t(input, n, *, out=None) -> Tensor + +Chebyshev polynomial of the second kind :math:`U_{n}(\text{input})`. + +If :math:`n = 0`, :math:`1` is returned. If :math:`n = 1`, +:math:`2 \times \text{input}` is returned. If :math:`n < 6` or +:math:`|\text{input}| > 1`, the recursion: + +.. math:: + T_{n + 1}(\text{input}) = 2 \times \text{input} \times T_{n}(\text{input}) - T_{n - 1}(\text{input}) + +is evaluated. Otherwise, the explicit trigonometric formula: + +.. math:: + \frac{\text{sin}((n + 1) \times \text{arccos}(\text{input}))}{\text{sin}(\text{arccos}(\text{input}))} + +is evaluated. + +""" + r""" +Args: + {input} + n (Tensor): Degree of the polynomial. + +Keyword args: + {out} +""".format(**common_args)) + +chebyshev_polynomial_v = _add_docstr(_special.special_chebyshev_polynomial_v, + r""" +chebyshev_polynomial_v(input, n, *, out=None) -> Tensor + +Chebyshev polynomial of the third kind :math:`V_{n}^{\ast}(\text{input})`. + +""" + r""" +Args: + {input} + n (Tensor): Degree of the polynomial. + +Keyword args: + {out} +""".format(**common_args)) + +chebyshev_polynomial_w = _add_docstr(_special.special_chebyshev_polynomial_w, + r""" +chebyshev_polynomial_w(input, n, *, out=None) -> Tensor + +Chebyshev polynomial of the fourth kind :math:`W_{n}^{\ast}(\text{input})`. + +""" + r""" +Args: + {input} + n (Tensor): Degree of the polynomial. + +Keyword args: + {out} +""".format(**common_args)) + +hermite_polynomial_h = _add_docstr(_special.special_hermite_polynomial_h, + r""" +hermite_polynomial_h(input, n, *, out=None) -> Tensor + +Physicist’s Hermite polynomial :math:`H_{n}(\text{input})`. + +If :math:`n = 0`, :math:`1` is returned. If :math:`n = 1`, :math:`\text{input}` +is returned. Otherwise, the recursion: + +.. math:: + H_{n + 1}(\text{input}) = 2 \times \text{input} \times H_{n}(\text{input}) - H_{n - 1}(\text{input}) + +is evaluated. + +""" + r""" +Args: + {input} + n (Tensor): Degree of the polynomial. + +Keyword args: + {out} +""".format(**common_args)) + +hermite_polynomial_he = _add_docstr(_special.special_hermite_polynomial_he, + r""" +hermite_polynomial_he(input, n, *, out=None) -> Tensor + +Probabilist’s Hermite polynomial :math:`He_{n}(\text{input})`. + +If :math:`n = 0`, :math:`1` is returned. If :math:`n = 1`, :math:`\text{input}` +is returned. Otherwise, the recursion: + +.. math:: + He_{n + 1}(\text{input}) = 2 \times \text{input} \times He_{n}(\text{input}) - He_{n - 1}(\text{input}) + +is evaluated. + +""" + r""" +Args: + {input} + n (Tensor): Degree of the polynomial. 
+ +Keyword args: + {out} +""".format(**common_args)) + +laguerre_polynomial_l = _add_docstr(_special.special_laguerre_polynomial_l, + r""" +laguerre_polynomial_l(input, n, *, out=None) -> Tensor + +Laguerre polynomial :math:`L_{n}(\text{input})`. + +If :math:`n = 0`, :math:`1` is returned. If :math:`n = 1`, :math:`\text{input}` +is returned. Otherwise, the recursion: + +.. math:: + L_{n + 1}(\text{input}) = 2 \times \text{input} \times L_{n}(\text{input}) - L_{n - 1}(\text{input}) + +is evaluated. + +""" + r""" +Args: + {input} + n (Tensor): Degree of the polynomial. + +Keyword args: + {out} +""".format(**common_args)) + +legendre_polynomial_p = _add_docstr(_special.special_legendre_polynomial_p, + r""" +legendre_polynomial_p(input, n, *, out=None) -> Tensor + +Legendre polynomial :math:`P_{n}(\text{input})`. + +If :math:`n = 0`, :math:`1` is returned. If :math:`n = 1`, :math:`\text{input}` +is returned. Otherwise, the recursion: + +.. math:: + P_{n + 1}(\text{input}) = 2 \times \text{input} \times P_{n}(\text{input}) - P_{n - 1}(\text{input}) + +is evaluated. + +""" + r""" +Args: + {input} + n (Tensor): Degree of the polynomial. + +Keyword args: + {out} +""".format(**common_args)) + +modified_bessel_i0 = _add_docstr(_special.special_modified_bessel_i0, + r""" +modified_bessel_i0(input, *, out=None) -> Tensor + +Modified Bessel function of the first kind of order :math:`0`. + +""" + r""" +Args: + {input} + +Keyword args: + {out} +""".format(**common_args)) + +modified_bessel_i1 = _add_docstr(_special.special_modified_bessel_i1, + r""" +modified_bessel_i1(input, *, out=None) -> Tensor + +Modified Bessel function of the first kind of order :math:`1`. + +""" + r""" +Args: + {input} + +Keyword args: + {out} +""".format(**common_args)) + +modified_bessel_k0 = _add_docstr(_special.special_modified_bessel_k0, + r""" +modified_bessel_k0(input, *, out=None) -> Tensor + +Modified Bessel function of the second kind of order :math:`0`. + +""" + r""" +Args: + {input} + +Keyword args: + {out} +""".format(**common_args)) + +modified_bessel_k1 = _add_docstr(_special.special_modified_bessel_k1, + r""" +modified_bessel_k1(input, *, out=None) -> Tensor + +Modified Bessel function of the second kind of order :math:`1`. + +""" + r""" +Args: + {input} + +Keyword args: + {out} +""".format(**common_args)) + +scaled_modified_bessel_k0 = _add_docstr(_special.special_scaled_modified_bessel_k0, + r""" +scaled_modified_bessel_k0(input, *, out=None) -> Tensor + +Scaled modified Bessel function of the second kind of order :math:`0`. + +""" + r""" +Args: + {input} + +Keyword args: + {out} +""".format(**common_args)) + +scaled_modified_bessel_k1 = _add_docstr(_special.special_scaled_modified_bessel_k1, + r""" +scaled_modified_bessel_k1(input, *, out=None) -> Tensor + +Scaled modified Bessel function of the second kind of order :math:`1`. + +""" + r""" +Args: + {input} + +Keyword args: + {out} +""".format(**common_args)) + +shifted_chebyshev_polynomial_t = _add_docstr(_special.special_shifted_chebyshev_polynomial_t, + r""" +shifted_chebyshev_polynomial_t(input, n, *, out=None) -> Tensor + +Chebyshev polynomial of the first kind :math:`T_{n}^{\ast}(\text{input})`. + +""" + r""" +Args: + {input} + n (Tensor): Degree of the polynomial. 
+ +Keyword args: + {out} +""".format(**common_args)) + +shifted_chebyshev_polynomial_u = _add_docstr(_special.special_shifted_chebyshev_polynomial_u, + r""" +shifted_chebyshev_polynomial_u(input, n, *, out=None) -> Tensor + +Chebyshev polynomial of the second kind :math:`U_{n}^{\ast}(\text{input})`. + +""" + r""" +Args: + {input} + n (Tensor): Degree of the polynomial. + +Keyword args: + {out} +""".format(**common_args)) + +shifted_chebyshev_polynomial_v = _add_docstr(_special.special_shifted_chebyshev_polynomial_v, + r""" +shifted_chebyshev_polynomial_v(input, n, *, out=None) -> Tensor + +Chebyshev polynomial of the third kind :math:`V_{n}^{\ast}(\text{input})`. + +""" + r""" +Args: + {input} + n (Tensor): Degree of the polynomial. + +Keyword args: + {out} +""".format(**common_args)) + +shifted_chebyshev_polynomial_w = _add_docstr(_special.special_shifted_chebyshev_polynomial_w, + r""" +shifted_chebyshev_polynomial_w(input, n, *, out=None) -> Tensor + +Chebyshev polynomial of the fourth kind :math:`W_{n}^{\ast}(\text{input})`. + +""" + r""" +Args: + {input} + n (Tensor): Degree of the polynomial. + +Keyword args: + {out} +""".format(**common_args)) + +spherical_bessel_j0 = _add_docstr(_special.special_spherical_bessel_j0, + r""" +spherical_bessel_j0(input, *, out=None) -> Tensor + +Spherical Bessel function of the first kind of order :math:`0`. + +""" + r""" +Args: + {input} + +Keyword args: + {out} +""".format(**common_args)) diff --git a/env-llmeval/lib/python3.10/site-packages/torch/special/__pycache__/__init__.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/torch/special/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..04dfbf8c69e3e924e4f2407122c5eec5410eb211 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/torch/special/__pycache__/__init__.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/torch/testing/__init__.py b/env-llmeval/lib/python3.10/site-packages/torch/testing/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..58b8f828e3546b0e311c0f9f4738314f38e9a195 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torch/testing/__init__.py @@ -0,0 +1,3 @@ +from torch._C import FileCheck as FileCheck +from ._comparison import assert_allclose, assert_close as assert_close +from ._creation import make_tensor as make_tensor diff --git a/env-llmeval/lib/python3.10/site-packages/torch/testing/_comparison.py b/env-llmeval/lib/python3.10/site-packages/torch/testing/_comparison.py new file mode 100644 index 0000000000000000000000000000000000000000..147450138289736c6c7dd7e2aecfb27cafada73e --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torch/testing/_comparison.py @@ -0,0 +1,1572 @@ +import abc +import cmath +import collections.abc +import contextlib +import warnings +from typing import ( + Any, + Callable, + Collection, + Dict, + List, + NoReturn, + Optional, + Sequence, + Tuple, + Type, + Union, +) + +import torch + +try: + import numpy as np + + NUMPY_AVAILABLE = True +except ModuleNotFoundError: + NUMPY_AVAILABLE = False + + +class ErrorMeta(Exception): + """Internal testing exception that makes that carries error metadata.""" + + def __init__( + self, type: Type[Exception], msg: str, *, id: Tuple[Any, ...] = () + ) -> None: + super().__init__( + "If you are a user and see this message during normal operation " + "please file an issue at https://github.com/pytorch/pytorch/issues. 
" + "If you are a developer and working on the comparison functions, please `raise ErrorMeta().to_error()` " + "for user facing errors." + ) + self.type = type + self.msg = msg + self.id = id + + def to_error( + self, msg: Optional[Union[str, Callable[[str], str]]] = None + ) -> Exception: + if not isinstance(msg, str): + generated_msg = self.msg + if self.id: + generated_msg += f"\n\nThe failure occurred for item {''.join(str([item]) for item in self.id)}" + + msg = msg(generated_msg) if callable(msg) else generated_msg + + return self.type(msg) + + +# Some analysis of tolerance by logging tests from test_torch.py can be found in +# https://github.com/pytorch/pytorch/pull/32538. +# {dtype: (rtol, atol)} +_DTYPE_PRECISIONS = { + torch.float16: (0.001, 1e-5), + torch.bfloat16: (0.016, 1e-5), + torch.float32: (1.3e-6, 1e-5), + torch.float64: (1e-7, 1e-7), + torch.complex32: (0.001, 1e-5), + torch.complex64: (1.3e-6, 1e-5), + torch.complex128: (1e-7, 1e-7), +} +# The default tolerances of torch.float32 are used for quantized dtypes, because quantized tensors are compared in +# their dequantized and floating point representation. For more details see `TensorLikePair._compare_quantized_values` +_DTYPE_PRECISIONS.update( + { + dtype: _DTYPE_PRECISIONS[torch.float32] + for dtype in ( + torch.quint8, + torch.quint2x4, + torch.quint4x2, + torch.qint8, + torch.qint32, + ) + } +) + + +def default_tolerances( + *inputs: Union[torch.Tensor, torch.dtype], + dtype_precisions: Optional[Dict[torch.dtype, Tuple[float, float]]] = None, +) -> Tuple[float, float]: + """Returns the default absolute and relative testing tolerances for a set of inputs based on the dtype. + + See :func:`assert_close` for a table of the default tolerance for each dtype. + + Returns: + (Tuple[float, float]): Loosest tolerances of all input dtypes. + """ + dtypes = [] + for input in inputs: + if isinstance(input, torch.Tensor): + dtypes.append(input.dtype) + elif isinstance(input, torch.dtype): + dtypes.append(input) + else: + raise TypeError( + f"Expected a torch.Tensor or a torch.dtype, but got {type(input)} instead." + ) + dtype_precisions = dtype_precisions or _DTYPE_PRECISIONS + rtols, atols = zip(*[dtype_precisions.get(dtype, (0.0, 0.0)) for dtype in dtypes]) + return max(rtols), max(atols) + + +def get_tolerances( + *inputs: Union[torch.Tensor, torch.dtype], + rtol: Optional[float], + atol: Optional[float], + id: Tuple[Any, ...] = (), +) -> Tuple[float, float]: + """Gets absolute and relative to be used for numeric comparisons. + + If both ``rtol`` and ``atol`` are specified, this is a no-op. If both are not specified, the return value of + :func:`default_tolerances` is used. + + Raises: + ErrorMeta: With :class:`ValueError`, if only ``rtol`` or ``atol`` is specified. + + Returns: + (Tuple[float, float]): Valid absolute and relative tolerances. + """ + if (rtol is None) ^ (atol is None): + # We require both tolerance to be omitted or specified, because specifying only one might lead to surprising + # results. Imagine setting atol=0.0 and the tensors still match because rtol>0.0. 
+ raise ErrorMeta( + ValueError, + f"Both 'rtol' and 'atol' must be either specified or omitted, " + f"but got no {'rtol' if rtol is None else 'atol'}.", + id=id, + ) + elif rtol is not None and atol is not None: + return rtol, atol + else: + return default_tolerances(*inputs) + + +def _make_mismatch_msg( + *, + default_identifier: str, + identifier: Optional[Union[str, Callable[[str], str]]] = None, + extra: Optional[str] = None, + abs_diff: float, + abs_diff_idx: Optional[Union[int, Tuple[int, ...]]] = None, + atol: float, + rel_diff: float, + rel_diff_idx: Optional[Union[int, Tuple[int, ...]]] = None, + rtol: float, +) -> str: + """Makes a mismatch error message for numeric values. + + Args: + default_identifier (str): Default description of the compared values, e.g. "Tensor-likes". + identifier (Optional[Union[str, Callable[[str], str]]]): Optional identifier that overrides + ``default_identifier``. Can be passed as callable in which case it will be called with + ``default_identifier`` to create the description at runtime. + extra (Optional[str]): Extra information to be placed after the message header and the mismatch statistics. + abs_diff (float): Absolute difference. + abs_diff_idx (Optional[Union[int, Tuple[int, ...]]]): Optional index of the absolute difference. + atol (float): Allowed absolute tolerance. Will only be added to mismatch statistics if it or ``rtol`` are + ``> 0``. + rel_diff (float): Relative difference. + rel_diff_idx (Optional[Union[int, Tuple[int, ...]]]): Optional index of the relative difference. + rtol (float): Allowed relative tolerance. Will only be added to mismatch statistics if it or ``atol`` are + ``> 0``. + """ + equality = rtol == 0 and atol == 0 + + def make_diff_msg( + *, + type: str, + diff: float, + idx: Optional[Union[int, Tuple[int, ...]]], + tol: float, + ) -> str: + if idx is None: + msg = f"{type.title()} difference: {diff}" + else: + msg = f"Greatest {type} difference: {diff} at index {idx}" + if not equality: + msg += f" (up to {tol} allowed)" + return msg + "\n" + + if identifier is None: + identifier = default_identifier + elif callable(identifier): + identifier = identifier(default_identifier) + + msg = f"{identifier} are not {'equal' if equality else 'close'}!\n\n" + + if extra: + msg += f"{extra.strip()}\n" + + msg += make_diff_msg(type="absolute", diff=abs_diff, idx=abs_diff_idx, tol=atol) + msg += make_diff_msg(type="relative", diff=rel_diff, idx=rel_diff_idx, tol=rtol) + + return msg.strip() + + +def make_scalar_mismatch_msg( + actual: Union[bool, int, float, complex], + expected: Union[bool, int, float, complex], + *, + rtol: float, + atol: float, + identifier: Optional[Union[str, Callable[[str], str]]] = None, +) -> str: + """Makes a mismatch error message for scalars. + + Args: + actual (Union[bool, int, float, complex]): Actual scalar. + expected (Union[bool, int, float, complex]): Expected scalar. + rtol (float): Relative tolerance. + atol (float): Absolute tolerance. + identifier (Optional[Union[str, Callable[[str], str]]]): Optional description for the scalars. Can be passed + as callable in which case it will be called by the default value to create the description at runtime. + Defaults to "Scalars". 
+ """ + abs_diff = abs(actual - expected) + rel_diff = float("inf") if expected == 0 else abs_diff / abs(expected) + return _make_mismatch_msg( + default_identifier="Scalars", + identifier=identifier, + extra=f"Expected {expected} but got {actual}.", + abs_diff=abs_diff, + atol=atol, + rel_diff=rel_diff, + rtol=rtol, + ) + + +def make_tensor_mismatch_msg( + actual: torch.Tensor, + expected: torch.Tensor, + matches: torch.Tensor, + *, + rtol: float, + atol: float, + identifier: Optional[Union[str, Callable[[str], str]]] = None, +): + """Makes a mismatch error message for tensors. + + Args: + actual (torch.Tensor): Actual tensor. + expected (torch.Tensor): Expected tensor. + matches (torch.Tensor): Boolean mask of the same shape as ``actual`` and ``expected`` that indicates the + location of matches. + rtol (float): Relative tolerance. + atol (float): Absolute tolerance. + identifier (Optional[Union[str, Callable[[str], str]]]): Optional description for the tensors. Can be passed + as callable in which case it will be called by the default value to create the description at runtime. + Defaults to "Tensor-likes". + """ + + def unravel_flat_index(flat_index: int) -> Tuple[int, ...]: + if not matches.shape: + return () + + inverse_index = [] + for size in matches.shape[::-1]: + div, mod = divmod(flat_index, size) + flat_index = div + inverse_index.append(mod) + + return tuple(inverse_index[::-1]) + + number_of_elements = matches.numel() + total_mismatches = number_of_elements - int(torch.sum(matches)) + extra = ( + f"Mismatched elements: {total_mismatches} / {number_of_elements} " + f"({total_mismatches / number_of_elements:.1%})" + ) + + actual_flat = actual.flatten() + expected_flat = expected.flatten() + matches_flat = matches.flatten() + + if not actual.dtype.is_floating_point and not actual.dtype.is_complex: + # TODO: Instead of always upcasting to int64, it would be sufficient to cast to the next higher dtype to avoid + # overflow + actual_flat = actual_flat.to(torch.int64) + expected_flat = expected_flat.to(torch.int64) + + abs_diff = torch.abs(actual_flat - expected_flat) + # Ensure that only mismatches are used for the max_abs_diff computation + abs_diff[matches_flat] = 0 + max_abs_diff, max_abs_diff_flat_idx = torch.max(abs_diff, 0) + + rel_diff = abs_diff / torch.abs(expected_flat) + # Ensure that only mismatches are used for the max_rel_diff computation + rel_diff[matches_flat] = 0 + max_rel_diff, max_rel_diff_flat_idx = torch.max(rel_diff, 0) + return _make_mismatch_msg( + default_identifier="Tensor-likes", + identifier=identifier, + extra=extra, + abs_diff=max_abs_diff.item(), + abs_diff_idx=unravel_flat_index(int(max_abs_diff_flat_idx)), + atol=atol, + rel_diff=max_rel_diff.item(), + rel_diff_idx=unravel_flat_index(int(max_rel_diff_flat_idx)), + rtol=rtol, + ) + + +class UnsupportedInputs(Exception): # noqa: B903 + """Exception to be raised during the construction of a :class:`Pair` in case it doesn't support the inputs.""" + + +class Pair(abc.ABC): + """ABC for all comparison pairs to be used in conjunction with :func:`assert_equal`. + + Each subclass needs to overwrite :meth:`Pair.compare` that performs the actual comparison. + + Each pair receives **all** options, so select the ones applicable for the subclass and forward the rest to the + super class. Raising an :class:`UnsupportedInputs` during constructions indicates that the pair is not able to + handle the inputs and the next pair type will be tried. + + All other errors should be raised as :class:`ErrorMeta`. 
After the instantiation, :meth:`Pair._make_error_meta` can + be used to automatically handle overwriting the message with a user supplied one and id handling. + """ + + def __init__( + self, + actual: Any, + expected: Any, + *, + id: Tuple[Any, ...] = (), + **unknown_parameters: Any, + ) -> None: + self.actual = actual + self.expected = expected + self.id = id + self._unknown_parameters = unknown_parameters + + @staticmethod + def _inputs_not_supported() -> NoReturn: + raise UnsupportedInputs() + + @staticmethod + def _check_inputs_isinstance(*inputs: Any, cls: Union[Type, Tuple[Type, ...]]): + """Checks if all inputs are instances of a given class and raise :class:`UnsupportedInputs` otherwise.""" + if not all(isinstance(input, cls) for input in inputs): + Pair._inputs_not_supported() + + def _fail( + self, type: Type[Exception], msg: str, *, id: Tuple[Any, ...] = () + ) -> NoReturn: + """Raises an :class:`ErrorMeta` from a given exception type and message and the stored id. + + .. warning:: + + If you use this before the ``super().__init__(...)`` call in the constructor, you have to pass the ``id`` + explicitly. + """ + raise ErrorMeta(type, msg, id=self.id if not id and hasattr(self, "id") else id) + + @abc.abstractmethod + def compare(self) -> None: + """Compares the inputs and raises an :class`ErrorMeta` in case they mismatch.""" + + def extra_repr(self) -> Sequence[Union[str, Tuple[str, Any]]]: + """Returns extra information that will be included in the representation. + + Should be overwritten by all subclasses that use additional options. The representation of the object will only + be surfaced in case we encounter an unexpected error and thus should help debug the issue. Can be a sequence of + key-value-pairs or attribute names. + """ + return [] + + def __repr__(self) -> str: + head = f"{type(self).__name__}(" + tail = ")" + body = [ + f" {name}={value!s}," + for name, value in [ + ("id", self.id), + ("actual", self.actual), + ("expected", self.expected), + *[ + (extra, getattr(self, extra)) if isinstance(extra, str) else extra + for extra in self.extra_repr() + ], + ] + ] + return "\n".join((head, *body, *tail)) + + +class ObjectPair(Pair): + """Pair for any type of inputs that will be compared with the `==` operator. + + .. note:: + + Since this will instantiate for any kind of inputs, it should only be used as fallback after all other pairs + couldn't handle the inputs. + + """ + + def compare(self) -> None: + try: + equal = self.actual == self.expected + except Exception as error: + # We are not using `self._raise_error_meta` here since we need the exception chaining + raise ErrorMeta( + ValueError, + f"{self.actual} == {self.expected} failed with:\n{error}.", + id=self.id, + ) from error + + if not equal: + self._fail(AssertionError, f"{self.actual} != {self.expected}") + + +class NonePair(Pair): + """Pair for ``None`` inputs.""" + + def __init__(self, actual: Any, expected: Any, **other_parameters: Any) -> None: + if not (actual is None or expected is None): + self._inputs_not_supported() + + super().__init__(actual, expected, **other_parameters) + + def compare(self) -> None: + if not (self.actual is None and self.expected is None): + self._fail( + AssertionError, f"None mismatch: {self.actual} is not {self.expected}" + ) + + +class BooleanPair(Pair): + """Pair for :class:`bool` inputs. + + .. note:: + + If ``numpy`` is available, also handles :class:`numpy.bool_` inputs. 
+ + """ + + def __init__( + self, + actual: Any, + expected: Any, + *, + id: Tuple[Any, ...], + **other_parameters: Any, + ) -> None: + actual, expected = self._process_inputs(actual, expected, id=id) + super().__init__(actual, expected, **other_parameters) + + @property + def _supported_types(self) -> Tuple[Type, ...]: + cls: List[Type] = [bool] + if NUMPY_AVAILABLE: + cls.append(np.bool_) + return tuple(cls) + + def _process_inputs( + self, actual: Any, expected: Any, *, id: Tuple[Any, ...] + ) -> Tuple[bool, bool]: + self._check_inputs_isinstance(actual, expected, cls=self._supported_types) + actual, expected = ( + self._to_bool(bool_like, id=id) for bool_like in (actual, expected) + ) + return actual, expected + + def _to_bool(self, bool_like: Any, *, id: Tuple[Any, ...]) -> bool: + if isinstance(bool_like, bool): + return bool_like + elif isinstance(bool_like, np.bool_): + return bool_like.item() + else: + raise ErrorMeta( + TypeError, f"Unknown boolean type {type(bool_like)}.", id=id + ) + + def compare(self) -> None: + if self.actual is not self.expected: + self._fail( + AssertionError, + f"Booleans mismatch: {self.actual} is not {self.expected}", + ) + + +class NumberPair(Pair): + """Pair for Python number (:class:`int`, :class:`float`, and :class:`complex`) inputs. + + .. note:: + + If ``numpy`` is available, also handles :class:`numpy.number` inputs. + + Kwargs: + rtol (Optional[float]): Relative tolerance. If specified ``atol`` must also be specified. If omitted, default + values based on the type are selected with the below table. + atol (Optional[float]): Absolute tolerance. If specified ``rtol`` must also be specified. If omitted, default + values based on the type are selected with the below table. + equal_nan (bool): If ``True``, two ``NaN`` values are considered equal. Defaults to ``False``. + check_dtype (bool): If ``True``, the type of the inputs will be checked for equality. Defaults to ``False``. + + The following table displays correspondence between Python number type and the ``torch.dtype``'s. See + :func:`assert_close` for the corresponding tolerances. + + +------------------+-------------------------------+ + | ``type`` | corresponding ``torch.dtype`` | + +==================+===============================+ + | :class:`int` | :attr:`~torch.int64` | + +------------------+-------------------------------+ + | :class:`float` | :attr:`~torch.float64` | + +------------------+-------------------------------+ + | :class:`complex` | :attr:`~torch.complex64` | + +------------------+-------------------------------+ + """ + + _TYPE_TO_DTYPE = { + int: torch.int64, + float: torch.float64, + complex: torch.complex128, + } + _NUMBER_TYPES = tuple(_TYPE_TO_DTYPE.keys()) + + def __init__( + self, + actual: Any, + expected: Any, + *, + id: Tuple[Any, ...] 
= (), + rtol: Optional[float] = None, + atol: Optional[float] = None, + equal_nan: bool = False, + check_dtype: bool = False, + **other_parameters: Any, + ) -> None: + actual, expected = self._process_inputs(actual, expected, id=id) + super().__init__(actual, expected, id=id, **other_parameters) + + self.rtol, self.atol = get_tolerances( + *[self._TYPE_TO_DTYPE[type(input)] for input in (actual, expected)], + rtol=rtol, + atol=atol, + id=id, + ) + self.equal_nan = equal_nan + self.check_dtype = check_dtype + + @property + def _supported_types(self) -> Tuple[Type, ...]: + cls = list(self._NUMBER_TYPES) + if NUMPY_AVAILABLE: + cls.append(np.number) + return tuple(cls) + + def _process_inputs( + self, actual: Any, expected: Any, *, id: Tuple[Any, ...] + ) -> Tuple[Union[int, float, complex], Union[int, float, complex]]: + self._check_inputs_isinstance(actual, expected, cls=self._supported_types) + actual, expected = ( + self._to_number(number_like, id=id) for number_like in (actual, expected) + ) + return actual, expected + + def _to_number( + self, number_like: Any, *, id: Tuple[Any, ...] + ) -> Union[int, float, complex]: + if NUMPY_AVAILABLE and isinstance(number_like, np.number): + return number_like.item() + elif isinstance(number_like, self._NUMBER_TYPES): + return number_like # type: ignore[return-value] + else: + raise ErrorMeta( + TypeError, f"Unknown number type {type(number_like)}.", id=id + ) + + def compare(self) -> None: + if self.check_dtype and type(self.actual) is not type(self.expected): + self._fail( + AssertionError, + f"The (d)types do not match: {type(self.actual)} != {type(self.expected)}.", + ) + + if self.actual == self.expected: + return + + if self.equal_nan and cmath.isnan(self.actual) and cmath.isnan(self.expected): + return + + abs_diff = abs(self.actual - self.expected) + tolerance = self.atol + self.rtol * abs(self.expected) + + if cmath.isfinite(abs_diff) and abs_diff <= tolerance: + return + + self._fail( + AssertionError, + make_scalar_mismatch_msg( + self.actual, self.expected, rtol=self.rtol, atol=self.atol + ), + ) + + def extra_repr(self) -> Sequence[str]: + return ( + "rtol", + "atol", + "equal_nan", + "check_dtype", + ) + + +class TensorLikePair(Pair): + """Pair for :class:`torch.Tensor`-like inputs. + + Kwargs: + allow_subclasses (bool): + rtol (Optional[float]): Relative tolerance. If specified ``atol`` must also be specified. If omitted, default + values based on the type are selected. See :func:assert_close: for details. + atol (Optional[float]): Absolute tolerance. If specified ``rtol`` must also be specified. If omitted, default + values based on the type are selected. See :func:assert_close: for details. + equal_nan (bool): If ``True``, two ``NaN`` values are considered equal. Defaults to ``False``. + check_device (bool): If ``True`` (default), asserts that corresponding tensors are on the same + :attr:`~torch.Tensor.device`. If this check is disabled, tensors on different + :attr:`~torch.Tensor.device`'s are moved to the CPU before being compared. + check_dtype (bool): If ``True`` (default), asserts that corresponding tensors have the same ``dtype``. If this + check is disabled, tensors with different ``dtype``'s are promoted to a common ``dtype`` (according to + :func:`torch.promote_types`) before being compared. + check_layout (bool): If ``True`` (default), asserts that corresponding tensors have the same ``layout``. If this + check is disabled, tensors with different ``layout``'s are converted to strided tensors before being + compared. 
+ check_stride (bool): If ``True`` and corresponding tensors are strided, asserts that they have the same stride. + """ + + def __init__( + self, + actual: Any, + expected: Any, + *, + id: Tuple[Any, ...] = (), + allow_subclasses: bool = True, + rtol: Optional[float] = None, + atol: Optional[float] = None, + equal_nan: bool = False, + check_device: bool = True, + check_dtype: bool = True, + check_layout: bool = True, + check_stride: bool = False, + **other_parameters: Any, + ): + actual, expected = self._process_inputs( + actual, expected, id=id, allow_subclasses=allow_subclasses + ) + super().__init__(actual, expected, id=id, **other_parameters) + + self.rtol, self.atol = get_tolerances( + actual, expected, rtol=rtol, atol=atol, id=self.id + ) + self.equal_nan = equal_nan + self.check_device = check_device + self.check_dtype = check_dtype + self.check_layout = check_layout + self.check_stride = check_stride + + def _process_inputs( + self, actual: Any, expected: Any, *, id: Tuple[Any, ...], allow_subclasses: bool + ) -> Tuple[torch.Tensor, torch.Tensor]: + directly_related = isinstance(actual, type(expected)) or isinstance( + expected, type(actual) + ) + if not directly_related: + self._inputs_not_supported() + + if not allow_subclasses and type(actual) is not type(expected): + self._inputs_not_supported() + + actual, expected = (self._to_tensor(input) for input in (actual, expected)) + for tensor in (actual, expected): + self._check_supported(tensor, id=id) + return actual, expected + + def _to_tensor(self, tensor_like: Any) -> torch.Tensor: + if isinstance(tensor_like, torch.Tensor): + return tensor_like + + try: + return torch.as_tensor(tensor_like) + except Exception: + self._inputs_not_supported() + + def _check_supported(self, tensor: torch.Tensor, *, id: Tuple[Any, ...]) -> None: + if tensor.layout not in { + torch.strided, + torch.sparse_coo, + torch.sparse_csr, + torch.sparse_csc, + torch.sparse_bsr, + torch.sparse_bsc, + }: + raise ErrorMeta( + ValueError, f"Unsupported tensor layout {tensor.layout}", id=id + ) + + def compare(self) -> None: + actual, expected = self.actual, self.expected + + self._compare_attributes(actual, expected) + if any(input.device.type == "meta" for input in (actual, expected)): + return + + actual, expected = self._equalize_attributes(actual, expected) + self._compare_values(actual, expected) + + def _compare_attributes( + self, + actual: torch.Tensor, + expected: torch.Tensor, + ) -> None: + """Checks if the attributes of two tensors match. + + Always checks + + - the :attr:`~torch.Tensor.shape`, + - whether both inputs are quantized or not, + - and if they use the same quantization scheme. + + Checks for + + - :attr:`~torch.Tensor.layout`, + - :meth:`~torch.Tensor.stride`, + - :attr:`~torch.Tensor.device`, and + - :attr:`~torch.Tensor.dtype` + + are optional and can be disabled through the corresponding ``check_*`` flag during construction of the pair. 
+ """ + + def raise_mismatch_error( + attribute_name: str, actual_value: Any, expected_value: Any + ) -> NoReturn: + self._fail( + AssertionError, + f"The values for attribute '{attribute_name}' do not match: {actual_value} != {expected_value}.", + ) + + if actual.shape != expected.shape: + raise_mismatch_error("shape", actual.shape, expected.shape) + + if actual.is_quantized != expected.is_quantized: + raise_mismatch_error( + "is_quantized", actual.is_quantized, expected.is_quantized + ) + elif actual.is_quantized and actual.qscheme() != expected.qscheme(): + raise_mismatch_error("qscheme()", actual.qscheme(), expected.qscheme()) + + if actual.layout != expected.layout: + if self.check_layout: + raise_mismatch_error("layout", actual.layout, expected.layout) + elif ( + actual.layout == torch.strided + and self.check_stride + and actual.stride() != expected.stride() + ): + raise_mismatch_error("stride()", actual.stride(), expected.stride()) + + if self.check_device and actual.device != expected.device: + raise_mismatch_error("device", actual.device, expected.device) + + if self.check_dtype and actual.dtype != expected.dtype: + raise_mismatch_error("dtype", actual.dtype, expected.dtype) + + def _equalize_attributes( + self, actual: torch.Tensor, expected: torch.Tensor + ) -> Tuple[torch.Tensor, torch.Tensor]: + """Equalizes some attributes of two tensors for value comparison. + + If ``actual`` and ``expected`` are ... + + - ... not on the same :attr:`~torch.Tensor.device`, they are moved CPU memory. + - ... not of the same ``dtype``, they are promoted to a common ``dtype`` (according to + :func:`torch.promote_types`). + - ... not of the same ``layout``, they are converted to strided tensors. + + Args: + actual (Tensor): Actual tensor. + expected (Tensor): Expected tensor. + + Returns: + (Tuple[Tensor, Tensor]): Equalized tensors. + """ + # The comparison logic uses operators currently not supported by the MPS backends. + # See https://github.com/pytorch/pytorch/issues/77144 for details. 
+ # TODO: Remove this conversion as soon as all operations are supported natively by the MPS backend + if actual.is_mps or expected.is_mps: # type: ignore[attr-defined] + actual = actual.cpu() + expected = expected.cpu() + + if actual.device != expected.device: + actual = actual.cpu() + expected = expected.cpu() + + if actual.dtype != expected.dtype: + dtype = torch.promote_types(actual.dtype, expected.dtype) + actual = actual.to(dtype) + expected = expected.to(dtype) + + if actual.layout != expected.layout: + # These checks are needed, since Tensor.to_dense() fails on tensors that are already strided + actual = actual.to_dense() if actual.layout != torch.strided else actual + expected = ( + expected.to_dense() if expected.layout != torch.strided else expected + ) + + return actual, expected + + def _compare_values(self, actual: torch.Tensor, expected: torch.Tensor) -> None: + if actual.is_quantized: + compare_fn = self._compare_quantized_values + elif actual.is_sparse: + compare_fn = self._compare_sparse_coo_values + elif actual.layout in { + torch.sparse_csr, + torch.sparse_csc, + torch.sparse_bsr, + torch.sparse_bsc, + }: + compare_fn = self._compare_sparse_compressed_values + else: + compare_fn = self._compare_regular_values_close + + compare_fn( + actual, expected, rtol=self.rtol, atol=self.atol, equal_nan=self.equal_nan + ) + + def _compare_quantized_values( + self, + actual: torch.Tensor, + expected: torch.Tensor, + *, + rtol: float, + atol: float, + equal_nan: bool, + ) -> None: + """Compares quantized tensors by comparing the :meth:`~torch.Tensor.dequantize`'d variants for closeness. + + .. note:: + + A detailed discussion about why only the dequantized variant is checked for closeness rather than checking + the individual quantization parameters for closeness and the integer representation for equality can be + found in https://github.com/pytorch/pytorch/issues/68548. + """ + return self._compare_regular_values_close( + actual.dequantize(), + expected.dequantize(), + rtol=rtol, + atol=atol, + equal_nan=equal_nan, + identifier=lambda default_identifier: f"Quantized {default_identifier.lower()}", + ) + + def _compare_sparse_coo_values( + self, + actual: torch.Tensor, + expected: torch.Tensor, + *, + rtol: float, + atol: float, + equal_nan: bool, + ) -> None: + """Compares sparse COO tensors by comparing + + - the number of sparse dimensions, + - the number of non-zero elements (nnz) for equality, + - the indices for equality, and + - the values for closeness. 
+ """ + if actual.sparse_dim() != expected.sparse_dim(): + self._fail( + AssertionError, + ( + f"The number of sparse dimensions in sparse COO tensors does not match: " + f"{actual.sparse_dim()} != {expected.sparse_dim()}" + ), + ) + + if actual._nnz() != expected._nnz(): + self._fail( + AssertionError, + ( + f"The number of specified values in sparse COO tensors does not match: " + f"{actual._nnz()} != {expected._nnz()}" + ), + ) + + self._compare_regular_values_equal( + actual._indices(), + expected._indices(), + identifier="Sparse COO indices", + ) + self._compare_regular_values_close( + actual._values(), + expected._values(), + rtol=rtol, + atol=atol, + equal_nan=equal_nan, + identifier="Sparse COO values", + ) + + def _compare_sparse_compressed_values( + self, + actual: torch.Tensor, + expected: torch.Tensor, + *, + rtol: float, + atol: float, + equal_nan: bool, + ) -> None: + """Compares sparse compressed tensors by comparing + + - the number of non-zero elements (nnz) for equality, + - the plain indices for equality, + - the compressed indices for equality, and + - the values for closeness. + """ + format_name, compressed_indices_method, plain_indices_method = { + torch.sparse_csr: ( + "CSR", + torch.Tensor.crow_indices, + torch.Tensor.col_indices, + ), + torch.sparse_csc: ( + "CSC", + torch.Tensor.ccol_indices, + torch.Tensor.row_indices, + ), + torch.sparse_bsr: ( + "BSR", + torch.Tensor.crow_indices, + torch.Tensor.col_indices, + ), + torch.sparse_bsc: ( + "BSC", + torch.Tensor.ccol_indices, + torch.Tensor.row_indices, + ), + }[actual.layout] + + if actual._nnz() != expected._nnz(): + self._fail( + AssertionError, + ( + f"The number of specified values in sparse {format_name} tensors does not match: " + f"{actual._nnz()} != {expected._nnz()}" + ), + ) + + # Compressed and plain indices in the CSR / CSC / BSR / BSC sparse formates can be `torch.int32` _or_ + # `torch.int64`. While the same dtype is enforced for the compressed and plain indices of a single tensor, it + # can be different between two tensors. Thus, we need to convert them to the same dtype, or the comparison will + # fail. 
+ actual_compressed_indices = compressed_indices_method(actual) + expected_compressed_indices = compressed_indices_method(expected) + indices_dtype = torch.promote_types( + actual_compressed_indices.dtype, expected_compressed_indices.dtype + ) + + self._compare_regular_values_equal( + actual_compressed_indices.to(indices_dtype), + expected_compressed_indices.to(indices_dtype), + identifier=f"Sparse {format_name} {compressed_indices_method.__name__}", + ) + self._compare_regular_values_equal( + plain_indices_method(actual).to(indices_dtype), + plain_indices_method(expected).to(indices_dtype), + identifier=f"Sparse {format_name} {plain_indices_method.__name__}", + ) + self._compare_regular_values_close( + actual.values(), + expected.values(), + rtol=rtol, + atol=atol, + equal_nan=equal_nan, + identifier=f"Sparse {format_name} values", + ) + + def _compare_regular_values_equal( + self, + actual: torch.Tensor, + expected: torch.Tensor, + *, + equal_nan: bool = False, + identifier: Optional[Union[str, Callable[[str], str]]] = None, + ) -> None: + """Checks if the values of two tensors are equal.""" + self._compare_regular_values_close( + actual, expected, rtol=0, atol=0, equal_nan=equal_nan, identifier=identifier + ) + + def _compare_regular_values_close( + self, + actual: torch.Tensor, + expected: torch.Tensor, + *, + rtol: float, + atol: float, + equal_nan: bool, + identifier: Optional[Union[str, Callable[[str], str]]] = None, + ) -> None: + """Checks if the values of two tensors are close up to a desired tolerance.""" + matches = torch.isclose( + actual, expected, rtol=rtol, atol=atol, equal_nan=equal_nan + ) + if torch.all(matches): + return + + if actual.shape == torch.Size([]): + msg = make_scalar_mismatch_msg( + actual.item(), + expected.item(), + rtol=rtol, + atol=atol, + identifier=identifier, + ) + else: + msg = make_tensor_mismatch_msg( + actual, expected, matches, rtol=rtol, atol=atol, identifier=identifier + ) + self._fail(AssertionError, msg) + + def extra_repr(self) -> Sequence[str]: + return ( + "rtol", + "atol", + "equal_nan", + "check_device", + "check_dtype", + "check_layout", + "check_stride", + ) + + +def originate_pairs( + actual: Any, + expected: Any, + *, + pair_types: Sequence[Type[Pair]], + sequence_types: Tuple[Type, ...] = (collections.abc.Sequence,), + mapping_types: Tuple[Type, ...] = (collections.abc.Mapping,), + id: Tuple[Any, ...] = (), + **options: Any, +) -> List[Pair]: + """Originates pairs from the individual inputs. + + ``actual`` and ``expected`` can be possibly nested :class:`~collections.abc.Sequence`'s or + :class:`~collections.abc.Mapping`'s. In this case the pairs are originated by recursing through them. + + Args: + actual (Any): Actual input. + expected (Any): Expected input. + pair_types (Sequence[Type[Pair]]): Sequence of pair types that will be tried to construct with the inputs. + First successful pair will be used. + sequence_types (Tuple[Type, ...]): Optional types treated as sequences that will be checked elementwise. + mapping_types (Tuple[Type, ...]): Optional types treated as mappings that will be checked elementwise. + id (Tuple[Any, ...]): Optional id of a pair that will be included in an error message. + **options (Any): Options passed to each pair during construction. + + Raises: + ErrorMeta: With :class`AssertionError`, if the inputs are :class:`~collections.abc.Sequence`'s, but their + length does not match. 
+ ErrorMeta: With :class`AssertionError`, if the inputs are :class:`~collections.abc.Mapping`'s, but their set of + keys do not match. + ErrorMeta: With :class`TypeError`, if no pair is able to handle the inputs. + ErrorMeta: With any expected exception that happens during the construction of a pair. + + Returns: + (List[Pair]): Originated pairs. + """ + # We explicitly exclude str's here since they are self-referential and would cause an infinite recursion loop: + # "a" == "a"[0][0]... + if ( + isinstance(actual, sequence_types) + and not isinstance(actual, str) + and isinstance(expected, sequence_types) + and not isinstance(expected, str) + ): + actual_len = len(actual) + expected_len = len(expected) + if actual_len != expected_len: + raise ErrorMeta( + AssertionError, + f"The length of the sequences mismatch: {actual_len} != {expected_len}", + id=id, + ) + + pairs = [] + for idx in range(actual_len): + pairs.extend( + originate_pairs( + actual[idx], + expected[idx], + pair_types=pair_types, + sequence_types=sequence_types, + mapping_types=mapping_types, + id=(*id, idx), + **options, + ) + ) + return pairs + + elif isinstance(actual, mapping_types) and isinstance(expected, mapping_types): + actual_keys = set(actual.keys()) + expected_keys = set(expected.keys()) + if actual_keys != expected_keys: + missing_keys = expected_keys - actual_keys + additional_keys = actual_keys - expected_keys + raise ErrorMeta( + AssertionError, + ( + f"The keys of the mappings do not match:\n" + f"Missing keys in the actual mapping: {sorted(missing_keys)}\n" + f"Additional keys in the actual mapping: {sorted(additional_keys)}" + ), + id=id, + ) + + keys: Collection = actual_keys + # Since the origination aborts after the first failure, we try to be deterministic + with contextlib.suppress(Exception): + keys = sorted(keys) + + pairs = [] + for key in keys: + pairs.extend( + originate_pairs( + actual[key], + expected[key], + pair_types=pair_types, + sequence_types=sequence_types, + mapping_types=mapping_types, + id=(*id, key), + **options, + ) + ) + return pairs + + else: + for pair_type in pair_types: + try: + return [pair_type(actual, expected, id=id, **options)] + # Raising an `UnsupportedInputs` during origination indicates that the pair type is not able to handle the + # inputs. Thus, we try the next pair type. + except UnsupportedInputs: + continue + # Raising an `ErrorMeta` during origination is the orderly way to abort and so we simply re-raise it. This + # is only in a separate branch, because the one below would also except it. + except ErrorMeta: + raise + # Raising any other exception during origination is unexpected and will give some extra information about + # what happened. If applicable, the exception should be expected in the future. + except Exception as error: + raise RuntimeError( + f"Originating a {pair_type.__name__}() at item {''.join(str([item]) for item in id)} with\n\n" + f"{type(actual).__name__}(): {actual}\n\n" + f"and\n\n" + f"{type(expected).__name__}(): {expected}\n\n" + f"resulted in the unexpected exception above. " + f"If you are a user and see this message during normal operation " + "please file an issue at https://github.com/pytorch/pytorch/issues. " + "If you are a developer and working on the comparison functions, " + "please except the previous error and raise an expressive `ErrorMeta` instead." 
+ ) from error + else: + raise ErrorMeta( + TypeError, + f"No comparison pair was able to handle inputs of type {type(actual)} and {type(expected)}.", + id=id, + ) + + +def not_close_error_metas( + actual: Any, + expected: Any, + *, + pair_types: Sequence[Type[Pair]] = (ObjectPair,), + sequence_types: Tuple[Type, ...] = (collections.abc.Sequence,), + mapping_types: Tuple[Type, ...] = (collections.abc.Mapping,), + **options: Any, +) -> List[ErrorMeta]: + """Asserts that inputs are equal. + + ``actual`` and ``expected`` can be possibly nested :class:`~collections.abc.Sequence`'s or + :class:`~collections.abc.Mapping`'s. In this case the comparison happens elementwise by recursing through them. + + Args: + actual (Any): Actual input. + expected (Any): Expected input. + pair_types (Sequence[Type[Pair]]): Sequence of :class:`Pair` types that will be tried to construct with the + inputs. First successful pair will be used. Defaults to only using :class:`ObjectPair`. + sequence_types (Tuple[Type, ...]): Optional types treated as sequences that will be checked elementwise. + mapping_types (Tuple[Type, ...]): Optional types treated as mappings that will be checked elementwise. + **options (Any): Options passed to each pair during construction. + """ + # Hide this function from `pytest`'s traceback + __tracebackhide__ = True + + try: + pairs = originate_pairs( + actual, + expected, + pair_types=pair_types, + sequence_types=sequence_types, + mapping_types=mapping_types, + **options, + ) + except ErrorMeta as error_meta: + # Explicitly raising from None to hide the internal traceback + raise error_meta.to_error() from None + + error_metas: List[ErrorMeta] = [] + for pair in pairs: + try: + pair.compare() + except ErrorMeta as error_meta: + error_metas.append(error_meta) + # Raising any exception besides `ErrorMeta` while comparing is unexpected and will give some extra information + # about what happened. If applicable, the exception should be expected in the future. + except Exception as error: + raise RuntimeError( + f"Comparing\n\n" + f"{pair}\n\n" + f"resulted in the unexpected exception above. " + f"If you are a user and see this message during normal operation " + "please file an issue at https://github.com/pytorch/pytorch/issues. " + "If you are a developer and working on the comparison functions, " + "please except the previous error and raise an expressive `ErrorMeta` instead." + ) from error + + # [ErrorMeta Cycles] + # ErrorMeta objects in this list capture + # tracebacks that refer to the frame of this function. + # The local variable `error_metas` refers to the error meta + # objects, creating a reference cycle. Frames in the traceback + # would not get freed until cycle collection, leaking cuda memory in tests. + # We break the cycle by removing the reference to the error_meta objects + # from this frame as it returns. + error_metas = [error_metas] + return error_metas.pop() + + +def assert_close( + actual: Any, + expected: Any, + *, + allow_subclasses: bool = True, + rtol: Optional[float] = None, + atol: Optional[float] = None, + equal_nan: bool = False, + check_device: bool = True, + check_dtype: bool = True, + check_layout: bool = True, + check_stride: bool = False, + msg: Optional[Union[str, Callable[[str], str]]] = None, +): + r"""Asserts that ``actual`` and ``expected`` are close. + + If ``actual`` and ``expected`` are strided, non-quantized, real-valued, and finite, they are considered close if + + .. 
math:: + + \lvert \text{actual} - \text{expected} \rvert \le \texttt{atol} + \texttt{rtol} \cdot \lvert \text{expected} \rvert + + Non-finite values (``-inf`` and ``inf``) are only considered close if and only if they are equal. ``NaN``'s are + only considered equal to each other if ``equal_nan`` is ``True``. + + In addition, they are only considered close if they have the same + + - :attr:`~torch.Tensor.device` (if ``check_device`` is ``True``), + - ``dtype`` (if ``check_dtype`` is ``True``), + - ``layout`` (if ``check_layout`` is ``True``), and + - stride (if ``check_stride`` is ``True``). + + If either ``actual`` or ``expected`` is a meta tensor, only the attribute checks will be performed. + + If ``actual`` and ``expected`` are sparse (either having COO, CSR, CSC, BSR, or BSC layout), their strided members are + checked individually. Indices, namely ``indices`` for COO, ``crow_indices`` and ``col_indices`` for CSR and BSR, + or ``ccol_indices`` and ``row_indices`` for CSC and BSC layouts, respectively, + are always checked for equality whereas the values are checked for closeness according to the definition above. + + If ``actual`` and ``expected`` are quantized, they are considered close if they have the same + :meth:`~torch.Tensor.qscheme` and the result of :meth:`~torch.Tensor.dequantize` is close according to the + definition above. + + ``actual`` and ``expected`` can be :class:`~torch.Tensor`'s or any tensor-or-scalar-likes from which + :class:`torch.Tensor`'s can be constructed with :func:`torch.as_tensor`. Except for Python scalars the input types + have to be directly related. In addition, ``actual`` and ``expected`` can be :class:`~collections.abc.Sequence`'s + or :class:`~collections.abc.Mapping`'s in which case they are considered close if their structure matches and all + their elements are considered close according to the above definition. + + .. note:: + + Python scalars are an exception to the type relation requirement, because their :func:`type`, i.e. + :class:`int`, :class:`float`, and :class:`complex`, is equivalent to the ``dtype`` of a tensor-like. Thus, + Python scalars of different types can be checked, but require ``check_dtype=False``. + + Args: + actual (Any): Actual input. + expected (Any): Expected input. + allow_subclasses (bool): If ``True`` (default) and except for Python scalars, inputs of directly related types + are allowed. Otherwise type equality is required. + rtol (Optional[float]): Relative tolerance. If specified ``atol`` must also be specified. If omitted, default + values based on the :attr:`~torch.Tensor.dtype` are selected with the below table. + atol (Optional[float]): Absolute tolerance. If specified ``rtol`` must also be specified. If omitted, default + values based on the :attr:`~torch.Tensor.dtype` are selected with the below table. + equal_nan (Union[bool, str]): If ``True``, two ``NaN`` values will be considered equal. + check_device (bool): If ``True`` (default), asserts that corresponding tensors are on the same + :attr:`~torch.Tensor.device`. If this check is disabled, tensors on different + :attr:`~torch.Tensor.device`'s are moved to the CPU before being compared. + check_dtype (bool): If ``True`` (default), asserts that corresponding tensors have the same ``dtype``. If this + check is disabled, tensors with different ``dtype``'s are promoted to a common ``dtype`` (according to + :func:`torch.promote_types`) before being compared. 
+ check_layout (bool): If ``True`` (default), asserts that corresponding tensors have the same ``layout``. If this + check is disabled, tensors with different ``layout``'s are converted to strided tensors before being + compared. + check_stride (bool): If ``True`` and corresponding tensors are strided, asserts that they have the same stride. + msg (Optional[Union[str, Callable[[str], str]]]): Optional error message to use in case a failure occurs during + the comparison. Can also passed as callable in which case it will be called with the generated message and + should return the new message. + + Raises: + ValueError: If no :class:`torch.Tensor` can be constructed from an input. + ValueError: If only ``rtol`` or ``atol`` is specified. + AssertionError: If corresponding inputs are not Python scalars and are not directly related. + AssertionError: If ``allow_subclasses`` is ``False``, but corresponding inputs are not Python scalars and have + different types. + AssertionError: If the inputs are :class:`~collections.abc.Sequence`'s, but their length does not match. + AssertionError: If the inputs are :class:`~collections.abc.Mapping`'s, but their set of keys do not match. + AssertionError: If corresponding tensors do not have the same :attr:`~torch.Tensor.shape`. + AssertionError: If ``check_layout`` is ``True``, but corresponding tensors do not have the same + :attr:`~torch.Tensor.layout`. + AssertionError: If only one of corresponding tensors is quantized. + AssertionError: If corresponding tensors are quantized, but have different :meth:`~torch.Tensor.qscheme`'s. + AssertionError: If ``check_device`` is ``True``, but corresponding tensors are not on the same + :attr:`~torch.Tensor.device`. + AssertionError: If ``check_dtype`` is ``True``, but corresponding tensors do not have the same ``dtype``. + AssertionError: If ``check_stride`` is ``True``, but corresponding strided tensors do not have the same stride. + AssertionError: If the values of corresponding tensors are not close according to the definition above. + + The following table displays the default ``rtol`` and ``atol`` for different ``dtype``'s. In case of mismatching + ``dtype``'s, the maximum of both tolerances is used. 
+ + +---------------------------+------------+----------+ + | ``dtype`` | ``rtol`` | ``atol`` | + +===========================+============+==========+ + | :attr:`~torch.float16` | ``1e-3`` | ``1e-5`` | + +---------------------------+------------+----------+ + | :attr:`~torch.bfloat16` | ``1.6e-2`` | ``1e-5`` | + +---------------------------+------------+----------+ + | :attr:`~torch.float32` | ``1.3e-6`` | ``1e-5`` | + +---------------------------+------------+----------+ + | :attr:`~torch.float64` | ``1e-7`` | ``1e-7`` | + +---------------------------+------------+----------+ + | :attr:`~torch.complex32` | ``1e-3`` | ``1e-5`` | + +---------------------------+------------+----------+ + | :attr:`~torch.complex64` | ``1.3e-6`` | ``1e-5`` | + +---------------------------+------------+----------+ + | :attr:`~torch.complex128` | ``1e-7`` | ``1e-7`` | + +---------------------------+------------+----------+ + | :attr:`~torch.quint8` | ``1.3e-6`` | ``1e-5`` | + +---------------------------+------------+----------+ + | :attr:`~torch.quint2x4` | ``1.3e-6`` | ``1e-5`` | + +---------------------------+------------+----------+ + | :attr:`~torch.quint4x2` | ``1.3e-6`` | ``1e-5`` | + +---------------------------+------------+----------+ + | :attr:`~torch.qint8` | ``1.3e-6`` | ``1e-5`` | + +---------------------------+------------+----------+ + | :attr:`~torch.qint32` | ``1.3e-6`` | ``1e-5`` | + +---------------------------+------------+----------+ + | other | ``0.0`` | ``0.0`` | + +---------------------------+------------+----------+ + + .. note:: + + :func:`~torch.testing.assert_close` is highly configurable with strict default settings. Users are encouraged + to :func:`~functools.partial` it to fit their use case. For example, if an equality check is needed, one might + define an ``assert_equal`` that uses zero tolerances for every ``dtype`` by default: + + >>> import functools + >>> assert_equal = functools.partial(torch.testing.assert_close, rtol=0, atol=0) + >>> assert_equal(1e-9, 1e-10) + Traceback (most recent call last): + ... + AssertionError: Scalars are not equal! + + Expected 1e-10 but got 1e-09. + Absolute difference: 9.000000000000001e-10 + Relative difference: 9.0 + + Examples: + >>> # tensor to tensor comparison + >>> expected = torch.tensor([1e0, 1e-1, 1e-2]) + >>> actual = torch.acos(torch.cos(expected)) + >>> torch.testing.assert_close(actual, expected) + + >>> # scalar to scalar comparison + >>> import math + >>> expected = math.sqrt(2.0) + >>> actual = 2.0 / math.sqrt(2.0) + >>> torch.testing.assert_close(actual, expected) + + >>> # numpy array to numpy array comparison + >>> import numpy as np + >>> expected = np.array([1e0, 1e-1, 1e-2]) + >>> actual = np.arccos(np.cos(expected)) + >>> torch.testing.assert_close(actual, expected) + + >>> # sequence to sequence comparison + >>> import numpy as np + >>> # The types of the sequences do not have to match. They only have to have the same + >>> # length and their elements have to match. + >>> expected = [torch.tensor([1.0]), 2.0, np.array(3.0)] + >>> actual = tuple(expected) + >>> torch.testing.assert_close(actual, expected) + + >>> # mapping to mapping comparison + >>> from collections import OrderedDict + >>> import numpy as np + >>> foo = torch.tensor(1.0) + >>> bar = 2.0 + >>> baz = np.array(3.0) + >>> # The types and a possible ordering of mappings do not have to match. They only + >>> # have to have the same set of keys and their elements have to match. 
+ >>> expected = OrderedDict([("foo", foo), ("bar", bar), ("baz", baz)]) + >>> actual = {"baz": baz, "bar": bar, "foo": foo} + >>> torch.testing.assert_close(actual, expected) + + >>> expected = torch.tensor([1.0, 2.0, 3.0]) + >>> actual = expected.clone() + >>> # By default, directly related instances can be compared + >>> torch.testing.assert_close(torch.nn.Parameter(actual), expected) + >>> # This check can be made more strict with allow_subclasses=False + >>> torch.testing.assert_close( + ... torch.nn.Parameter(actual), expected, allow_subclasses=False + ... ) + Traceback (most recent call last): + ... + TypeError: No comparison pair was able to handle inputs of type + and . + >>> # If the inputs are not directly related, they are never considered close + >>> torch.testing.assert_close(actual.numpy(), expected) + Traceback (most recent call last): + ... + TypeError: No comparison pair was able to handle inputs of type + and . + >>> # Exceptions to these rules are Python scalars. They can be checked regardless of + >>> # their type if check_dtype=False. + >>> torch.testing.assert_close(1.0, 1, check_dtype=False) + + >>> # NaN != NaN by default. + >>> expected = torch.tensor(float("Nan")) + >>> actual = expected.clone() + >>> torch.testing.assert_close(actual, expected) + Traceback (most recent call last): + ... + AssertionError: Scalars are not close! + + Expected nan but got nan. + Absolute difference: nan (up to 1e-05 allowed) + Relative difference: nan (up to 1.3e-06 allowed) + >>> torch.testing.assert_close(actual, expected, equal_nan=True) + + >>> expected = torch.tensor([1.0, 2.0, 3.0]) + >>> actual = torch.tensor([1.0, 4.0, 5.0]) + >>> # The default error message can be overwritten. + >>> torch.testing.assert_close(actual, expected, msg="Argh, the tensors are not close!") + Traceback (most recent call last): + ... + AssertionError: Argh, the tensors are not close! + >>> # If msg is a callable, it can be used to augment the generated message with + >>> # extra information + >>> torch.testing.assert_close( + ... actual, expected, msg=lambda msg: f"Header\n\n{msg}\n\nFooter" + ... ) + Traceback (most recent call last): + ... + AssertionError: Header + + Tensor-likes are not close! + + Mismatched elements: 2 / 3 (66.7%) + Greatest absolute difference: 2.0 at index (1,) (up to 1e-05 allowed) + Greatest relative difference: 1.0 at index (1,) (up to 1.3e-06 allowed) + + Footer + """ + # Hide this function from `pytest`'s traceback + __tracebackhide__ = True + + error_metas = not_close_error_metas( + actual, + expected, + pair_types=( + NonePair, + BooleanPair, + NumberPair, + TensorLikePair, + ), + allow_subclasses=allow_subclasses, + rtol=rtol, + atol=atol, + equal_nan=equal_nan, + check_device=check_device, + check_dtype=check_dtype, + check_layout=check_layout, + check_stride=check_stride, + msg=msg, + ) + + if error_metas: + # TODO: compose all metas into one AssertionError + raise error_metas[0].to_error(msg) + + +def assert_allclose( + actual: Any, + expected: Any, + rtol: Optional[float] = None, + atol: Optional[float] = None, + equal_nan: bool = True, + msg: str = "", +) -> None: + """ + .. warning:: + + :func:`torch.testing.assert_allclose` is deprecated since ``1.12`` and will be removed in a future release. + Please use :func:`torch.testing.assert_close` instead. You can find detailed upgrade instructions + `here `_. + """ + warnings.warn( + "`torch.testing.assert_allclose()` is deprecated since 1.12 and will be removed in a future release. 
" + "Please use `torch.testing.assert_close()` instead. " + "You can find detailed upgrade instructions in https://github.com/pytorch/pytorch/issues/61844.", + FutureWarning, + stacklevel=2, + ) + + if not isinstance(actual, torch.Tensor): + actual = torch.tensor(actual) + if not isinstance(expected, torch.Tensor): + expected = torch.tensor(expected, dtype=actual.dtype) + + if rtol is None and atol is None: + rtol, atol = default_tolerances( + actual, + expected, + dtype_precisions={ + torch.float16: (1e-3, 1e-3), + torch.float32: (1e-4, 1e-5), + torch.float64: (1e-5, 1e-8), + }, + ) + + torch.testing.assert_close( + actual, + expected, + rtol=rtol, + atol=atol, + equal_nan=equal_nan, + check_device=True, + check_dtype=False, + check_stride=False, + msg=msg or None, + ) diff --git a/env-llmeval/lib/python3.10/site-packages/torch/testing/_creation.py b/env-llmeval/lib/python3.10/site-packages/torch/testing/_creation.py new file mode 100644 index 0000000000000000000000000000000000000000..a46d8cf590e407054d7c26350b74c1e24d2ee8ce --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torch/testing/_creation.py @@ -0,0 +1,253 @@ +""" +This module contains tensor creation utilities. +""" + +import collections.abc +import math +import warnings +from typing import cast, List, Optional, Tuple, Union + +import torch + +_INTEGRAL_TYPES = [torch.uint8, torch.int8, torch.int16, torch.int32, torch.int64] +_FLOATING_TYPES = [torch.float16, torch.bfloat16, torch.float32, torch.float64] +_FLOATING_8BIT_TYPES = [torch.float8_e4m3fn, torch.float8_e5m2] +_COMPLEX_TYPES = [torch.complex32, torch.complex64, torch.complex128] +_BOOLEAN_OR_INTEGRAL_TYPES = [torch.bool, *_INTEGRAL_TYPES] +_FLOATING_OR_COMPLEX_TYPES = [*_FLOATING_TYPES, *_COMPLEX_TYPES] + + +def _uniform_random_(t: torch.Tensor, low: float, high: float) -> torch.Tensor: + # uniform_ requires to-from <= std::numeric_limits::max() + # Work around this by scaling the range before and after the PRNG + if high - low >= torch.finfo(t.dtype).max: + return t.uniform_(low / 2, high / 2).mul_(2) + else: + return t.uniform_(low, high) + + +def make_tensor( + *shape: Union[int, torch.Size, List[int], Tuple[int, ...]], + dtype: torch.dtype, + device: Union[str, torch.device], + low: Optional[float] = None, + high: Optional[float] = None, + requires_grad: bool = False, + noncontiguous: bool = False, + exclude_zero: bool = False, + memory_format: Optional[torch.memory_format] = None, +) -> torch.Tensor: + r"""Creates a tensor with the given :attr:`shape`, :attr:`device`, and :attr:`dtype`, and filled with + values uniformly drawn from ``[low, high)``. + + If :attr:`low` or :attr:`high` are specified and are outside the range of the :attr:`dtype`'s representable + finite values then they are clamped to the lowest or highest representable finite value, respectively. + If ``None``, then the following table describes the default values for :attr:`low` and :attr:`high`, + which depend on :attr:`dtype`. 
+ + +---------------------------+------------+----------+ + | ``dtype`` | ``low`` | ``high`` | + +===========================+============+==========+ + | boolean type | ``0`` | ``2`` | + +---------------------------+------------+----------+ + | unsigned integral type | ``0`` | ``10`` | + +---------------------------+------------+----------+ + | signed integral types | ``-9`` | ``10`` | + +---------------------------+------------+----------+ + | floating types | ``-9`` | ``9`` | + +---------------------------+------------+----------+ + | complex types | ``-9`` | ``9`` | + +---------------------------+------------+----------+ + + Args: + shape (Tuple[int, ...]): Single integer or a sequence of integers defining the shape of the output tensor. + dtype (:class:`torch.dtype`): The data type of the returned tensor. + device (Union[str, torch.device]): The device of the returned tensor. + low (Optional[Number]): Sets the lower limit (inclusive) of the given range. If a number is provided it is + clamped to the least representable finite value of the given dtype. When ``None`` (default), + this value is determined based on the :attr:`dtype` (see the table above). Default: ``None``. + high (Optional[Number]): Sets the upper limit (exclusive) of the given range. If a number is provided it is + clamped to the greatest representable finite value of the given dtype. When ``None`` (default) this value + is determined based on the :attr:`dtype` (see the table above). Default: ``None``. + + .. deprecated:: 2.1 + + Passing ``low==high`` to :func:`~torch.testing.make_tensor` for floating or complex types is deprecated + since 2.1 and will be removed in 2.3. Use :func:`torch.full` instead. + + requires_grad (Optional[bool]): If autograd should record operations on the returned tensor. Default: ``False``. + noncontiguous (Optional[bool]): If `True`, the returned tensor will be noncontiguous. This argument is + ignored if the constructed tensor has fewer than two elements. Mutually exclusive with ``memory_format``. + exclude_zero (Optional[bool]): If ``True`` then zeros are replaced with the dtype's small positive value + depending on the :attr:`dtype`. For bool and integer types zero is replaced with one. For floating + point types it is replaced with the dtype's smallest positive normal number (the "tiny" value of the + :attr:`dtype`'s :func:`~torch.finfo` object), and for complex types it is replaced with a complex number + whose real and imaginary parts are both the smallest positive normal number representable by the complex + type. Default ``False``. + memory_format (Optional[torch.memory_format]): The memory format of the returned tensor. Mutually exclusive + with ``noncontiguous``. + + Raises: + ValueError: If ``requires_grad=True`` is passed for integral `dtype` + ValueError: If ``low >= high``. + ValueError: If either :attr:`low` or :attr:`high` is ``nan``. + ValueError: If both :attr:`noncontiguous` and :attr:`memory_format` are passed. + TypeError: If :attr:`dtype` isn't supported by this function. 
+ + Examples: + >>> # xdoctest: +SKIP + >>> # xdoctest: +REQUIRES(env:TORCH_DOCTEST_CUDA) + >>> from torch.testing import make_tensor + >>> # Creates a float tensor with values in [-1, 1) + >>> make_tensor((3,), device='cpu', dtype=torch.float32, low=-1, high=1) + >>> # xdoctest: +SKIP + tensor([ 0.1205, 0.2282, -0.6380]) + >>> # Creates a bool tensor on CUDA + >>> make_tensor((2, 2), device='cuda', dtype=torch.bool) + tensor([[False, False], + [False, True]], device='cuda:0') + """ + + def modify_low_high( + low: Optional[float], + high: Optional[float], + *, + lowest_inclusive: float, + highest_exclusive: float, + default_low: float, + default_high: float, + ) -> Tuple[float, float]: + """ + Modifies (and raises ValueError when appropriate) low and high values given by the user (input_low, input_high) + if required. + """ + + def clamp(a: float, l: float, h: float) -> float: + return min(max(a, l), h) + + low = low if low is not None else default_low + high = high if high is not None else default_high + + if any(isinstance(value, float) and math.isnan(value) for value in [low, high]): + raise ValueError( + f"`low` and `high` cannot be NaN, but got {low=} and {high=}" + ) + elif low == high and dtype in _FLOATING_OR_COMPLEX_TYPES: + warnings.warn( + "Passing `low==high` to `torch.testing.make_tensor` for floating or complex types " + "is deprecated since 2.1 and will be removed in 2.3. " + "Use torch.full(...) instead.", + FutureWarning, + ) + elif low >= high: + raise ValueError(f"`low` must be less than `high`, but got {low} >= {high}") + elif high < lowest_inclusive or low >= highest_exclusive: + raise ValueError( + f"The value interval specified by `low` and `high` is [{low}, {high}), " + f"but {dtype} only supports [{lowest_inclusive}, {highest_exclusive})" + ) + + low = clamp(low, lowest_inclusive, highest_exclusive) + high = clamp(high, lowest_inclusive, highest_exclusive) + + if dtype in _BOOLEAN_OR_INTEGRAL_TYPES: + # 1. `low` is ceiled to avoid creating values smaller than `low` and thus outside the specified interval + # 2. Following the same reasoning as for 1., `high` should be floored. However, the higher bound of + # `torch.randint` is exclusive, and thus we need to ceil here as well. + return math.ceil(low), math.ceil(high) + + return low, high + + if len(shape) == 1 and isinstance(shape[0], collections.abc.Sequence): + shape = shape[0] # type: ignore[assignment] + shape = cast(Tuple[int, ...], tuple(shape)) + + if noncontiguous and memory_format is not None: + raise ValueError( + f"The parameters `noncontiguous` and `memory_format` are mutually exclusive, " + f"but got {noncontiguous=} and {memory_format=}" + ) + + if requires_grad and dtype in _BOOLEAN_OR_INTEGRAL_TYPES: + raise ValueError( + f"`requires_grad=True` is not supported for boolean and integral dtypes, but got {dtype=}" + ) + + if dtype is torch.bool: + low, high = cast( + Tuple[int, int], + modify_low_high( + low, + high, + lowest_inclusive=0, + highest_exclusive=2, + default_low=0, + default_high=2, + ), + ) + result = torch.randint(low, high, shape, device=device, dtype=dtype) + elif dtype in _BOOLEAN_OR_INTEGRAL_TYPES: + low, high = cast( + Tuple[int, int], + modify_low_high( + low, + high, + lowest_inclusive=torch.iinfo(dtype).min, + highest_exclusive=torch.iinfo(dtype).max + # In theory, `highest_exclusive` should always be the maximum value + 1. However, `torch.randint` + # internally converts the bounds to an int64 and would overflow. In other words: `torch.randint` cannot + # sample 2**63 - 1, i.e. 
the maximum value of `torch.int64` and we need to account for that here. + + (1 if dtype is not torch.int64 else 0), + # This is incorrect for `torch.uint8`, but since we clamp to `lowest`, i.e. 0 for `torch.uint8`, + # _after_ we use the default value, we don't need to special case it here + default_low=-9, + default_high=10, + ), + ) + result = torch.randint(low, high, shape, device=device, dtype=dtype) + elif dtype in _FLOATING_OR_COMPLEX_TYPES: + low, high = modify_low_high( + low, + high, + lowest_inclusive=torch.finfo(dtype).min, + highest_exclusive=torch.finfo(dtype).max, + default_low=-9, + default_high=9, + ) + result = torch.empty(shape, device=device, dtype=dtype) + _uniform_random_( + torch.view_as_real(result) if dtype in _COMPLEX_TYPES else result, low, high + ) + elif dtype in _FLOATING_8BIT_TYPES: + low, high = modify_low_high( + low, + high, + lowest_inclusive=torch.finfo(dtype).min, + highest_exclusive=torch.finfo(dtype).max, + default_low=-9, + default_high=9, + ) + result = torch.empty(shape, device=device, dtype=torch.float32) + _uniform_random_(result, low, high) + result = result.to(dtype) + else: + raise TypeError( + f"The requested dtype '{dtype}' is not supported by torch.testing.make_tensor()." + " To request support, file an issue at: https://github.com/pytorch/pytorch/issues" + ) + + if noncontiguous and result.numel() > 1: + result = torch.repeat_interleave(result, 2, dim=-1) + result = result[..., ::2] + elif memory_format is not None: + result = result.clone(memory_format=memory_format) + + if exclude_zero: + result[result == 0] = ( + 1 if dtype in _BOOLEAN_OR_INTEGRAL_TYPES else torch.finfo(dtype).tiny + ) + + if dtype in _FLOATING_OR_COMPLEX_TYPES: + result.requires_grad = requires_grad + + return result diff --git a/env-llmeval/lib/python3.10/site-packages/torch/testing/_internal/__init__.py b/env-llmeval/lib/python3.10/site-packages/torch/testing/_internal/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/env-llmeval/lib/python3.10/site-packages/torch/testing/_internal/autocast_test_lists.py b/env-llmeval/lib/python3.10/site-packages/torch/testing/_internal/autocast_test_lists.py new file mode 100644 index 0000000000000000000000000000000000000000..e6b6dcfc0f40d8fa2dbf0c45f4e1862541787b9a --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torch/testing/_internal/autocast_test_lists.py @@ -0,0 +1,367 @@ +import torch +from torch.testing._internal.common_utils import TEST_WITH_ROCM + + +class AutocastTestLists: + def _rnn_cell_args(self, n, num_chunks, is_lstm, dev, dtype): + input = (torch.randn((n, n), device=dev, dtype=torch.float32),) + + hx = ((torch.randn((n, n), device=dev, dtype=torch.float32), + torch.randn((n, n), device=dev, dtype=torch.float32)) if is_lstm else + torch.randn((n, n), device=dev, dtype=torch.float32),) + + weights = (torch.randn((num_chunks * n, n), device=dev, dtype=torch.float32), # weight_ih + torch.randn((num_chunks * n, n), device=dev, dtype=torch.float32), # weight_hh + torch.randn((num_chunks * n), device=dev, dtype=torch.float32), # bias_ih + torch.randn((num_chunks * n), device=dev, dtype=torch.float32)) # bias_hh + + # returns args as a tuple + return input + hx + weights + + # Supplies ops and arguments for test_autocast_* in test/test_cuda.py + def __init__(self, dev): + super().__init__() + n = 8 + # Utility arguments, created as one-element tuples + pointwise0_fp16 = (torch.randn(n, dtype=torch.float16, device=dev),) + 
pointwise1_fp16 = (torch.randn(n, dtype=torch.float16, device=dev),) + pointwise2_fp16 = (torch.randn(n, dtype=torch.float16, device=dev),) + mat0_fp16 = (torch.randn((n, n), dtype=torch.float16, device=dev),) + mat1_fp16 = (torch.randn((n, n), dtype=torch.float16, device=dev),) + mat2_fp16 = (torch.randn((n, n), dtype=torch.float16, device=dev),) + + dimsets = ((n, n, n), (n, n, n, n), (n, n, n, n, n)) + conv_args_fp32 = [(torch.randn(dimset, dtype=torch.float32, device=dev), + torch.randn(dimset, dtype=torch.float32, device=dev)) + for dimset in dimsets] + bias_fp32 = (torch.randn((n,), dtype=torch.float32, device=dev),) + element0_fp32 = (torch.randn(1, dtype=torch.float32, device=dev),) + pointwise0_fp32 = (torch.randn(n, dtype=torch.float32, device=dev),) + pointwise1_fp32 = (torch.randn(n, dtype=torch.float32, device=dev),) + mat0_fp32 = (torch.randn((n, n), dtype=torch.float32, device=dev),) + mat1_fp32 = (torch.randn((n, n), dtype=torch.float32, device=dev),) + mat2_fp32 = (torch.randn((n, n), dtype=torch.float32, device=dev),) + mat3_fp32 = (torch.randn((n, n), dtype=torch.float32, device=dev),) + + # The lists below organize ops that autocast needs to test. + # self.list_name corresponds to test_autocast_list_name in test/test_cuda.py. + # Each op is associated with a tuple of valid arguments. + # In addition, cudnn conv ops are not supported on ROCm and hence will + # be skipped by passing TEST_WITH_ROCM flag to those ops in self.torch_fp16 list. + + # Some ops implement built-in type promotion. These don't need autocasting, + # but autocasting relies on their promotion, so we include tests to double-check. + self.torch_expect_builtin_promote = [ + ("eq", pointwise0_fp32 + pointwise1_fp16, torch.bool), + ("ge", pointwise0_fp32 + pointwise1_fp16, torch.bool), + ("gt", pointwise0_fp32 + pointwise1_fp16, torch.bool), + ("le", pointwise0_fp32 + pointwise1_fp16, torch.bool), + ("lt", pointwise0_fp32 + pointwise1_fp16, torch.bool), + ("ne", pointwise0_fp32 + pointwise1_fp16, torch.bool), + ("add", pointwise0_fp32 + pointwise1_fp16, torch.float32), + ("div", pointwise0_fp32 + pointwise1_fp16, torch.float32), + ("mul", pointwise0_fp32 + pointwise1_fp16, torch.float32), + ("cat", (pointwise0_fp16 + pointwise1_fp32,), torch.float32), + ("equal", pointwise0_fp32 + pointwise1_fp16, torch.float32), + ("stack", (pointwise0_fp16 + pointwise1_fp32,), torch.float32), + ] + self.methods_expect_builtin_promote = [ + ("__eq__", pointwise0_fp32 + pointwise1_fp16, torch.bool), + ("__ge__", pointwise0_fp32 + pointwise1_fp16, torch.bool), + ("__gt__", pointwise0_fp32 + pointwise1_fp16, torch.bool), + ("__le__", pointwise0_fp32 + pointwise1_fp16, torch.bool), + ("__lt__", pointwise0_fp32 + pointwise1_fp16, torch.bool), + ("__ne__", pointwise0_fp32 + pointwise1_fp16, torch.bool), + ("__add__", pointwise0_fp32 + pointwise1_fp16, torch.float32), + ("__div__", pointwise0_fp32 + pointwise1_fp16, torch.float32), + ("__mul__", pointwise0_fp32 + pointwise1_fp16, torch.float32), + ] + + # The remaining lists organize ops that autocast treats explicitly. 
+ self.torch_fp16 = [ + # deprecated _convolution + ("_convolution", conv_args_fp32[1] + bias_fp32 + ((1, 1), (0, 0), (1, 1), False, + (0, 0), 1, False, True, True)), + # the current _convolution + ("_convolution", conv_args_fp32[1] + bias_fp32 + ((1, 1), (0, 0), (1, 1), False, + (0, 0), 1, False, True, True, True)), + ("conv1d", conv_args_fp32[0]), + ("conv2d", conv_args_fp32[1]), + ("conv3d", conv_args_fp32[2]), + ("conv_tbc", conv_args_fp32[0] + bias_fp32), + ("conv_transpose1d", conv_args_fp32[0]), + ("conv_transpose2d", conv_args_fp32[1]), + ("conv_transpose3d", conv_args_fp32[2]), + ("convolution", conv_args_fp32[1] + bias_fp32 + ((1, 1), (0, 0), (1, 1), False, (0, 0), 1)), + ("cudnn_convolution", conv_args_fp32[1] + ((0, 0), (1, 1), (1, 1), 1, False, True, True), TEST_WITH_ROCM), + ("cudnn_convolution_transpose", conv_args_fp32[1] + ((0, 0), (0, 0), (1, 1), + (1, 1), 1, False, True, True), TEST_WITH_ROCM), + ("prelu", pointwise0_fp32 + element0_fp32), + ("addmm", mat1_fp32 + mat2_fp32 + mat3_fp32), + ("addmv", pointwise0_fp32 + mat2_fp32 + pointwise1_fp32), + ("addr", mat0_fp32 + pointwise0_fp32 + pointwise1_fp32), + ("matmul", mat0_fp32 + mat1_fp32), + ("einsum", "bkhd,bqhd->bqkh", mat0_fp32 + mat1_fp32), + ("mm", mat0_fp32 + mat1_fp32), + ("mv", mat0_fp32 + pointwise0_fp32), + ("chain_matmul", mat0_fp32 + mat1_fp32 + mat2_fp32), + ("addbmm", mat0_fp32 + (torch.randn((n, n, n), device=dev, dtype=torch.float32), + torch.randn((n, n, n), device=dev, dtype=torch.float32))), + ("baddbmm", (torch.randn((n, n, n), device=dev, dtype=torch.float32), + torch.randn((n, n, n), device=dev, dtype=torch.float32), + torch.randn((n, n, n), device=dev, dtype=torch.float32))), + ("bmm", (torch.randn((n, n, n), device=dev, dtype=torch.float32), + torch.randn((n, n, n), device=dev, dtype=torch.float32))), + # _thnn_fused_lstm_cell and _thnn_fused_gru_cell are not Python-exposed as far as I can tell. + # ("_thnn_fused_lstm_cell", mat0_fp32 + mat1_fp32 + mat2_fp32 + pointwise0_fp32 + pointwise1_fp32), + # ("_thnn_fused_gru_cell", mat0_fp32 + mat1_fp32 + mat2_fp32 + pointwise0_fp32 + pointwise1_fp32), + ("lstm_cell", self._rnn_cell_args(n, num_chunks=4, is_lstm=True, dev=dev, dtype=torch.float32)), + ("gru_cell", self._rnn_cell_args(n, num_chunks=3, is_lstm=False, dev=dev, dtype=torch.float32)), + ("rnn_tanh_cell", self._rnn_cell_args(n, num_chunks=1, is_lstm=False, dev=dev, dtype=torch.float32)), + ("rnn_relu_cell", self._rnn_cell_args(n, num_chunks=1, is_lstm=False, dev=dev, dtype=torch.float32)), + ] + self.torch_fp32 = [ + ("acos", (pointwise0_fp16[0].clamp(-.9, 0.9),)), + ("asin", (pointwise0_fp16[0].clamp(-.9, 0.9),)), + ("cosh", pointwise0_fp16), + ("erfinv", (pointwise0_fp16[0].clamp(-.9, .9),)), + ("exp", pointwise0_fp16), + ("expm1", pointwise0_fp16), + ("log", (pointwise0_fp16[0].clamp(0.1, 100.0),)), + ("log10", (pointwise0_fp16[0].clamp(0.1, 100.0),)), + ("log2", (pointwise0_fp16[0].clamp(0.1, 100.0),)), + ("log1p", (pointwise0_fp16[0].clamp(-0.9, 100.0),)), + ("reciprocal", pointwise0_fp16), + ("rsqrt", (pointwise0_fp16[0].clamp(0.0, 100.0),)), + ("sinh", pointwise0_fp16), + ("tan", (pointwise0_fp16[0].clamp(-3.1 / 2, 3.1 / 2),)), + ("pow", ((pointwise0_fp16[0] + 1.).clamp(0.0, 100.0),) + pointwise1_fp16), + ("pow", ((pointwise0_fp16[0] + 1.).clamp(0.0, 100.0),) + (1.7,)), + # ("pow", (1.7,) + pointwise0_fp16), # This variant has a backend, but is not documented in the API. 
+ ("softmax", pointwise0_fp16 + (0,)), + ("log_softmax", pointwise0_fp16 + (0,)), + ("layer_norm", pointwise0_fp16 + ((pointwise0_fp16[0].numel(),),)), + ("group_norm", mat0_fp16 + (1,)), + ("norm", pointwise0_fp16), + ("norm", pointwise0_fp16, {"dim": 0}), + # these need magma + # ("norm", mat0_fp16, {"p": "nuc"}), + # ("norm", mat0_fp16, {"p": "nuc", "dim": 0}), + ("norm", pointwise0_fp16, {"p": 1}), + ("norm", pointwise0_fp16, {"p": 1, "dim": 0}), + ("cosine_similarity", mat0_fp16 + mat1_fp16), + ("poisson_nll_loss", mat0_fp16 + mat1_fp16 + (True, False, 1.e-8, torch.nn._reduction.get_enum('mean'))), + ("cosine_embedding_loss", (torch.tensor([[1, 2, 3]], device=dev, dtype=torch.float16), + torch.tensor([[1, 3, 4]], device=dev, dtype=torch.float16), + torch.tensor([1], device=dev, dtype=torch.int))), + ("hinge_embedding_loss", mat0_fp16 + (torch.ones(n, device=dev, dtype=torch.int),)), + ("kl_div", mat0_fp16 + (torch.rand((n, n), device=dev, dtype=torch.float16),)), + ("margin_ranking_loss", mat0_fp16 + mat1_fp16 + (torch.ones((n,), device=dev, dtype=torch.float16),)), + ("triplet_margin_loss", mat0_fp16 + mat1_fp16 + mat2_fp16), + ("binary_cross_entropy_with_logits", mat0_fp16 + (torch.rand((n, n), device=dev, dtype=torch.float16),)), + ("cumprod", pointwise0_fp16 + (0,)), + ("cumsum", pointwise0_fp16 + (0,)), + ("dist", pointwise0_fp16 + pointwise1_fp16), + ("pdist", mat0_fp16), + ("cdist", mat0_fp16 + mat1_fp16), + ("prod", pointwise0_fp16), + ("prod", pointwise0_fp16 + (0,)), + ("renorm", mat0_fp16 + (2, 0, 1.0)), + ("sum", pointwise0_fp16), + ("sum", mat0_fp16 + (1,)), + ("logsumexp", mat0_fp16 + (1,)), + ] + self.torch_need_autocast_promote = [ + ("addcdiv", pointwise0_fp32 + pointwise1_fp16 + (pointwise2_fp16[0].clamp(0.1, 100),)), + ("addcmul", pointwise0_fp32 + pointwise1_fp16 + pointwise2_fp16), + ("atan2", pointwise0_fp32 + (pointwise1_fp16[0].clamp(0.1, 100),)), + ("bilinear", (torch.randn((1, 2), dtype=torch.float16, device=dev), + torch.randn((1, 2), dtype=torch.float32, device=dev), + torch.randn((1, 2, 2), dtype=torch.float16, device=dev), + torch.randn((1,), dtype=torch.float32, device=dev))), + ("cross", (torch.randn(3, dtype=torch.float32, device=dev), + torch.randn(3, dtype=torch.float16, device=dev))), + ("dot", pointwise0_fp16 + pointwise1_fp32), + ("grid_sampler", (torch.randn((2, 3, 33, 22), dtype=torch.float16, device=dev), + torch.randn((2, 22, 11, 2), dtype=torch.float32, device=dev), + 0, 0, False)), + ("index_put", pointwise0_fp32 + ((torch.tensor([1], device=dev, dtype=torch.long),), + torch.randn(1, device=dev, dtype=torch.float16))), + ("index_put", pointwise0_fp16 + ((torch.tensor([1], device=dev, dtype=torch.long),), + torch.randn(1, device=dev, dtype=torch.float32))), + ("tensordot", (torch.randn((2, 2, 2), dtype=torch.float32, device=dev), + torch.randn((2, 2, 2), dtype=torch.float16, device=dev))), + ("scatter_add", (torch.zeros(2, 2, 2, dtype=torch.float32, device=dev), + 0, + torch.randint(0, 2, (2, 2, 2), device=dev), + torch.randn((2, 2, 2), dtype=torch.float16, device=dev))), + ("scatter_add", (torch.zeros(2, 2, 2, dtype=torch.float16, device=dev), + 0, + torch.randint(0, 2, (2, 2, 2), device=dev), + torch.randn((2, 2, 2), dtype=torch.float32, device=dev))), + ] + self.nn_fp16 = [ + ("linear", mat0_fp32 + mat1_fp32 + mat2_fp32), + ] + self.nn_fp32 = [ + ("softplus", pointwise0_fp16), + ("nll_loss", (torch.rand((n, n), device=dev, dtype=torch.float), + torch.zeros((n,), device=dev, dtype=torch.long))), + ("nll_loss2d", (torch.rand((n, n, n, n), 
device=dev, dtype=torch.half), + torch.zeros((n, n, n), device=dev, dtype=torch.long))), + ("l1_loss", mat0_fp16 + mat1_fp16), + ("smooth_l1_loss", mat0_fp16 + mat1_fp16), + ("mse_loss", mat0_fp16 + mat1_fp16), + ("multilabel_margin_loss", mat0_fp16 + (torch.ones((n, n), device=dev, dtype=torch.long),)), + ("soft_margin_loss", mat0_fp16 + (torch.ones((n, n), device=dev, dtype=torch.long),)), + ("multi_margin_loss", mat0_fp16 + (torch.ones((n,), device=dev, dtype=torch.long),)), + ] + self.linalg_fp16 = [ + ("linalg_vecdot", mat0_fp32 + mat0_fp32), + ("linalg_multi_dot", (mat0_fp32 + mat1_fp32 + mat2_fp32,)), + ] + self.methods_fp16 = [ + ("__matmul__", mat0_fp32 + mat1_fp32) + ] + self.methods_fp32 = [ + ("__pow__", (torch.rand(n, device=dev, dtype=torch.float16), 1.5)), + ] + self.banned = [ + ("binary_cross_entropy", (torch.rand((n, n), device=dev, dtype=torch.float32), + torch.rand((n, n), device=dev, dtype=torch.float32)), torch._C._nn), + ] + +class AutocastCPUTestLists: + # Supplies ops and arguments for test_autocast_* in test/test_cpu.py + def __init__(self, dev): + super().__init__() + n = 8 + # Utility arguments, created as one-element tuples + pointwise0_bf16 = (torch.randn(n, dtype=torch.bfloat16, device=dev),) + pointwise1_bf16 = (torch.randn(n, dtype=torch.bfloat16, device=dev),) + pointwise2_bf16 = (torch.randn(n, dtype=torch.bfloat16, device=dev),) + mat0_bf16 = (torch.randn((n, n), dtype=torch.bfloat16, device=dev),) + mat1_bf16 = (torch.randn((n, n), dtype=torch.bfloat16, device=dev),) + mat2_bf16 = (torch.randn((n, n), dtype=torch.bfloat16, device=dev),) + + pointwise0_fp16 = (torch.randn(n, dtype=torch.float16, device=dev),) + pointwise1_fp16 = (torch.randn(n, dtype=torch.float16, device=dev),) + + dummy_dimsets = ((n,), (n, n), (n, n, n), (n, n, n, n), (n, n, n, n, n)) + + dummy_bf16 = [(torch.randn(dimset, dtype=torch.bfloat16, device=dev),) + for dimset in dummy_dimsets] + + dimsets = ((n, n, n), (n, n, n, n), (n, n, n, n, n)) + conv_args_bf16 = [(torch.randn(dimset, dtype=torch.bfloat16, device=dev), + torch.randn(dimset, dtype=torch.bfloat16, device=dev)) + for dimset in dimsets] + conv_args_fp32 = [(torch.randn(dimset, dtype=torch.float32, device=dev), + torch.randn(dimset, dtype=torch.float32, device=dev)) + for dimset in dimsets] + + bias_fp32 = (torch.randn((n,), dtype=torch.float32, device=dev),) + element0_fp32 = (torch.randn(1, dtype=torch.float32, device=dev),) + pointwise0_fp32 = (torch.randn(n, dtype=torch.float32, device=dev),) + pointwise1_fp32 = (torch.randn(n, dtype=torch.float32, device=dev),) + mat0_fp32 = (torch.randn((n, n), dtype=torch.float32, device=dev),) + mat1_fp32 = (torch.randn((n, n), dtype=torch.float32, device=dev),) + mat2_fp32 = (torch.randn((n, n), dtype=torch.float32, device=dev),) + mat3_fp32 = (torch.randn((n, n), dtype=torch.float32, device=dev),) + + dummy_fp32 = [(torch.randn(dimset, dtype=torch.float32, device=dev),) + for dimset in dummy_dimsets] + # The lists below organize ops that autocast needs to test. + # self.list_name corresponds to test_autocast_list_name in test/test_cpu.py. + # Each op is associated with a tuple of valid arguments. + + # Some ops implement built-in type promotion. These don't need autocasting, + # but autocasting relies on their promotion, so we include tests to double-check. 
+ self.torch_expect_builtin_promote = [ + ("eq", pointwise0_fp32 + pointwise1_bf16, pointwise0_fp32 + pointwise1_fp16, torch.bool), + ("ge", pointwise0_fp32 + pointwise1_bf16, pointwise0_fp32 + pointwise1_fp16, torch.bool), + ("gt", pointwise0_fp32 + pointwise1_bf16, pointwise0_fp32 + pointwise1_fp16, torch.bool), + ("le", pointwise0_fp32 + pointwise1_bf16, pointwise0_fp32 + pointwise1_fp16, torch.bool), + ("lt", pointwise0_fp32 + pointwise1_bf16, pointwise0_fp32 + pointwise1_fp16, torch.bool), + ("ne", pointwise0_fp32 + pointwise1_bf16, pointwise0_fp32 + pointwise1_fp16, torch.bool), + ("add", pointwise0_fp32 + pointwise1_bf16, pointwise0_fp32 + pointwise1_fp16, torch.float32), + ("div", pointwise0_fp32 + pointwise1_bf16, pointwise0_fp32 + pointwise1_fp16, torch.float32), + ("mul", pointwise0_fp32 + pointwise1_bf16, pointwise0_fp32 + pointwise1_fp16, torch.float32), + ] + + self.methods_expect_builtin_promote = [ + ("__eq__", pointwise0_fp32 + pointwise1_bf16, pointwise0_fp32 + pointwise1_fp16, torch.bool), + ("__ge__", pointwise0_fp32 + pointwise1_bf16, pointwise0_fp32 + pointwise1_fp16, torch.bool), + ("__gt__", pointwise0_fp32 + pointwise1_bf16, pointwise0_fp32 + pointwise1_fp16, torch.bool), + ("__le__", pointwise0_fp32 + pointwise1_bf16, pointwise0_fp32 + pointwise1_fp16, torch.bool), + ("__lt__", pointwise0_fp32 + pointwise1_bf16, pointwise0_fp32 + pointwise1_fp16, torch.bool), + ("__ne__", pointwise0_fp32 + pointwise1_bf16, pointwise0_fp32 + pointwise1_fp16, torch.bool), + ("__add__", pointwise0_fp32 + pointwise1_bf16, pointwise0_fp32 + pointwise1_fp16, torch.float32), + ("__div__", pointwise0_fp32 + pointwise1_bf16, pointwise0_fp32 + pointwise1_fp16, torch.float32), + ("__mul__", pointwise0_fp32 + pointwise1_bf16, pointwise0_fp32 + pointwise1_fp16, torch.float32), + ] + # The remaining lists organize ops that autocast treats explicitly. 
+ self.torch_16 = [ + ("conv1d", conv_args_fp32[0]), + ("conv2d", conv_args_fp32[1]), + ("conv3d", conv_args_fp32[2]), + ("bmm", (torch.randn((n, n, n), device=dev, dtype=torch.float32), + torch.randn((n, n, n), device=dev, dtype=torch.float32))), + ("mm", mat0_fp32 + mat1_fp32), + ("matmul", mat0_fp32 + mat1_fp32), + ("baddbmm", (torch.randn((n, n, n), device=dev, dtype=torch.float32), + torch.randn((n, n, n), device=dev, dtype=torch.float32), + torch.randn((n, n, n), device=dev, dtype=torch.float32))), + ("addmm", mat1_fp32 + mat2_fp32 + mat3_fp32), + ("addbmm", mat0_fp32 + (torch.randn((n, n, n), device=dev, dtype=torch.float32), + torch.randn((n, n, n), device=dev, dtype=torch.float32))), + ("conv_tbc", (torch.randn((10, 7, 3), device=dev, dtype=torch.float32), + torch.randn((5, 3, 5), device=dev, dtype=torch.float32), + torch.randn(5, device=dev, dtype=torch.float32), + 0)), + ("conv_transpose1d", conv_args_fp32[0]), + ("conv_transpose2d", conv_args_fp32[1]), + ("conv_transpose3d", conv_args_fp32[2]), + ("prelu", pointwise0_fp32 + element0_fp32), + ("_native_multi_head_attention", (torch.randn((n, n, n), device=dev, dtype=torch.float32), + torch.randn((n, n, n), device=dev, dtype=torch.float32), + torch.randn((n, n, n), device=dev, dtype=torch.float32), + n, 4, torch.randn((3 * n, n), device=dev, dtype=torch.float32), + torch.randn((3 * n), device=dev, dtype=torch.float32), + torch.randn((n, n), device=dev, dtype=torch.float32), + torch.randn((n), device=dev, dtype=torch.float32))), + ] + self.torch_fp32 = [ + ("poisson_nll_loss", mat0_bf16 + mat1_bf16 + (True, False, 1.e-8, torch.nn._reduction.get_enum('mean'))), + ("cosine_embedding_loss", (torch.tensor([[1, 2, 3]], device=dev, dtype=torch.bfloat16), + torch.tensor([[1, 3, 4]], device=dev, dtype=torch.bfloat16), + torch.tensor([1], device=dev, dtype=torch.int))), + ("hinge_embedding_loss", mat0_bf16 + (torch.ones(n, device=dev, dtype=torch.int),)), + ("margin_ranking_loss", mat0_bf16 + mat1_bf16 + (torch.ones((n,), device=dev, dtype=torch.bfloat16),)), + ("triplet_margin_loss", mat0_bf16 + mat1_bf16 + mat2_bf16), + ("binary_cross_entropy_with_logits", mat0_bf16 + (torch.rand((n, n), device=dev, dtype=torch.bfloat16),)), + ] + self.nn_16 = [ + ("linear", mat0_fp32 + mat1_fp32, {}), + ] + self.nn_fp32 = [ + ("avg_pool3d", dummy_bf16[3], {"kernel_size": (3, 3, 3), "stride": (1, 1, 1)}), + ("binary_cross_entropy", (torch.rand((n, n), device=dev, dtype=torch.bfloat16),) + + (torch.rand((n, n), device=dev, dtype=torch.bfloat16),)), + ("reflection_pad1d", dummy_bf16[2], {"padding": (3, 3)}), + ("nll_loss", (torch.rand((n, n), device=dev, dtype=torch.bfloat16), + torch.zeros((n,), device=dev, dtype=torch.long))), + ("nll_loss2d", (torch.rand((n, n, n, n), device=dev, dtype=torch.bfloat16), + torch.zeros((n, n, n), device=dev, dtype=torch.long))), + ("l1_loss", mat0_bf16 + mat1_bf16), + ("smooth_l1_loss", mat0_bf16 + mat1_bf16), + ("mse_loss", mat0_bf16 + mat1_bf16), + ("multilabel_margin_loss", mat0_bf16 + (torch.ones((n, n), device=dev, dtype=torch.long),)), + ("soft_margin_loss", mat0_bf16 + (torch.ones((n, n), device=dev, dtype=torch.long),)), + ("multi_margin_loss", mat0_bf16 + (torch.ones((n,), device=dev, dtype=torch.long),)), + ("huber_loss", mat0_bf16 + mat1_bf16), + ] + self.torch_need_autocast_promote = [ + ("cat", (pointwise0_bf16 + pointwise1_fp32,), (pointwise0_fp16 + pointwise1_fp32,)), + ("stack", (pointwise0_bf16 + pointwise1_fp32,), (pointwise0_fp16 + pointwise1_fp32,)), + ] diff --git 
a/env-llmeval/lib/python3.10/site-packages/torch/testing/_internal/autograd_function_db.py b/env-llmeval/lib/python3.10/site-packages/torch/testing/_internal/autograd_function_db.py new file mode 100644 index 0000000000000000000000000000000000000000..0a16cae3aca3f53c87e5f967f8d064e08a53df60 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torch/testing/_internal/autograd_function_db.py @@ -0,0 +1,630 @@ +import torch +from functools import partial +from torch.testing import make_tensor +from torch.testing._internal.opinfo.core import ( + OpInfo, + SampleInput, +) +from torch.testing._internal.common_dtype import all_types_and +import numpy as np + +# Note: [autograd.Function db] +# +# This is a collection of autograd.Function test cases written as OpInfos +# so they can easily be consumed by OpInfo-based tests to check if a subsystem +# supports autograd.Function. +# +# Axes: +# - saves {output, input, intermediate, non-tensor} +# - {inputs, output} x {single tensor, tensors, arbitrary objects} +# - Uses {mark_dirty, mark_non_differentiable, once_differentiable} + + +def to_numpy(tensor): + return tensor.cpu().numpy() + + +class NumpyCube(torch.autograd.Function): + @staticmethod + def forward(input): + input_np = to_numpy(input) + dinput = torch.tensor(3 * input_np ** 2, device=input.device) + return torch.tensor(input_np ** 3, device=input.device), dinput + + @staticmethod + def setup_context(ctx, inputs, output): + ctx.save_for_backward(inputs[0], output[1]) + ctx.save_for_forward(inputs[0], output[1]) + + @staticmethod + def backward(ctx, grad_output, grad_saved): + input, dinput = ctx.saved_tensors + return NumpyMul.apply(grad_output, dinput) + 6 * NumpyMul.apply(grad_saved, input) + + @staticmethod + def vmap(info, in_dims, input): + result = NumpyCube.apply(input) + return result, (in_dims[0], in_dims[0]) + + @staticmethod + def jvp(ctx, input_tangent): + input, dinput = ctx.saved_tensors + return NumpyMul.apply(input_tangent, dinput), 6 * NumpyMul.apply(input_tangent, input) + + +class CubeGenVmap(torch.autograd.Function): + generate_vmap_rule = True + + @staticmethod + def forward(x): + return x ** 3, 3 * x ** 2 + + @staticmethod + def setup_context(ctx, inputs, outputs): + ctx.save_for_backward(inputs[0], outputs[1]) + ctx.save_for_forward(inputs[0], outputs[1]) + + @staticmethod + def backward(ctx, grad_output, grad_saved): + input, dinput = ctx.saved_tensors + result = grad_output * dinput + 6 * dinput + return result + + @staticmethod + def jvp(ctx, input_tangent): + input, dinput = ctx.saved_tensors + return MulGenVmap.apply(input_tangent, dinput), 6 * NumpyMul.apply(input_tangent, input) + + +def sample_inputs_numpy_cube(opinfo, device, dtype, requires_grad, **kwargs): + make_arg = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad) + yield SampleInput(make_arg(1, low=0.8, high=2), args=()) + + +class NumpyCubeNotComposable(torch.autograd.Function): + @staticmethod + def forward(input): + input_np = to_numpy(input) + return torch.tensor(input_np ** 3, device=input.device), input_np + + @staticmethod + def setup_context(ctx, inputs, output): + _, input_np = output + ctx.input_np = input_np + ctx.device = inputs[0].device + + @staticmethod + @torch.autograd.function.once_differentiable + def backward(ctx, grad_output, grad_saved): + result_np = 3 * (ctx.input_np ** 2) + return torch.tensor(result_np, device=ctx.device) + + +class NumpyMul(torch.autograd.Function): + @staticmethod + def forward(x, y): + return torch.tensor(to_numpy(x) * 
to_numpy(y), device=x.device) + + @staticmethod + def setup_context(ctx, inputs, output): + ctx.save_for_backward(*inputs) + ctx.save_for_forward(*inputs) + + @staticmethod + def backward(ctx, grad_output): + x, y = ctx.saved_tensors + gx = None + if ctx.needs_input_grad[0]: + gx = NumpyMul.apply(grad_output, y) + gy = None + if ctx.needs_input_grad[1]: + gy = NumpyMul.apply(grad_output, x) + return gx, gy + + @staticmethod + def vmap(info, in_dims, x, y): + x_bdim, y_bdim = in_dims + x = x.movedim(x_bdim, -1) if x_bdim is not None else x.unsqueeze(-1) + y = y.movedim(y_bdim, -1) if y_bdim is not None else y.unsqueeze(-1) + result = NumpyMul.apply(x, y) + result = result.movedim(-1, 0) + return result, 0 + + @staticmethod + def jvp(ctx, x_tangent, y_tangent): + x, y = ctx.saved_tensors + return x_tangent * y + y_tangent * x + +def sample_inputs_numpy_mul(opinfo, device, dtype, requires_grad, **kwargs): + make_arg = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad) + # Broadcasting + yield SampleInput(make_arg(4, low=0.9, high=2), args=(make_arg(3, 4, low=0.9, high=2),)) + + +class MulGenVmap(torch.autograd.Function): + generate_vmap_rule = True + + @staticmethod + def forward(x, y): + return x * y + + @staticmethod + def setup_context(ctx, inputs, outputs): + ctx.save_for_backward(*inputs) + ctx.save_for_forward(*inputs) + + @staticmethod + def backward(ctx, grad_output): + x, y = ctx.saved_tensors + gx = None + if ctx.needs_input_grad[0]: + gx = MulGenVmap.apply(grad_output, y) + gy = None + if ctx.needs_input_grad[1]: + gy = MulGenVmap.apply(grad_output, x) + return gx, gy + + @staticmethod + def jvp(ctx, x_tangent, y_tangent): + x, y = ctx.saved_tensors + return x_tangent * y + y_tangent * x + + +class NumpyExp_(torch.autograd.Function): + @staticmethod + def forward(x): + x_np = to_numpy(x) + np.exp(x_np, x_np) + return x + + @staticmethod + def setup_context(ctx, inputs, output): + x, = inputs + ctx.mark_dirty(x) + ctx.save_for_backward(output) + ctx.save_for_forward(output) + + @staticmethod + def backward(ctx, grad_output): + output, = ctx.saved_tensors + return NumpyMul.apply(grad_output, output) + + @staticmethod + def vmap(info, in_dims, x): + NumpyExp_.apply(x) + return x, in_dims[0] + + @staticmethod + def jvp(ctx, x_tangent): + # Doesn't call numpy operations because I didn't want to write NumpyMul_ + output, = ctx.saved_tensors + x_tangent.mul_(output) + return x_tangent + +class NumpySort(torch.autograd.Function): + @staticmethod + def forward(x, dim): + device = x.device + x = to_numpy(x) + ind = np.argsort(x, axis=dim) + ind_inv = np.argsort(ind, axis=dim) + result = np.take_along_axis(x, ind, axis=dim) + return ( + torch.tensor(x, device=device), + torch.tensor(ind, device=device), + torch.tensor(ind_inv, device=device), + ) + + @staticmethod + def setup_context(ctx, inputs, output): + x, dim = inputs + _, ind, ind_inv = output + ctx.mark_non_differentiable(ind, ind_inv) + ctx.save_for_backward(ind, ind_inv) + ctx.save_for_forward(ind, ind_inv) + ctx.dim = dim + + @staticmethod + def backward(ctx, grad_output, _0, _1): + ind, ind_inv = ctx.saved_tensors + return NumpyTake.apply(grad_output, ind_inv, ind, ctx.dim), None + + @staticmethod + def vmap(info, in_dims, x, dim): + x_bdim, _ = in_dims + x = x.movedim(x_bdim, 0) + # wrap dim + dim = dim if dim >= 0 else dim + x.dim() - 1 + return NumpySort.apply(x, dim + 1), (0, 0, 0) + + @staticmethod + def jvp(ctx, x_tangent, _): + ind, ind_inv = ctx.saved_tensors + return NumpyTake.apply(x_tangent, 
ind, ind_inv, ctx.dim), None, None + +class SortGenVmap(torch.autograd.Function): + generate_vmap_rule = True + + @staticmethod + def forward(x, dim): + device = x.device + ind = torch.argsort(x, dim=dim) + ind_inv = torch.argsort(ind, axis=dim) + result = torch.take_along_dim(x, ind, dim=dim) + return result, ind, ind_inv + + @staticmethod + def setup_context(ctx, inputs, outputs): + x, dim = inputs + _, ind, ind_inv = outputs + ctx.mark_non_differentiable(ind, ind_inv) + ctx.save_for_backward(ind, ind_inv) + ctx.save_for_forward(ind, ind_inv) + ctx.dim = dim + + @staticmethod + def backward(ctx, grad_output, _0, _1): + ind, ind_inv = ctx.saved_tensors + return TakeGenVmap.apply(grad_output, ind_inv, ind, ctx.dim), None + + @staticmethod + def jvp(ctx, x_tangent, _): + ind, ind_inv = ctx.saved_tensors + return TakeGenVmap.apply(x_tangent, ind, ind_inv, ctx.dim), None, None + + +def sample_inputs_numpy_sort(opinfo, device, dtype, requires_grad, **kwargs): + make_arg = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad) + yield SampleInput(make_arg(3, 5), args=(1,)) + + +def sample_inputs_numpy_take(opinfo, device, dtype, requires_grad, **kwargs): + make_arg = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad) + tensor = make_arg(3, 5) + dim = 1 + _, ind, ind_inv = NumpySort.apply(tensor, 1) + yield SampleInput(tensor, args=(ind, ind_inv, dim)) + + +class NumpyTake(torch.autograd.Function): + @staticmethod + def forward(x, ind, ind_inv, dim): + device = x.device + x = to_numpy(x) + ind = to_numpy(ind) + return torch.tensor(np.take_along_axis(x, ind, dim), device=device) + + @staticmethod + def setup_context(ctx, inputs, output): + x, ind, ind_inv, dim = inputs + ctx.save_for_backward(ind, ind_inv) + ctx.save_for_forward(ind, ind_inv) + ctx.dim = dim + + @staticmethod + def backward(ctx, grad_output): + ind, ind_inv = ctx.saved_tensors + result = NumpyTake.apply(grad_output, ind_inv, ind, ctx.dim) + return result, None, None, None + + @staticmethod + def vmap(info, in_dims, x, ind, ind_inv, dim): + x_bdim, ind_bdim, ind_inv_bdim, _ = in_dims + + # wrap dim + logical_dim = x.dim() if x_bdim is None else x_bdim - 1 + dim = dim if dim >= 0 else dim + logical_dim + + def expand_bdim(x, x_bdim): + if x_bdim is None: + return x.expand(info.batch_size, *x.shape) + return x.movedim(x_bdim, 0) + + x = expand_bdim(x, x_bdim) + ind = expand_bdim(ind, ind_bdim) + ind_inv = expand_bdim(ind_inv, ind_inv_bdim) + + return NumpyTake.apply(x, ind, ind_inv, dim + 1), 0 + + @staticmethod + def jvp(ctx, x_tangent, ind_tangent, ind_inv_tangent, _): + assert ind_tangent is None + assert ind_inv_tangent is None + ind, ind_inv = ctx.saved_tensors + return NumpyTake.apply(x_tangent, ind, ind_inv, ctx.dim) + +class TakeGenVmap(torch.autograd.Function): + generate_vmap_rule = True + + @staticmethod + def forward(x, ind, ind_inv, dim): + return torch.take_along_dim(x, ind, dim) + + @staticmethod + def setup_context(ctx, inputs, outputs): + x, ind, ind_inv, dim = inputs + ctx.save_for_backward(ind, ind_inv) + ctx.save_for_forward(ind, ind_inv) + ctx.dim = dim + + @staticmethod + def backward(ctx, grad_output): + ind, ind_inv = ctx.saved_tensors + result = TakeGenVmap.apply(grad_output, ind_inv, ind, ctx.dim) + return result, None, None, None + + @staticmethod + def jvp(ctx, x_tangent, ind_tangent, ind_inv_tangent, _): + ind, ind_inv = ctx.saved_tensors + return TakeGenVmap.apply(x_tangent, ind, ind_inv, ctx.dim) + +class Select(torch.autograd.Function): + @staticmethod 
+ def forward(x, idx): + return x[idx] + + @staticmethod + def setup_context(ctx, inputs, output): + x, idx = inputs + ctx.x_shape = x.shape + ctx.idx = idx + + @staticmethod + def backward(ctx, grad_output): + result = grad_output.new_zeros(ctx.x_shape) + result[ctx.idx] = grad_output + return result, None + + @staticmethod + def vmap(info, in_dims, x, idx): + x_bdim, _ = in_dims + x = x.movedim(x_bdim, 1) + return Select.apply(x, idx), 0 + + @staticmethod + def jvp(ctx, x_tangent, _): + return Select.apply(x_tangent, ctx.idx) + +class SelectGenVmap(torch.autograd.Function): + generate_vmap_rule = True + + @staticmethod + def forward(x, idx): + return x[idx] + + @staticmethod + def setup_context(ctx, inputs, outputs): + x, idx = inputs + ctx.x_shape = x.shape + ctx.idx = idx + + @staticmethod + def backward(ctx, grad_output): + result = grad_output.new_zeros(ctx.x_shape) + result[ctx.idx] = grad_output + return result, None + + @staticmethod + def jvp(ctx, x_tangent, _): + return SelectGenVmap.apply(x_tangent, ctx.idx) + + +def sample_inputs_select(opinfo, device, dtype, requires_grad, **kwargs): + make_arg = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad) + yield SampleInput(make_arg(3, 5), args=(2,)) + +class ScaleGradGenVmap(torch.autograd.Function): + generate_vmap_rule = True + scale = 3.14 + + @staticmethod + def forward(x): + return x.clone() + + @staticmethod + def setup_context(ctx, inputs, outputs): + pass + + @staticmethod + def backward(ctx, grad_output): + return grad_output * ScaleGradGenVmap.scale + + @staticmethod + def jvp(ctx, x_tangent): + return x_tangent * ScaleGradGenVmap.scale + +class ZeroGradientsGenVmap(torch.autograd.Function): + generate_vmap_rule = True + + @staticmethod + def forward(x, y): + return x.clone(), y.clone() + + @staticmethod + def setup_context(ctx, inputs, outputs): + pass + + @staticmethod + def backward(ctx, gx, gy): + # Intentionally returning torch.zeros instead of zeros_like or new_zeros. + # Also intentionally not None. + return ( + # Intentionally too-large gradient + torch.zeros(3, 4, *gx.shape, dtype=gx.dtype, device=gx.device), + torch.zeros(gy.shape, dtype=gy.dtype, device=gy.device), + ) + + @staticmethod + def jvp(ctx, gx, gy): + # Intentionally returning torch.zeros instead of zeros_like or new_zeros. + # Also intentionally not None. 
+ return ( + torch.zeros(gx.shape, dtype=gx.dtype, device=gx.device), + torch.zeros(gy.shape, dtype=gy.dtype, device=gy.device), + ) + + +def sample_inputs_forward_default_args(opinfo, device, dtype, requires_grad, **kwargs): + make_arg = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad) + yield SampleInput(make_arg(3, 5)) + + +class ForwardHasDefaultArgs(torch.autograd.Function): + @staticmethod + def forward(x, idx=(2,)): + return x[idx] + + @staticmethod + def setup_context(ctx, inputs, output): + x, idx = inputs + ctx.x_shape = x.shape + ctx.idx = idx + + @staticmethod + def backward(ctx, grad_output): + result = grad_output.new_zeros(ctx.x_shape) + result[ctx.idx] = grad_output + return result, None + + @staticmethod + def vmap(info, in_dims, x, idx): + x_bdim, _ = in_dims + x = x.movedim(x_bdim, 1) + return ForwardHasDefaultArgs.apply(x, idx), 0 + + @staticmethod + def jvp(ctx, x_tangent, _): + return ForwardHasDefaultArgs.apply(x_tangent, ctx.idx) + + +autograd_function_db = [ + OpInfo( + 'NumpyCubeAutogradFunction', + op=NumpyCube.apply, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + sample_inputs_func=sample_inputs_numpy_cube, + dtypes=all_types_and(torch.bool, torch.half), + supports_out=False, + ), + OpInfo( + 'NumpyExpMarkDirtyAutogradFunction', + op=lambda x: NumpyExp_.apply(x.clone()), + inplace_variant=NumpyExp_.apply, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + sample_inputs_func=sample_inputs_numpy_cube, + dtypes=all_types_and(torch.bool, torch.half), + supports_out=False, + ), + OpInfo( + 'NumpyMulAutogradFunction', + op=NumpyMul.apply, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + sample_inputs_func=sample_inputs_numpy_mul, + dtypes=all_types_and(torch.bool, torch.half), + supports_out=False, + ), + OpInfo( + 'NumpyCubeNotComposableAutogradFunction', + op=lambda x: NumpyCubeNotComposable.apply(x)[0], + supports_forward_ad=False, + supports_fwgrad_bwgrad=False, + sample_inputs_func=sample_inputs_numpy_cube, + dtypes=all_types_and(torch.bool, torch.half), + supports_out=False, + ), + OpInfo( + 'NumpySortAutogradFunction', + op=NumpySort.apply, + supports_forward_ad=False, + supports_fwgrad_bwgrad=False, + sample_inputs_func=sample_inputs_numpy_sort, + dtypes=all_types_and(torch.bool, torch.half), + supports_out=False, + gradcheck_wrapper=lambda y, ind: y, + ), + OpInfo( + 'NumpyTakeAutogradFunction', + op=NumpyTake.apply, + supports_forward_ad=False, + supports_fwgrad_bwgrad=False, + sample_inputs_func=sample_inputs_numpy_take, + dtypes=all_types_and(torch.bool, torch.half), + supports_out=False, + ), + OpInfo( + 'SelectAutogradFunction', + op=Select.apply, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + sample_inputs_func=sample_inputs_select, + dtypes=all_types_and(torch.bool, torch.half), + supports_out=False, + ), + OpInfo( + 'CubeGenVmapAutogradFunction', + op=CubeGenVmap.apply, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + sample_inputs_func=sample_inputs_numpy_cube, + dtypes=all_types_and(torch.bool, torch.half), + supports_out=False, + ), + OpInfo( + 'MulGenVmapAutogradFunction', + op=MulGenVmap.apply, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + sample_inputs_func=sample_inputs_numpy_mul, + dtypes=all_types_and(torch.bool, torch.half), + supports_out=False, + ), + OpInfo( + 'SortGenVmapAutogradFunction', + op=SortGenVmap.apply, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + sample_inputs_func=sample_inputs_numpy_sort, + 
dtypes=all_types_and(torch.bool, torch.half), + supports_out=False, + gradcheck_wrapper=lambda y, ind: y, + ), + OpInfo( + 'SelectGenVmapAutogradFunction', + op=SelectGenVmap.apply, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + sample_inputs_func=sample_inputs_select, + dtypes=all_types_and(torch.bool, torch.half), + supports_out=False, + ), + OpInfo( + 'ScaleGradGenVmapAutogradFunction', + op=ScaleGradGenVmap.apply, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + sample_inputs_func=sample_inputs_numpy_cube, + dtypes=all_types_and(torch.bool, torch.half), + supports_out=False, + ), + OpInfo( + 'ZeroGradientsGenVmapAutogradFunction', + op=ZeroGradientsGenVmap.apply, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + sample_inputs_func=sample_inputs_numpy_mul, + dtypes=all_types_and(torch.bool, torch.half), + supports_out=False, + ), + OpInfo( + 'ForwardHasDefaultArgsAutogradFunction', + op=ForwardHasDefaultArgs.apply, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + sample_inputs_func=sample_inputs_forward_default_args, + dtypes=all_types_and(torch.bool, torch.half), + supports_out=False, + ), +] diff --git a/env-llmeval/lib/python3.10/site-packages/torch/testing/_internal/check_kernel_launches.py b/env-llmeval/lib/python3.10/site-packages/torch/testing/_internal/check_kernel_launches.py new file mode 100644 index 0000000000000000000000000000000000000000..131ea461ce544a4900d0789a4d206b45f6730d20 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torch/testing/_internal/check_kernel_launches.py @@ -0,0 +1,163 @@ +import os +import re +import sys +from typing import List + +__all__ = [ + "check_code_for_cuda_kernel_launches", + "check_cuda_kernel_launches", +] + +# FILES TO EXCLUDE (match is done with suffix using `endswith`) +# You wouldn't drive without a seatbelt, though, so why would you +# launch a kernel without some safety? Use this as a quick workaround +# for a problem with the checker, fix the checker, then de-exclude +# the files in question. +exclude_files: List[str] = [] + +# Without using a C++ AST we can't 100% detect kernel launches, so we +# model them as having the pattern "<<>>(arguments);" +# We then require that `C10_CUDA_KERNEL_LAUNCH_CHECK` be +# the next statement. +# +# We model the next statement as ending at the next `}` or `;`. +# If we see `}` then a clause ended (bad) if we see a semi-colon then +# we expect the launch check just before it. +# +# Since the kernel launch can include lambda statements, it's important +# to find the correct end-paren of the kernel launch. Doing this with +# pure regex requires recursive regex, which aren't part of the Python +# standard library. To avoid an additional dependency, we build a prefix +# regex that finds the start of a kernel launch, use a paren-matching +# algorithm to find the end of the launch, and then another regex to +# determine if a launch check is present. + +# Finds potential starts of kernel launches +kernel_launch_start = re.compile( + r"^.*<<<[^>]+>>>\s*\(", flags=re.MULTILINE +) + +# This pattern should start at the character after the final paren of the +# kernel launch. 
It returns a match if the launch check is not the next statement +has_check = re.compile( + r"\s*;(?![^;}]*C10_CUDA_KERNEL_LAUNCH_CHECK\(\);)", flags=re.MULTILINE +) + +def find_matching_paren(s: str, startpos: int) -> int: + """Given a string "prefix (unknown number of characters) suffix" + and the position of the first `(` returns the index of the character + 1 past the `)`, accounting for paren nesting + """ + opening = 0 + for i, c in enumerate(s[startpos:]): + if c == '(': + opening += 1 + elif c == ')': + opening -= 1 + if opening == 0: + return startpos + i + 1 + + raise IndexError("Closing parens not found!") + + +def should_exclude_file(filename) -> bool: + for exclude_suffix in exclude_files: + if filename.endswith(exclude_suffix): + return True + return False + + +def check_code_for_cuda_kernel_launches(code, filename=None): + """Checks code for CUDA kernel launches without cuda error checks. + + Args: + filename - Filename of file containing the code. Used only for display + purposes, so you can put anything here. + code - The code to check + + Returns: + The number of unsafe kernel launches in the code + """ + if filename is None: + filename = "##Python Function Call##" + + # We break the code apart and put it back together to add + # helpful line numberings for identifying problem areas + code = enumerate(code.split("\n")) # Split by line breaks + code = [f"{lineno}: {linecode}" for lineno, linecode in code] # Number the lines + code = '\n'.join(code) # Put it back together + + num_launches_without_checks = 0 + for m in kernel_launch_start.finditer(code): + end_paren = find_matching_paren(code, m.end() - 1) + if has_check.match(code, end_paren): + num_launches_without_checks += 1 + context = code[m.start():end_paren + 1] + print(f"Missing C10_CUDA_KERNEL_LAUNCH_CHECK in '{filename}'. 
Context:\n{context}", file=sys.stderr) + + return num_launches_without_checks + + +def check_file(filename): + """Checks a file for CUDA kernel launches without cuda error checks + + Args: + filename - File to check + + Returns: + The number of unsafe kernel launches in the file + """ + if not (filename.endswith((".cu", ".cuh"))): + return 0 + if should_exclude_file(filename): + return 0 + with open(filename) as fo: + contents = fo.read() + unsafeCount = check_code_for_cuda_kernel_launches(contents, filename) + return unsafeCount + + +def check_cuda_kernel_launches(): + """Checks all pytorch code for CUDA kernel launches without cuda error checks + + Returns: + The number of unsafe kernel launches in the codebase + """ + torch_dir = os.path.dirname(os.path.realpath(__file__)) + torch_dir = os.path.dirname(torch_dir) # Go up to parent torch + torch_dir = os.path.dirname(torch_dir) # Go up to parent caffe2 + + kernels_without_checks = 0 + files_without_checks = [] + for root, dirnames, filenames in os.walk(torch_dir): + # `$BASE/build` and `$BASE/torch/include` are generated + # so we don't want to flag their contents + if root == os.path.join(torch_dir, "build") or root == os.path.join(torch_dir, "torch/include"): + # Curtail search by modifying dirnames and filenames in place + # Yes, this is the way to do this, see `help(os.walk)` + dirnames[:] = [] + continue + + for x in filenames: + filename = os.path.join(root, x) + file_result = check_file(filename) + if file_result > 0: + kernels_without_checks += file_result + files_without_checks.append(filename) + + if kernels_without_checks > 0: + count_str = f"Found {kernels_without_checks} instances in " \ + f"{len(files_without_checks)} files where kernel " \ + "launches didn't have checks." + print(count_str, file=sys.stderr) + print("Files without checks:", file=sys.stderr) + for x in files_without_checks: + print(f"\t{x}", file=sys.stderr) + print(count_str, file=sys.stderr) + + return kernels_without_checks + + +if __name__ == "__main__": + unsafe_launches = check_cuda_kernel_launches() + sys.exit(0 if unsafe_launches == 0 else 1) diff --git a/env-llmeval/lib/python3.10/site-packages/torch/testing/_internal/common_cuda.py b/env-llmeval/lib/python3.10/site-packages/torch/testing/_internal/common_cuda.py new file mode 100644 index 0000000000000000000000000000000000000000..2b91673720bc614783c0793cb4e06bddf4ad526d --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torch/testing/_internal/common_cuda.py @@ -0,0 +1,247 @@ +r"""This file is allowed to initialize CUDA context when imported.""" + +import functools +import torch +import torch.cuda +from torch.testing._internal.common_utils import LazyVal, TEST_NUMBA, TEST_WITH_ROCM, TEST_CUDA, IS_WINDOWS +import inspect +import contextlib + + +CUDA_ALREADY_INITIALIZED_ON_IMPORT = torch.cuda.is_initialized() + + +TEST_MULTIGPU = TEST_CUDA and torch.cuda.device_count() >= 2 +CUDA_DEVICE = torch.device("cuda:0") if TEST_CUDA else None +# note: if ROCm is targeted, TEST_CUDNN is code for TEST_MIOPEN +if TEST_WITH_ROCM: + TEST_CUDNN = LazyVal(lambda: TEST_CUDA) +else: + TEST_CUDNN = LazyVal(lambda: TEST_CUDA and torch.backends.cudnn.is_acceptable(torch.tensor(1., device=CUDA_DEVICE))) + +TEST_CUDNN_VERSION = LazyVal(lambda: torch.backends.cudnn.version() if TEST_CUDNN else 0) + +SM53OrLater = LazyVal(lambda: torch.cuda.is_available() and torch.cuda.get_device_capability() >= (5, 3)) +SM60OrLater = LazyVal(lambda: torch.cuda.is_available() and torch.cuda.get_device_capability() >= (6, 0)) 
+SM70OrLater = LazyVal(lambda: torch.cuda.is_available() and torch.cuda.get_device_capability() >= (7, 0)) +SM75OrLater = LazyVal(lambda: torch.cuda.is_available() and torch.cuda.get_device_capability() >= (7, 5)) +SM80OrLater = LazyVal(lambda: torch.cuda.is_available() and torch.cuda.get_device_capability() >= (8, 0)) +SM90OrLater = LazyVal(lambda: torch.cuda.is_available() and torch.cuda.get_device_capability() >= (9, 0)) + +PLATFORM_SUPPORTS_FLASH_ATTENTION: bool = LazyVal(lambda: TEST_CUDA and (not TEST_WITH_ROCM) and (not IS_WINDOWS) and SM80OrLater) +PLATFORM_SUPPORTS_MEM_EFF_ATTENTION: bool = LazyVal(lambda: TEST_CUDA and not TEST_WITH_ROCM) +# This condition always evaluates to PLATFORM_SUPPORTS_MEM_EFF_ATTENTION but for logical clarity we keep it separate +PLATFORM_SUPPORTS_FUSED_ATTENTION: bool = LazyVal(lambda: PLATFORM_SUPPORTS_FLASH_ATTENTION or PLATFORM_SUPPORTS_MEM_EFF_ATTENTION) + +PLATFORM_SUPPORTS_FUSED_SDPA: bool = TEST_CUDA and not TEST_WITH_ROCM + +if TEST_NUMBA: + try: + import numba.cuda + TEST_NUMBA_CUDA = numba.cuda.is_available() + except Exception as e: + TEST_NUMBA_CUDA = False + TEST_NUMBA = False +else: + TEST_NUMBA_CUDA = False + +# Used below in `initialize_cuda_context_rng` to ensure that CUDA context and +# RNG have been initialized. +__cuda_ctx_rng_initialized = False + + +# after this call, CUDA context and RNG must have been initialized on each GPU +def initialize_cuda_context_rng(): + global __cuda_ctx_rng_initialized + assert TEST_CUDA, 'CUDA must be available when calling initialize_cuda_context_rng' + if not __cuda_ctx_rng_initialized: + # initialize cuda context and rng for memory tests + for i in range(torch.cuda.device_count()): + torch.randn(1, device=f"cuda:{i}") + __cuda_ctx_rng_initialized = True + + +# Test whether hardware TF32 math mode enabled. It is enabled only on: +# - CUDA >= 11 +# - arch >= Ampere +def tf32_is_not_fp32(): + if not torch.cuda.is_available() or torch.version.cuda is None: + return False + if torch.cuda.get_device_properties(torch.cuda.current_device()).major < 8: + return False + if int(torch.version.cuda.split('.')[0]) < 11: + return False + return True + + +@contextlib.contextmanager +def tf32_off(): + old_allow_tf32_matmul = torch.backends.cuda.matmul.allow_tf32 + try: + torch.backends.cuda.matmul.allow_tf32 = False + with torch.backends.cudnn.flags(enabled=None, benchmark=None, deterministic=None, allow_tf32=False): + yield + finally: + torch.backends.cuda.matmul.allow_tf32 = old_allow_tf32_matmul + + +@contextlib.contextmanager +def tf32_on(self, tf32_precision=1e-5): + old_allow_tf32_matmul = torch.backends.cuda.matmul.allow_tf32 + old_precision = self.precision + try: + torch.backends.cuda.matmul.allow_tf32 = True + self.precision = tf32_precision + with torch.backends.cudnn.flags(enabled=None, benchmark=None, deterministic=None, allow_tf32=True): + yield + finally: + torch.backends.cuda.matmul.allow_tf32 = old_allow_tf32_matmul + self.precision = old_precision + + +# This is a wrapper that wraps a test to run this test twice, one with +# allow_tf32=True, another with allow_tf32=False. When running with +# allow_tf32=True, it will use reduced precision as specified by the +# argument. 
For example: +# @dtypes(torch.float32, torch.float64, torch.complex64, torch.complex128) +# @tf32_on_and_off(0.005) +# def test_matmul(self, device, dtype): +# a = ...; b = ...; +# c = torch.matmul(a, b) +# self.assertEqual(c, expected) +# In the above example, when testing torch.float32 and torch.complex64 on CUDA +# on a CUDA >= 11 build on an >=Ampere architecture, the matmul will be running at +# TF32 mode and TF32 mode off, and on TF32 mode, the assertEqual will use reduced +# precision to check values. +# +# This decorator can be used for function with or without device/dtype, such as +# @tf32_on_and_off(0.005) +# def test_my_op(self) +# @tf32_on_and_off(0.005) +# def test_my_op(self, device) +# @tf32_on_and_off(0.005) +# def test_my_op(self, device, dtype) +# @tf32_on_and_off(0.005) +# def test_my_op(self, dtype) +# if neither device nor dtype is specified, it will check if the system has ampere device +# if device is specified, it will check if device is cuda +# if dtype is specified, it will check if dtype is float32 or complex64 +# tf32 and fp32 are different only when all the three checks pass +def tf32_on_and_off(tf32_precision=1e-5): + def with_tf32_disabled(self, function_call): + with tf32_off(): + function_call() + + def with_tf32_enabled(self, function_call): + with tf32_on(self, tf32_precision): + function_call() + + def wrapper(f): + params = inspect.signature(f).parameters + arg_names = tuple(params.keys()) + + @functools.wraps(f) + def wrapped(*args, **kwargs): + for k, v in zip(arg_names, args): + kwargs[k] = v + cond = tf32_is_not_fp32() + if 'device' in kwargs: + cond = cond and (torch.device(kwargs['device']).type == 'cuda') + if 'dtype' in kwargs: + cond = cond and (kwargs['dtype'] in {torch.float32, torch.complex64}) + if cond: + with_tf32_disabled(kwargs['self'], lambda: f(**kwargs)) + with_tf32_enabled(kwargs['self'], lambda: f(**kwargs)) + else: + f(**kwargs) + + return wrapped + return wrapper + + +# This is a wrapper that wraps a test to run it with TF32 turned off. +# This wrapper is designed to be used when a test uses matmul or convolutions +# but the purpose of that test is not testing matmul or convolutions. +# Disabling TF32 will enforce torch.float tensors to be always computed +# at full precision. 
+def with_tf32_off(f): + @functools.wraps(f) + def wrapped(*args, **kwargs): + with tf32_off(): + return f(*args, **kwargs) + + return wrapped + +def _get_magma_version(): + if 'Magma' not in torch.__config__.show(): + return (0, 0) + position = torch.__config__.show().find('Magma ') + version_str = torch.__config__.show()[position + len('Magma '):].split('\n')[0] + return tuple(int(x) for x in version_str.split(".")) + +def _get_torch_cuda_version(): + if torch.version.cuda is None: + return (0, 0) + cuda_version = str(torch.version.cuda) + return tuple(int(x) for x in cuda_version.split(".")) + +def _get_torch_rocm_version(): + if not TEST_WITH_ROCM: + return (0, 0) + rocm_version = str(torch.version.hip) + rocm_version = rocm_version.split("-")[0] # ignore git sha + return tuple(int(x) for x in rocm_version.split(".")) + +def _check_cusparse_generic_available(): + return not TEST_WITH_ROCM + +def _check_hipsparse_generic_available(): + if not TEST_WITH_ROCM: + return False + + rocm_version = str(torch.version.hip) + rocm_version = rocm_version.split("-")[0] # ignore git sha + rocm_version_tuple = tuple(int(x) for x in rocm_version.split(".")) + return not (rocm_version_tuple is None or rocm_version_tuple < (5, 1)) + + +TEST_CUSPARSE_GENERIC = _check_cusparse_generic_available() +TEST_HIPSPARSE_GENERIC = _check_hipsparse_generic_available() + +# Shared by test_cuda.py and test_multigpu.py +def _create_scaling_models_optimizers(device="cuda", optimizer_ctor=torch.optim.SGD, optimizer_kwargs=None): + # Create a module+optimizer that will use scaling, and a control module+optimizer + # that will not use scaling, against which the scaling-enabled module+optimizer can be compared. + mod_control = torch.nn.Sequential(torch.nn.Linear(8, 8), torch.nn.Linear(8, 8)).to(device=device) + mod_scaling = torch.nn.Sequential(torch.nn.Linear(8, 8), torch.nn.Linear(8, 8)).to(device=device) + with torch.no_grad(): + for c, s in zip(mod_control.parameters(), mod_scaling.parameters()): + s.copy_(c) + + kwargs = {"lr": 1.0} + if optimizer_kwargs is not None: + kwargs.update(optimizer_kwargs) + opt_control = optimizer_ctor(mod_control.parameters(), **kwargs) + opt_scaling = optimizer_ctor(mod_scaling.parameters(), **kwargs) + + return mod_control, mod_scaling, opt_control, opt_scaling + + +def _create_scaling_case(device="cuda", dtype=torch.float, optimizer_ctor=torch.optim.SGD, optimizer_kwargs=None): + data = [(torch.randn((8, 8), dtype=dtype, device=device), torch.randn((8, 8), dtype=dtype, device=device)), + (torch.randn((8, 8), dtype=dtype, device=device), torch.randn((8, 8), dtype=dtype, device=device)), + (torch.randn((8, 8), dtype=dtype, device=device), torch.randn((8, 8), dtype=dtype, device=device)), + (torch.randn((8, 8), dtype=dtype, device=device), torch.randn((8, 8), dtype=dtype, device=device))] + + loss_fn = torch.nn.MSELoss().cuda() + + skip_iter = 2 + + return _create_scaling_models_optimizers( + device=device, optimizer_ctor=optimizer_ctor, optimizer_kwargs=optimizer_kwargs, + ) + (data, loss_fn, skip_iter) + + +# Importing this module should NOT eagerly initialize CUDA +if not CUDA_ALREADY_INITIALIZED_ON_IMPORT: + assert not torch.cuda.is_initialized() diff --git a/env-llmeval/lib/python3.10/site-packages/torch/testing/_internal/common_device_type.py b/env-llmeval/lib/python3.10/site-packages/torch/testing/_internal/common_device_type.py new file mode 100644 index 0000000000000000000000000000000000000000..b5d1e769209b1f56304b1b3b0d2976b8ecaab8dc --- /dev/null +++ 
b/env-llmeval/lib/python3.10/site-packages/torch/testing/_internal/common_device_type.py @@ -0,0 +1,1513 @@ +import copy +import gc +import inspect +import runpy +import sys +import threading +from collections import namedtuple +from enum import Enum +from functools import wraps, partial +from typing import List, Any, ClassVar, Optional, Sequence, Tuple, Union, Dict, Set +import unittest +import os +import torch +from torch.testing._internal.common_utils import TestCase, TEST_WITH_ROCM, TEST_MKL, \ + skipCUDANonDefaultStreamIf, TEST_WITH_ASAN, TEST_WITH_UBSAN, TEST_WITH_TSAN, \ + IS_SANDCASTLE, IS_FBCODE, IS_REMOTE_GPU, IS_WINDOWS, TEST_MPS, \ + _TestParametrizer, compose_parametrize_fns, dtype_name, \ + TEST_WITH_MIOPEN_SUGGEST_NHWC, NATIVE_DEVICES, skipIfTorchDynamo, \ + get_tracked_input, clear_tracked_input, PRINT_REPRO_ON_FAILURE +from torch.testing._internal.common_cuda import _get_torch_cuda_version, \ + TEST_CUSPARSE_GENERIC, TEST_HIPSPARSE_GENERIC, _get_torch_rocm_version +from torch.testing._internal.common_dtype import get_all_dtypes + +try: + import psutil # type: ignore[import] + HAS_PSUTIL = True +except ImportError: + HAS_PSUTIL = False + +# Note [Writing Test Templates] +# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ +# +# This note was written shortly after the PyTorch 1.9 release. +# If you notice it's out-of-date or think it could be improved then please +# file an issue. +# +# PyTorch has its own framework for instantiating test templates. That is, for +# taking test classes that look similar to unittest or pytest +# compatible test classes and optionally doing the following: +# +# - instantiating a version of the test class for each available device type +# (often the CPU, CUDA, and META device types) +# - further instantiating a version of each test that's always specialized +# on the test class's device type, and optionally specialized further +# on datatypes or operators +# +# This functionality is similar to pytest's parametrize functionality +# (see https://docs.pytest.org/en/6.2.x/parametrize.html), but with considerable +# additional logic that specializes the instantiated test classes for their +# device types (see CPUTestBase and CUDATestBase below), supports a variety +# of composable decorators that allow for test filtering and setting +# tolerances, and allows tests parametrized by operators to instantiate +# only the subset of device type x dtype that operator supports. +# +# This framework was built to make it easier to write tests that run on +# multiple device types, multiple datatypes (dtypes), and for multiple +# operators. It's also useful for controlling which tests are run. For example, +# only tests that use a CUDA device can be run on platforms with CUDA. +# Let's dive in with an example to get an idea for how it works: +# +# -------------------------------------------------------- +# A template class (looks like a regular unittest TestCase) +# class TestClassFoo(TestCase): +# +# # A template test that can be specialized with a device +# # NOTE: this test case is not runnable by unittest or pytest because it +# # accepts an extra positional argument, "device", that they do not understand +# def test_bar(self, device): +# pass +# +# # Function that instantiates a template class and its tests +# instantiate_device_type_tests(TestCommon, globals()) +# -------------------------------------------------------- +# +# In the above code example we see a template class and a single test template +# that can be instantiated with a device. 
The function +# instantiate_device_type_tests(), called at file scope, instantiates +# new test classes, one per available device type, and new tests in those +# classes from these templates. It actually does this by removing +# the class TestClassFoo and replacing it with classes like TestClassFooCPU +# and TestClassFooCUDA, instantiated test classes that inherit from CPUTestBase +# and CUDATestBase respectively. Additional device types, like XLA, +# (see https://github.com/pytorch/xla) can further extend the set of +# instantiated test classes to create classes like TestClassFooXLA. +# +# The test template, test_bar(), is also instantiated. In this case the template +# is only specialized on a device, so (depending on the available device +# types) it might become test_bar_cpu() in TestClassFooCPU and test_bar_cuda() +# in TestClassFooCUDA. We can think of the instantiated test classes as +# looking like this: +# +# -------------------------------------------------------- +# # An instantiated test class for the CPU device type +# class TestClassFooCPU(CPUTestBase): +# +# # An instantiated test that calls the template with the string representation +# # of a device from the test class's device type +# def test_bar_cpu(self): +# test_bar(self, 'cpu') +# +# # An instantiated test class for the CUDA device type +# class TestClassFooCUDA(CUDATestBase): +# +# # An instantiated test that calls the template with the string representation +# # of a device from the test class's device type +# def test_bar_cuda(self): +# test_bar(self, 'cuda:0') +# -------------------------------------------------------- +# +# These instantiated test classes ARE discoverable and runnable by both +# unittest and pytest. One thing that may be confusing, however, is that +# attempting to run "test_bar" will not work, despite it appearing in the +# original template code. This is because "test_bar" is no longer discoverable +# after instantiate_device_type_tests() runs, as the above snippet shows. +# Instead "test_bar_cpu" and "test_bar_cuda" may be run directly, or both +# can be run with the option "-k test_bar". +# +# Removing the template class and adding the instantiated classes requires +# passing "globals()" to instantiate_device_type_tests(), because it +# edits the file's Python objects. +# +# As mentioned, tests can be additionally parametrized on dtypes or +# operators. Datatype parametrization uses the @dtypes decorator and +# require a test template like this: +# +# -------------------------------------------------------- +# # A template test that can be specialized with a device and a datatype (dtype) +# @dtypes(torch.float32, torch.int64) +# def test_car(self, device, dtype) +# pass +# -------------------------------------------------------- +# +# If the CPU and CUDA device types are available this test would be +# instantiated as 4 tests that cover the cross-product of the two dtypes +# and two device types: +# +# - test_car_cpu_float32 +# - test_car_cpu_int64 +# - test_car_cuda_float32 +# - test_car_cuda_int64 +# +# The dtype is passed as a torch.dtype object. +# +# Tests parametrized on operators (actually on OpInfos, more on that in a +# moment...) 
use the @ops decorator and require a test template like this: +# -------------------------------------------------------- +# # A template test that can be specialized with a device, dtype, and OpInfo +# @ops(op_db) +# def test_car(self, device, dtype, op) +# pass +# -------------------------------------------------------- +# +# See the documentation for the @ops decorator below for additional details +# on how to use it and see the note [OpInfos] in +# common_methods_invocations.py for more details on OpInfos. +# +# A test parametrized over the entire "op_db", which contains hundreds of +# OpInfos, will likely have hundreds or thousands of instantiations. The +# test will be instantiated on the cross-product of device types, operators, +# and the dtypes the operator supports on that device type. The instantiated +# tests will have names like: +# +# - test_car_add_cpu_float32 +# - test_car_sub_cuda_int64 +# +# The first instantiated test calls the original test_car() with the OpInfo +# for torch.add as its "op" argument, the string 'cpu' for its "device" argument, +# and the dtype torch.float32 for is "dtype" argument. The second instantiated +# test calls the test_car() with the OpInfo for torch.sub, a CUDA device string +# like 'cuda:0' or 'cuda:1' for its "device" argument, and the dtype +# torch.int64 for its "dtype argument." +# +# In addition to parametrizing over device, dtype, and ops via OpInfos, the +# @parametrize decorator is supported for arbitrary parametrizations: +# -------------------------------------------------------- +# # A template test that can be specialized with a device, dtype, and value for x +# @parametrize("x", range(5)) +# def test_car(self, device, dtype, x) +# pass +# -------------------------------------------------------- +# +# See the documentation for @parametrize in common_utils.py for additional details +# on this. Note that the instantiate_device_type_tests() function will handle +# such parametrizations; there is no need to additionally call +# instantiate_parametrized_tests(). +# +# Clever test filtering can be very useful when working with parametrized +# tests. "-k test_car" would run every instantiated variant of the test_car() +# test template, and "-k test_car_add" runs every variant instantiated with +# torch.add. +# +# It is important to use the passed device and dtype as appropriate. Use +# helper functions like make_tensor() that require explicitly specifying +# the device and dtype so they're not forgotten. +# +# Test templates can use a variety of composable decorators to specify +# additional options and requirements, some are listed here: +# +# - @deviceCountAtLeast() +# Passes a list of strings representing all available devices of +# the test class's device type as the test template's "device" argument. +# If there are fewer devices than the value passed to the decorator +# the test is skipped. +# - @dtypes() +# In addition to accepting multiple dtypes, the @dtypes decorator +# can accept a sequence of tuple pairs of dtypes. The test template +# will be called with each tuple for its "dtype" argument. 
+# - @onlyNativeDeviceTypes +# Skips the test if the device is not a native device type (currently CPU, CUDA, Meta) +# - @onlyCPU +# Skips the test if the device is not a CPU device +# - @onlyCUDA +# Skips the test if the device is not a CUDA device +# - @onlyMPS +# Skips the test if the device is not a MPS device +# - @skipCPUIfNoLapack +# Skips the test if the device is a CPU device and LAPACK is not installed +# - @skipCPUIfNoMkl +# Skips the test if the device is a CPU device and MKL is not installed +# - @skipCUDAIfNoMagma +# Skips the test if the device is a CUDA device and MAGMA is not installed +# - @skipCUDAIfRocm +# Skips the test if the device is a CUDA device and ROCm is being used + + +# Note [Adding a Device Type] +# ~~~~~~~~~~~~~~~~~~~~~~~~~~~ +# +# To add a device type: +# +# (1) Create a new "TestBase" extending DeviceTypeTestBase. +# See CPUTestBase and CUDATestBase below. +# (2) Define the "device_type" attribute of the base to be the +# appropriate string. +# (3) Add logic to this file that appends your base class to +# device_type_test_bases when your device type is available. +# (4) (Optional) Write setUpClass/tearDownClass class methods that +# instantiate dependencies (see MAGMA in CUDATestBase). +# (5) (Optional) Override the "instantiate_test" method for total +# control over how your class creates tests. +# +# setUpClass is called AFTER tests have been created and BEFORE and ONLY IF +# they are run. This makes it useful for initializing devices and dependencies. + + +# Note [Overriding methods in generic tests] +# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ +# +# Device generic tests look a lot like normal test classes, but they differ +# from ordinary classes in some important ways. In particular, overriding +# methods in generic tests doesn't work quite the way you expect. +# +# class TestFooDeviceType(TestCase): +# # Intention is to override +# def assertEqual(self, x, y): +# # This DOESN'T WORK! +# super().assertEqual(x, y) +# +# If you try to run this code, you'll get an error saying that TestFooDeviceType +# is not in scope. This is because after instantiating our classes, we delete +# it from the parent scope. Instead, you need to hardcode a direct invocation +# of the desired subclass call, e.g., +# +# class TestFooDeviceType(TestCase): +# # Intention is to override +# def assertEqual(self, x, y): +# TestCase.assertEqual(x, y) +# +# However, a less error-prone way of customizing the behavior of TestCase +# is to either (1) add your functionality to TestCase and make it toggled +# by a class attribute, or (2) create your own subclass of TestCase, and +# then inherit from it for your generic test. + + +def _dtype_test_suffix(dtypes): + """ Returns the test suffix for a dtype, sequence of dtypes, or None. """ + if isinstance(dtypes, (list, tuple)): + if len(dtypes) == 0: + return '' + return '_' + '_'.join(dtype_name(d) for d in dtypes) + elif dtypes: + return f'_{dtype_name(dtypes)}' + else: + return '' + + +def _update_param_kwargs(param_kwargs, name, value): + """ Adds a kwarg with the specified name and value to the param_kwargs dict. """ + # Make name plural (e.g. devices / dtypes) if the value is composite. + plural_name = f'{name}s' + + # Clear out old entries of the arg if any. 
+ if name in param_kwargs: + del param_kwargs[name] + if plural_name in param_kwargs: + del param_kwargs[plural_name] + + if isinstance(value, (list, tuple)): + param_kwargs[plural_name] = value + elif value is not None: + param_kwargs[name] = value + + # Leave param_kwargs as-is when value is None. + + +class DeviceTypeTestBase(TestCase): + device_type: str = 'generic_device_type' + + # Flag to disable test suite early due to unrecoverable error such as CUDA error. + _stop_test_suite = False + + # Precision is a thread-local setting since it may be overridden per test + _tls = threading.local() + _tls.precision = TestCase._precision + _tls.rel_tol = TestCase._rel_tol + + @property + def precision(self): + return self._tls.precision + + @precision.setter + def precision(self, prec): + self._tls.precision = prec + + @property + def rel_tol(self): + return self._tls.rel_tol + + @rel_tol.setter + def rel_tol(self, prec): + self._tls.rel_tol = prec + + # Returns a string representing the device that single device tests should use. + # Note: single device tests use this device exclusively. + @classmethod + def get_primary_device(cls): + return cls.device_type + + @classmethod + def _init_and_get_primary_device(cls): + try: + return cls.get_primary_device() + except Exception: + # For CUDATestBase, XLATestBase, and possibly others, the primary device won't be available + # until setUpClass() sets it. Call that manually here if needed. + if hasattr(cls, 'setUpClass'): + cls.setUpClass() + return cls.get_primary_device() + + # Returns a list of strings representing all available devices of this + # device type. The primary device must be the first string in the list + # and the list must contain no duplicates. + # Note: UNSTABLE API. Will be replaced once PyTorch has a device generic + # mechanism of acquiring all available devices. + @classmethod + def get_all_devices(cls): + return [cls.get_primary_device()] + + # Returns the dtypes the test has requested. + # Prefers device-specific dtype specifications over generic ones. + @classmethod + def _get_dtypes(cls, test): + if not hasattr(test, 'dtypes'): + return None + + default_dtypes = test.dtypes.get('all') + msg = f"@dtypes is mandatory when using @dtypesIf however '{test.__name__}' didn't specify it" + assert default_dtypes is not None, msg + + return test.dtypes.get(cls.device_type, default_dtypes) + + def _get_precision_override(self, test, dtype): + if not hasattr(test, 'precision_overrides'): + return self.precision + return test.precision_overrides.get(dtype, self.precision) + + def _get_tolerance_override(self, test, dtype): + if not hasattr(test, 'tolerance_overrides'): + return self.precision, self.rel_tol + return test.tolerance_overrides.get(dtype, tol(self.precision, self.rel_tol)) + + def _apply_precision_override_for_test(self, test, param_kwargs): + dtype = param_kwargs['dtype'] if 'dtype' in param_kwargs else None + dtype = param_kwargs['dtypes'] if 'dtypes' in param_kwargs else dtype + if dtype: + self.precision = self._get_precision_override(test, dtype) + self.precision, self.rel_tol = self._get_tolerance_override(test, dtype) + + # Creates device-specific tests. + @classmethod + def instantiate_test(cls, name, test, *, generic_cls=None): + + def instantiate_test_helper(cls, name, *, test, param_kwargs=None, decorator_fn=lambda _: []): + # Add the device param kwarg if the test needs device or devices. 
+ param_kwargs = {} if param_kwargs is None else param_kwargs + test_sig_params = inspect.signature(test).parameters + if 'device' in test_sig_params or 'devices' in test_sig_params: + device_arg: str = cls._init_and_get_primary_device() + if hasattr(test, 'num_required_devices'): + device_arg = cls.get_all_devices() + _update_param_kwargs(param_kwargs, 'device', device_arg) + + # Apply decorators based on param kwargs. + for decorator in decorator_fn(param_kwargs): + test = decorator(test) + + # Constructs the test + @wraps(test) + def instantiated_test(self, param_kwargs=param_kwargs): + # Sets precision and runs test + # Note: precision is reset after the test is run + guard_precision = self.precision + guard_rel_tol = self.rel_tol + try: + self._apply_precision_override_for_test(test, param_kwargs) + result = test(self, **param_kwargs) + except RuntimeError as rte: + # check if rte should stop entire test suite. + self._stop_test_suite = self._should_stop_test_suite() + # Check if test has been decorated with `@expectedFailure` + # Using `__unittest_expecting_failure__` attribute, see + # https://github.com/python/cpython/blob/ffa505b580464/Lib/unittest/case.py#L164 + # In that case, make it fail with "unexpected success" by suppressing exception + if getattr(test, "__unittest_expecting_failure__", False) and self._stop_test_suite: + import sys + print("Suppressing fatal exception to trigger unexpected success", file=sys.stderr) + return + # raise the runtime error as is for the test suite to record. + raise rte + finally: + self.precision = guard_precision + self.rel_tol = guard_rel_tol + + return result + + assert not hasattr(cls, name), f"Redefinition of test {name}" + setattr(cls, name, instantiated_test) + + def default_parametrize_fn(test, generic_cls, device_cls): + # By default, no parametrization is needed. + yield (test, '', {}, lambda _: []) + + # Parametrization decorators set the parametrize_fn attribute on the test. + parametrize_fn = getattr(test, "parametrize_fn", default_parametrize_fn) + + # If one of the @dtypes* decorators is present, also parametrize over the dtypes set by it. + dtypes = cls._get_dtypes(test) + if dtypes is not None: + + def dtype_parametrize_fn(test, generic_cls, device_cls, dtypes=dtypes): + for dtype in dtypes: + param_kwargs: Dict[str, Any] = {} + _update_param_kwargs(param_kwargs, "dtype", dtype) + + # Note that an empty test suffix is set here so that the dtype can be appended + # later after the device. + yield (test, '', param_kwargs, lambda _: []) + + parametrize_fn = compose_parametrize_fns(dtype_parametrize_fn, parametrize_fn) + + # Instantiate the parametrized tests. + for (test, test_suffix, param_kwargs, decorator_fn) in parametrize_fn(test, generic_cls, cls): # noqa: B020 + test_suffix = '' if test_suffix == '' else '_' + test_suffix + device_suffix = '_' + cls.device_type + + # Note: device and dtype suffix placement + # Special handling here to place dtype(s) after device according to test name convention. + dtype_kwarg = None + if 'dtype' in param_kwargs or 'dtypes' in param_kwargs: + dtype_kwarg = param_kwargs['dtypes'] if 'dtypes' in param_kwargs else param_kwargs['dtype'] + test_name = f'{name}{test_suffix}{device_suffix}{_dtype_test_suffix(dtype_kwarg)}' + + instantiate_test_helper(cls=cls, name=test_name, test=test, param_kwargs=param_kwargs, + decorator_fn=decorator_fn) + + def run(self, result=None): + super().run(result=result) + # Early terminate test if _stop_test_suite is set. 
+ if self._stop_test_suite: + result.stop() + + +class CPUTestBase(DeviceTypeTestBase): + device_type = 'cpu' + + # No critical error should stop CPU test suite + def _should_stop_test_suite(self): + return False + +class CUDATestBase(DeviceTypeTestBase): + device_type = 'cuda' + _do_cuda_memory_leak_check = True + _do_cuda_non_default_stream = True + primary_device: ClassVar[str] + cudnn_version: ClassVar[Any] + no_magma: ClassVar[bool] + no_cudnn: ClassVar[bool] + + def has_cudnn(self): + return not self.no_cudnn + + @classmethod + def get_primary_device(cls): + return cls.primary_device + + @classmethod + def get_all_devices(cls): + primary_device_idx = int(cls.get_primary_device().split(':')[1]) + num_devices = torch.cuda.device_count() + + prim_device = cls.get_primary_device() + cuda_str = 'cuda:{0}' + non_primary_devices = [cuda_str.format(idx) for idx in range(num_devices) if idx != primary_device_idx] + return [prim_device] + non_primary_devices + + @classmethod + def setUpClass(cls): + # has_magma shows up after cuda is initialized + t = torch.ones(1).cuda() + cls.no_magma = not torch.cuda.has_magma + + # Determines if cuDNN is available and its version + cls.no_cudnn = not torch.backends.cudnn.is_acceptable(t) + cls.cudnn_version = None if cls.no_cudnn else torch.backends.cudnn.version() + + # Acquires the current device as the primary (test) device + cls.primary_device = f'cuda:{torch.cuda.current_device()}' + +# See Note [Lazy Tensor tests in device agnostic testing] +lazy_ts_backend_init = False +class LazyTestBase(DeviceTypeTestBase): + device_type = 'lazy' + + def _should_stop_test_suite(self): + return False + + @classmethod + def setUpClass(cls): + import torch._lazy + import torch._lazy.metrics + import torch._lazy.ts_backend + global lazy_ts_backend_init + if not lazy_ts_backend_init: + # Need to connect the TS backend to lazy key before running tests + torch._lazy.ts_backend.init() + lazy_ts_backend_init = True + +class MPSTestBase(DeviceTypeTestBase): + device_type = 'mps' + primary_device: ClassVar[str] + + @classmethod + def get_primary_device(cls): + return cls.primary_device + + @classmethod + def get_all_devices(cls): + # currently only one device is supported on MPS backend + prim_device = cls.get_primary_device() + return [prim_device] + + @classmethod + def setUpClass(cls): + cls.primary_device = 'mps:0' + + def _should_stop_test_suite(self): + return False + +class PrivateUse1TestBase(DeviceTypeTestBase): + primary_device: ClassVar[str] + device_mod = None + device_type = 'privateuse1' + + @classmethod + def get_primary_device(cls): + return cls.primary_device + + @classmethod + def get_all_devices(cls): + primary_device_idx = int(cls.get_primary_device().split(':')[1]) + num_devices = cls.device_mod.device_count() + prim_device = cls.get_primary_device() + device_str = f'{cls.device_type}:{{0}}' + non_primary_devices = [device_str.format(idx) for idx in range(num_devices) if idx != primary_device_idx] + return [prim_device] + non_primary_devices + + @classmethod + def setUpClass(cls): + cls.device_type = torch._C._get_privateuse1_backend_name() + cls.device_mod = getattr(torch, cls.device_type, None) + assert cls.device_mod is not None, f'''torch has no module of `{cls.device_type}`, you should register + a module by `torch._register_device_module`.''' + cls.primary_device = f'{cls.device_type}:{cls.device_mod.current_device()}' + +# Adds available device-type-specific test base classes +def get_device_type_test_bases(): + # set type to List[Any] due to mypy 
list-of-union issue: + # https://github.com/python/mypy/issues/3351 + test_bases: List[Any] = list() + + if IS_SANDCASTLE or IS_FBCODE: + if IS_REMOTE_GPU: + # Skip if sanitizer is enabled + if not TEST_WITH_ASAN and not TEST_WITH_TSAN and not TEST_WITH_UBSAN: + test_bases.append(CUDATestBase) + else: + test_bases.append(CPUTestBase) + else: + test_bases.append(CPUTestBase) + if torch.cuda.is_available(): + test_bases.append(CUDATestBase) + device_type = torch._C._get_privateuse1_backend_name() + device_mod = getattr(torch, device_type, None) + if hasattr(device_mod, "is_available") and device_mod.is_available(): + test_bases.append(PrivateUse1TestBase) + # Disable MPS testing in generic device testing temporarily while we're + # ramping up support. + # elif torch.backends.mps.is_available(): + # test_bases.append(MPSTestBase) + + return test_bases + +device_type_test_bases = get_device_type_test_bases() + + +def filter_desired_device_types(device_type_test_bases, except_for=None, only_for=None): + # device type cannot appear in both except_for and only_for + intersect = set(except_for if except_for else []) & set(only_for if only_for else []) + assert not intersect, f"device ({intersect}) appeared in both except_for and only_for" + + if except_for: + device_type_test_bases = filter( + lambda x: x.device_type not in except_for, device_type_test_bases) + if only_for: + device_type_test_bases = filter( + lambda x: x.device_type in only_for, device_type_test_bases) + + return list(device_type_test_bases) + + +# Note [How to extend DeviceTypeTestBase to add new test device] +# The following logic optionally allows downstream projects like pytorch/xla to +# add more test devices. +# Instructions: +# - Add a python file (e.g. pytorch/xla/test/pytorch_test_base.py) in downstream project. +# - Inside the file, one should inherit from `DeviceTypeTestBase` class and define +# a new DeviceTypeTest class (e.g. `XLATestBase`) with proper implementation of +# `instantiate_test` method. +# - DO NOT import common_device_type inside the file. +# `runpy.run_path` with `globals()` already properly setup the context so that +# `DeviceTypeTestBase` is already available. +# - Set a top-level variable `TEST_CLASS` equal to your new class. +# E.g. TEST_CLASS = XLATensorBase +# - To run tests with new device type, set `TORCH_TEST_DEVICE` env variable to path +# to this file. Multiple paths can be separated by `:`. +# See pytorch/xla/test/pytorch_test_base.py for a more detailed example. +_TORCH_TEST_DEVICES = os.environ.get('TORCH_TEST_DEVICES', None) +if _TORCH_TEST_DEVICES: + for path in _TORCH_TEST_DEVICES.split(':'): + # runpy (a stdlib module) lacks annotations + mod = runpy.run_path(path, init_globals=globals()) # type: ignore[func-returns-value] + device_type_test_bases.append(mod['TEST_CLASS']) + + +PYTORCH_CUDA_MEMCHECK = os.getenv('PYTORCH_CUDA_MEMCHECK', '0') == '1' + +PYTORCH_TESTING_DEVICE_ONLY_FOR_KEY = 'PYTORCH_TESTING_DEVICE_ONLY_FOR' +PYTORCH_TESTING_DEVICE_EXCEPT_FOR_KEY = 'PYTORCH_TESTING_DEVICE_EXCEPT_FOR' + + +# Adds 'instantiated' device-specific test cases to the given scope. +# The tests in these test cases are derived from the generic tests in +# generic_test_class. This function should be used instead of +# instantiate_parametrized_tests() if the test class contains +# device-specific tests (NB: this supports additional @parametrize usage). 
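For example, a hedged sketch of the filtering arguments (TestFooTemplate is a hypothetical template class assumed to be defined in the same file):

    # Instantiate only the CPU variant of the hypothetical template class;
    # except_for=['cuda'] would instead keep every available backend but CUDA.
    instantiate_device_type_tests(TestFooTemplate, globals(), only_for=['cpu'])

The same filtering is available at run time, without editing the test file, through the PYTORCH_TESTING_DEVICE_ONLY_FOR and PYTORCH_TESTING_DEVICE_EXCEPT_FOR environment variables handled further below.
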
+# +# See note "Writing Test Templates" +def instantiate_device_type_tests(generic_test_class, scope, except_for=None, only_for=None, include_lazy=False, allow_mps=False): + # Removes the generic test class from its enclosing scope so its tests + # are not discoverable. + del scope[generic_test_class.__name__] + + # Creates an 'empty' version of the generic_test_class + # Note: we don't inherit from the generic_test_class directly because + # that would add its tests to our test classes and they would be + # discovered (despite not being runnable). Inherited methods also + # can't be removed later, and we can't rely on load_tests because + # pytest doesn't support it (as of this writing). + empty_name = generic_test_class.__name__ + "_base" + empty_class = type(empty_name, generic_test_class.__bases__, {}) + + # Acquires members names + # See Note [Overriding methods in generic tests] + generic_members = set(generic_test_class.__dict__.keys()) - set(empty_class.__dict__.keys()) + generic_tests = [x for x in generic_members if x.startswith('test')] + + # allow callers to specifically opt tests into being tested on MPS, similar to `include_lazy` + test_bases = device_type_test_bases.copy() + if allow_mps and TEST_MPS and MPSTestBase not in test_bases: + test_bases.append(MPSTestBase) + # Filter out the device types based on user inputs + desired_device_type_test_bases = filter_desired_device_types(test_bases, except_for, only_for) + if include_lazy: + # Note [Lazy Tensor tests in device agnostic testing] + # Right now, test_view_ops.py runs with LazyTensor. + # We don't want to opt every device-agnostic test into using the lazy device, + # because many of them will fail. + # So instead, the only way to opt a specific device-agnostic test file into + # lazy tensor testing is with include_lazy=True + if IS_FBCODE: + print("TorchScript backend not yet supported in FBCODE/OVRSOURCE builds", file=sys.stderr) + else: + desired_device_type_test_bases.append(LazyTestBase) + + def split_if_not_empty(x: str): + return x.split(",") if len(x) != 0 else [] + + # Filter out the device types based on environment variables if available + # Usage: + # export PYTORCH_TESTING_DEVICE_ONLY_FOR=cuda,cpu + # export PYTORCH_TESTING_DEVICE_EXCEPT_FOR=xla + env_only_for = split_if_not_empty(os.getenv(PYTORCH_TESTING_DEVICE_ONLY_FOR_KEY, '')) + env_except_for = split_if_not_empty(os.getenv(PYTORCH_TESTING_DEVICE_EXCEPT_FOR_KEY, '')) + + desired_device_type_test_bases = filter_desired_device_types(desired_device_type_test_bases, + env_except_for, env_only_for) + + + # Creates device-specific test cases + for base in desired_device_type_test_bases: + class_name = generic_test_class.__name__ + base.device_type.upper() + + # type set to Any and suppressed due to unsupport runtime class: + # https://github.com/python/mypy/wiki/Unsupported-Python-Features + device_type_test_class: Any = type(class_name, (base, empty_class), {}) + + for name in generic_members: + if name in generic_tests: # Instantiates test member + test = getattr(generic_test_class, name) + # XLA-compat shim (XLA's instantiate_test takes doesn't take generic_cls) + sig = inspect.signature(device_type_test_class.instantiate_test) + if len(sig.parameters) == 3: + # Instantiates the device-specific tests + device_type_test_class.instantiate_test(name, copy.deepcopy(test), generic_cls=generic_test_class) + else: + device_type_test_class.instantiate_test(name, copy.deepcopy(test)) + else: # Ports non-test member + assert name not in 
device_type_test_class.__dict__, f"Redefinition of directly defined member {name}" + nontest = getattr(generic_test_class, name) + setattr(device_type_test_class, name, nontest) + + # Mimics defining the instantiated class in the caller's file + # by setting its module to the given class's and adding + # the module to the given scope. + # This lets the instantiated class be discovered by unittest. + device_type_test_class.__module__ = generic_test_class.__module__ + scope[class_name] = device_type_test_class + + +# Category of dtypes to run an OpInfo-based test for +# Example use: @ops(dtype=OpDTypes.supported) +# +# There are 5 categories: +# - supported: Every dtype supported by the operator. Use for exhaustive +# testing of all dtypes. +# - unsupported: Run tests on dtypes not supported by the operator. e.g. for +# testing the operator raises an error and doesn't crash. +# - supported_backward: Every dtype supported by the operator's backward pass. +# - unsupported_backward: Run tests on dtypes not supported by the operator's backward pass. +# - any_one: Runs a test for one dtype the operator supports. Prioritizes dtypes the +# operator supports in both forward and backward. +# - none: Useful for tests that are not dtype-specific. No dtype will be passed to the test +# when this is selected. +class OpDTypes(Enum): + supported = 0 # Test all supported dtypes (default) + unsupported = 1 # Test only unsupported dtypes + supported_backward = 2 # Test all supported backward dtypes + unsupported_backward = 3 # Test only unsupported backward dtypes + any_one = 4 # Test precisely one supported dtype + none = 5 # Instantiate no dtype variants (no dtype kwarg needed) + any_common_cpu_cuda_one = 6 # Test precisely one supported dtype that is common to both cuda and cpu + + +# Arbitrary order +ANY_DTYPE_ORDER = ( + torch.float32, + torch.float64, + torch.complex64, + torch.complex128, + torch.float16, + torch.bfloat16, + torch.long, + torch.int32, + torch.int16, + torch.int8, + torch.uint8, + torch.bool +) + +def _serialize_sample(sample_input): + # NB: For OpInfos, SampleInput.summary() prints in a cleaner way. + if getattr(sample_input, "summary", None) is not None: + return sample_input.summary() + return str(sample_input) + +# Decorator that defines the OpInfos a test template should be instantiated for. +# +# Example usage: +# +# @ops(unary_ufuncs) +# def test_numerics(self, device, dtype, op): +# +# +# This will instantiate variants of test_numerics for each given OpInfo, +# on each device the OpInfo's operator supports, and for every dtype supported by +# that operator. There are a few caveats to the dtype rule, explained below. +# +# The @ops decorator can accept two +# additional arguments, "dtypes" and "allowed_dtypes". If "dtypes" is specified +# then the test variants are instantiated for those dtypes, regardless of +# what the operator supports. If given "allowed_dtypes" then test variants +# are instantiated only for the intersection of allowed_dtypes and the dtypes +# they would otherwise be instantiated with. That is, allowed_dtypes composes +# with the options listed above and below. 
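As a hedged sketch of how these options compose (class and test names are illustrative; op_db is assumed to come from common_methods_invocations.py as referenced above):

    import torch
    from torch.testing._internal.common_utils import TestCase
    from torch.testing._internal.common_device_type import (
        instantiate_device_type_tests, ops, OpDTypes)
    from torch.testing._internal.common_methods_invocations import op_db

    class TestOpTemplate(TestCase):
        # Instantiated only for the intersection of {float32, float64} and the
        # dtypes each op actually supports on the device under test.
        @ops(op_db, allowed_dtypes=[torch.float32, torch.float64])
        def test_float_variants(self, device, dtype, op):
            pass

        # Instantiated once per op with no dtype passed to the test, so the
        # signature omits "dtype" entirely.
        @ops(op_db, dtypes=OpDTypes.none)
        def test_has_name(self, device, op):
            self.assertIsInstance(op.name, str)

    instantiate_device_type_tests(TestOpTemplate, globals())
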
+# +# The "dtypes" argument can also accept additional values (see OpDTypes above): +# OpDTypes.supported - the test is instantiated for all dtypes the operator +# supports +# OpDTypes.unsupported - the test is instantiated for all dtypes the operator +# doesn't support +# OpDTypes.supported_backward - the test is instantiated for all dtypes the +# operator's gradient formula supports +# OpDTypes.unsupported_backward - the test is instantiated for all dtypes the +# operator's gradient formula doesn't support +# OpDTypes.any_one - the test is instantiated for one dtype the +# operator supports. The dtype supports forward and backward if possible. +# OpDTypes.none - the test is instantiated without any dtype. The test signature +# should not include a dtype kwarg in this case. +# +# These options allow tests to have considerable control over the dtypes +# they're instantiated for. + +class ops(_TestParametrizer): + def __init__(self, op_list, *, dtypes: Union[OpDTypes, Sequence[torch.dtype]] = OpDTypes.supported, + allowed_dtypes: Optional[Sequence[torch.dtype]] = None): + self.op_list = list(op_list) + self.opinfo_dtypes = dtypes + self.allowed_dtypes = set(allowed_dtypes) if allowed_dtypes is not None else None + + def _parametrize_test(self, test, generic_cls, device_cls): + """ Parameterizes the given test function across each op and its associated dtypes. """ + if device_cls is None: + raise RuntimeError('The @ops decorator is only intended to be used in a device-specific ' + 'context; use it with instantiate_device_type_tests() instead of ' + 'instantiate_parametrized_tests()') + + op = check_exhausted_iterator = object() + for op in self.op_list: + # Determine the set of dtypes to use. + dtypes: Union[Set[torch.dtype], Set[None]] + if isinstance(self.opinfo_dtypes, Sequence): + dtypes = set(self.opinfo_dtypes) + elif self.opinfo_dtypes == OpDTypes.unsupported_backward: + dtypes = set(get_all_dtypes()).difference(op.supported_backward_dtypes(device_cls.device_type)) + elif self.opinfo_dtypes == OpDTypes.supported_backward: + dtypes = op.supported_backward_dtypes(device_cls.device_type) + elif self.opinfo_dtypes == OpDTypes.unsupported: + dtypes = set(get_all_dtypes()).difference(op.supported_dtypes(device_cls.device_type)) + elif self.opinfo_dtypes == OpDTypes.supported: + dtypes = op.supported_dtypes(device_cls.device_type) + elif self.opinfo_dtypes == OpDTypes.any_one: + # Tries to pick a dtype that supports both forward or backward + supported = op.supported_dtypes(device_cls.device_type) + supported_backward = op.supported_backward_dtypes(device_cls.device_type) + supported_both = supported.intersection(supported_backward) + dtype_set = supported_both if len(supported_both) > 0 else supported + for dtype in ANY_DTYPE_ORDER: + if dtype in dtype_set: + dtypes = {dtype} + break + else: + dtypes = {} + elif self.opinfo_dtypes == OpDTypes.any_common_cpu_cuda_one: + # Tries to pick a dtype that supports both CPU and CUDA + supported = op.dtypes.intersection(op.dtypesIfCUDA) + if supported: + dtypes = {next(dtype for dtype in ANY_DTYPE_ORDER if dtype in supported)} + else: + dtypes = {} + + elif self.opinfo_dtypes == OpDTypes.none: + dtypes = {None} + else: + raise RuntimeError(f"Unknown OpDType: {self.opinfo_dtypes}") + + if self.allowed_dtypes is not None: + dtypes = dtypes.intersection(self.allowed_dtypes) + + # Construct the test name; device / dtype parts are handled outside. 
+ # See [Note: device and dtype suffix placement] + test_name = op.formatted_name + + for dtype in dtypes: + # Construct parameter kwargs to pass to the test. + param_kwargs = {'op': op} + _update_param_kwargs(param_kwargs, 'dtype', dtype) + + # NOTE: test_wrapper exists because we don't want to apply + # op-specific decorators to the original test. + # Test-specific decorators are applied to the original test, + # however. + try: + @wraps(test) + def test_wrapper(*args, **kwargs): + try: + return test(*args, **kwargs) + except unittest.SkipTest as e: + raise e + except Exception as e: + tracked_input = get_tracked_input() + if PRINT_REPRO_ON_FAILURE and tracked_input is not None: + raise Exception( + f"Caused by {tracked_input.type_desc} " + f"at index {tracked_input.index}: " + f"{_serialize_sample(tracked_input.val)}") from e + raise e + finally: + clear_tracked_input() + + # Initialize info for the last input seen. This is useful for tracking + # down which inputs caused a test failure. Note that TrackedInputIter is + # responsible for managing this. + test.tracked_input = None + + decorator_fn = partial(op.get_decorators, generic_cls.__name__, + test.__name__, device_cls.device_type, dtype) + + yield (test_wrapper, test_name, param_kwargs, decorator_fn) + except Exception as ex: + # Provides an error message for debugging before rethrowing the exception + print(f"Failed to instantiate {test_name} for op {op.name}!") + raise ex + if op is check_exhausted_iterator: + raise ValueError('An empty op_list was passed to @ops. ' + 'Note that this may result from reuse of a generator.') + +# Decorator that skips a test if the given condition is true. +# Notes: +# (1) Skip conditions stack. +# (2) Skip conditions can be bools or strings. If a string the +# test base must have defined the corresponding attribute to be False +# for the test to run. If you want to use a string argument you should +# probably define a new decorator instead (see below). +# (3) Prefer the existing decorators to defining the 'device_type' kwarg. +class skipIf: + + def __init__(self, dep, reason, device_type=None): + self.dep = dep + self.reason = reason + self.device_type = device_type + + def __call__(self, fn): + + @wraps(fn) + def dep_fn(slf, *args, **kwargs): + if self.device_type is None or self.device_type == slf.device_type: + if (isinstance(self.dep, str) and getattr(slf, self.dep, True)) or (isinstance(self.dep, bool) and self.dep): + raise unittest.SkipTest(self.reason) + + return fn(slf, *args, **kwargs) + return dep_fn + + +# Skips a test on CPU if the condition is true. +class skipCPUIf(skipIf): + + def __init__(self, dep, reason): + super().__init__(dep, reason, device_type='cpu') + + +# Skips a test on CUDA if the condition is true. +class skipCUDAIf(skipIf): + + def __init__(self, dep, reason): + super().__init__(dep, reason, device_type='cuda') + +# Skips a test on Lazy if the condition is true. +class skipLazyIf(skipIf): + + def __init__(self, dep, reason): + super().__init__(dep, reason, device_type='lazy') + +# Skips a test on Meta if the condition is true. +class skipMetaIf(skipIf): + + def __init__(self, dep, reason): + super().__init__(dep, reason, device_type='meta') + +# Skips a test on MPS if the condition is true. +class skipMPSIf(skipIf): + + def __init__(self, dep, reason): + super().__init__(dep, reason, device_type='mps') + +# Skips a test on XLA if the condition is true. 
+class skipXLAIf(skipIf): + + def __init__(self, dep, reason): + super().__init__(dep, reason, device_type='xla') + +class skipPRIVATEUSE1If(skipIf): + + def __init__(self, dep, reason): + device_type = torch._C._get_privateuse1_backend_name() + super().__init__(dep, reason, device_type=device_type) + +def _has_sufficient_memory(device, size): + if torch.device(device).type == 'cuda': + if not torch.cuda.is_available(): + return False + gc.collect() + torch.cuda.empty_cache() + # torch.cuda.mem_get_info, aka cudaMemGetInfo, returns a tuple of (free memory, total memory) of a GPU + if device == 'cuda': + device = 'cuda:0' + return torch.cuda.memory.mem_get_info(device)[0] >= size + + if device == 'xla': + raise unittest.SkipTest('TODO: Memory availability checks for XLA?') + + if device != 'cpu': + raise unittest.SkipTest('Unknown device type') + + # CPU + if not HAS_PSUTIL: + raise unittest.SkipTest('Need psutil to determine if memory is sufficient') + + # The sanitizers have significant memory overheads + if TEST_WITH_ASAN or TEST_WITH_TSAN or TEST_WITH_UBSAN: + effective_size = size * 10 + else: + effective_size = size + + if psutil.virtual_memory().available < effective_size: + gc.collect() + return psutil.virtual_memory().available >= effective_size + + +def largeTensorTest(size, device=None): + """Skip test if the device has insufficient memory to run the test + + size may be a number of bytes, a string of the form "N GB", or a callable + + If the test is a device generic test, available memory on the primary device will be checked. + It can also be overriden by the optional `device=` argument. + In other tests, the `device=` argument needs to be specified. + """ + if isinstance(size, str): + assert size.endswith(('GB', 'gb')), "only bytes or GB supported" + size = 1024 ** 3 * int(size[:-2]) + + def inner(fn): + @wraps(fn) + def dep_fn(self, *args, **kwargs): + size_bytes = size(self, *args, **kwargs) if callable(size) else size + _device = device if device is not None else self.get_primary_device() + if not _has_sufficient_memory(_device, size_bytes): + raise unittest.SkipTest(f'Insufficient {_device} memory') + + return fn(self, *args, **kwargs) + return dep_fn + return inner + + +class expectedFailure: + + def __init__(self, device_type): + self.device_type = device_type + + def __call__(self, fn): + + @wraps(fn) + def efail_fn(slf, *args, **kwargs): + if self.device_type is None or self.device_type == slf.device_type: + try: + fn(slf, *args, **kwargs) + except Exception: + return + else: + slf.fail('expected test to fail, but it passed') + + return fn(slf, *args, **kwargs) + return efail_fn + + +class onlyOn: + + def __init__(self, device_type): + self.device_type = device_type + + def __call__(self, fn): + + @wraps(fn) + def only_fn(slf, *args, **kwargs): + if self.device_type != slf.device_type: + reason = f"Only runs on {self.device_type}" + raise unittest.SkipTest(reason) + + return fn(slf, *args, **kwargs) + + return only_fn + + +# Decorator that provides all available devices of the device type to the test +# as a list of strings instead of providing a single device string. +# Skips the test if the number of available devices of the variant's device +# type is less than the 'num_required_devices' arg. 
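A hedged sketch of a multi-device template using this decorator; the method is assumed to live inside a device-generic TestCase template, and the names are illustrative:

    @deviceCountAtLeast(2)
    def test_cross_device_copy(self, devices):
        # "devices" is a list such as ['cuda:0', 'cuda:1']; the test is skipped
        # when fewer than two devices of this type are available.
        src = torch.ones(4, device=devices[0])
        dst = src.to(devices[1])
        self.assertEqual(dst.device, torch.device(devices[1]))
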
+class deviceCountAtLeast: + + def __init__(self, num_required_devices): + self.num_required_devices = num_required_devices + + def __call__(self, fn): + assert not hasattr(fn, 'num_required_devices'), f"deviceCountAtLeast redefinition for {fn.__name__}" + fn.num_required_devices = self.num_required_devices + + @wraps(fn) + def multi_fn(slf, devices, *args, **kwargs): + if len(devices) < self.num_required_devices: + reason = f"fewer than {self.num_required_devices} devices detected" + raise unittest.SkipTest(reason) + + return fn(slf, devices, *args, **kwargs) + + return multi_fn + +# Only runs the test on the native device type (currently CPU, CUDA, Meta and PRIVATEUSE1) +def onlyNativeDeviceTypes(fn): + @wraps(fn) + def only_fn(self, *args, **kwargs): + if self.device_type not in NATIVE_DEVICES: + reason = f"onlyNativeDeviceTypes: doesn't run on {self.device_type}" + raise unittest.SkipTest(reason) + + return fn(self, *args, **kwargs) + + return only_fn + +# Specifies per-dtype precision overrides. +# Ex. +# +# @precisionOverride({torch.half : 1e-2, torch.float : 1e-4}) +# @dtypes(torch.half, torch.float, torch.double) +# def test_X(self, device, dtype): +# ... +# +# When the test is instantiated its class's precision will be set to the +# corresponding override, if it exists. +# self.precision can be accessed directly, and it also controls the behavior of +# functions like self.assertEqual(). +# +# Note that self.precision is a scalar value, so if you require multiple +# precisions (or are working with multiple dtypes) they should be specified +# explicitly and computed using self.precision (e.g. +# self.precision *2, max(1, self.precision)). +class precisionOverride: + + def __init__(self, d): + assert isinstance(d, dict), "precisionOverride not given a dtype : precision dict!" + for dtype in d.keys(): + assert isinstance(dtype, torch.dtype), f"precisionOverride given unknown dtype {dtype}" + + self.d = d + + def __call__(self, fn): + fn.precision_overrides = self.d + return fn + +# Specifies per-dtype tolerance overrides tol(atol, rtol). It has priority over +# precisionOverride. +# Ex. +# +# @toleranceOverride({torch.float : tol(atol=1e-2, rtol=1e-3}, +# torch.double : tol{atol=1e-4, rtol = 0}) +# @dtypes(torch.half, torch.float, torch.double) +# def test_X(self, device, dtype): +# ... +# +# When the test is instantiated its class's tolerance will be set to the +# corresponding override, if it exists. +# self.rtol and self.precision can be accessed directly, and they also control +# the behavior of functions like self.assertEqual(). +# +# The above example sets atol = 1e-2 and rtol = 1e-3 for torch.float and +# atol = 1e-4 and rtol = 0 for torch.double. +tol = namedtuple('tol', ['atol', 'rtol']) + +class toleranceOverride: + def __init__(self, d): + assert isinstance(d, dict), "toleranceOverride not given a dtype : tol dict!" + for dtype, prec in d.items(): + assert isinstance(dtype, torch.dtype), f"toleranceOverride given unknown dtype {dtype}" + assert isinstance(prec, tol), "toleranceOverride not given a dtype : tol dict!" + + self.d = d + + def __call__(self, fn): + fn.tolerance_overrides = self.d + return fn + +# Decorator that instantiates a variant of the test for each given dtype. +# Notes: +# (1) Tests that accept the dtype argument MUST use this decorator. +# (2) Can be overridden for CPU or CUDA, respectively, using dtypesIfCPU +# or dtypesIfCUDA. +# (3) Can accept an iterable of dtypes or an iterable of tuples +# of dtypes. 
+# Examples: +# @dtypes(torch.float32, torch.float64) +# @dtypes((torch.long, torch.float32), (torch.int, torch.float64)) +class dtypes: + + def __init__(self, *args, device_type="all"): + if len(args) > 0 and isinstance(args[0], (list, tuple)): + for arg in args: + assert isinstance(arg, (list, tuple)), \ + "When one dtype variant is a tuple or list, " \ + "all dtype variants must be. " \ + f"Received non-list non-tuple dtype {str(arg)}" + assert all(isinstance(dtype, torch.dtype) for dtype in arg), f"Unknown dtype in {str(arg)}" + else: + assert all(isinstance(arg, torch.dtype) for arg in args), f"Unknown dtype in {str(args)}" + + self.args = args + self.device_type = device_type + + def __call__(self, fn): + d = getattr(fn, 'dtypes', {}) + assert self.device_type not in d, f"dtypes redefinition for {self.device_type}" + d[self.device_type] = self.args + fn.dtypes = d + return fn + + +# Overrides specified dtypes on the CPU. +class dtypesIfCPU(dtypes): + + def __init__(self, *args): + super().__init__(*args, device_type='cpu') + + +# Overrides specified dtypes on CUDA. +class dtypesIfCUDA(dtypes): + + def __init__(self, *args): + super().__init__(*args, device_type='cuda') + +class dtypesIfMPS(dtypes): + + def __init__(self, *args): + super().__init__(*args, device_type='mps') + +class dtypesIfPRIVATEUSE1(dtypes): + + def __init__(self, *args): + super().__init__(*args, device_type=torch._C._get_privateuse1_backend_name()) + +def onlyCPU(fn): + return onlyOn('cpu')(fn) + + +def onlyCUDA(fn): + return onlyOn('cuda')(fn) + + +def onlyMPS(fn): + return onlyOn('mps')(fn) + +def onlyPRIVATEUSE1(fn): + device_type = torch._C._get_privateuse1_backend_name() + device_mod = getattr(torch, device_type, None) + if device_mod is None: + reason = f"Skip as torch has no module of {device_type}" + return unittest.skip(reason)(fn) + return onlyOn(device_type)(fn) + +def onlyCUDAAndPRIVATEUSE1(fn): + @wraps(fn) + def only_fn(self, *args, **kwargs): + if self.device_type not in ('cuda', torch._C._get_privateuse1_backend_name()): + reason = f"onlyCUDAAndPRIVATEUSE1: doesn't run on {self.device_type}" + raise unittest.SkipTest(reason) + + return fn(self, *args, **kwargs) + + return only_fn + +def disablecuDNN(fn): + + @wraps(fn) + def disable_cudnn(self, *args, **kwargs): + if self.device_type == 'cuda' and self.has_cudnn(): + with torch.backends.cudnn.flags(enabled=False): + return fn(self, *args, **kwargs) + return fn(self, *args, **kwargs) + + return disable_cudnn + +def disableMkldnn(fn): + + @wraps(fn) + def disable_mkldnn(self, *args, **kwargs): + if torch.backends.mkldnn.is_available(): + with torch.backends.mkldnn.flags(enabled=False): + return fn(self, *args, **kwargs) + return fn(self, *args, **kwargs) + + return disable_mkldnn + + +def expectedFailureCUDA(fn): + return expectedFailure('cuda')(fn) + +def expectedFailureMeta(fn): + return skipIfTorchDynamo()(expectedFailure('meta')(fn)) + +def expectedFailureXLA(fn): + return expectedFailure('xla')(fn) + +# Skips a test on CPU if LAPACK is not available. +def skipCPUIfNoLapack(fn): + return skipCPUIf(not torch._C.has_lapack, "PyTorch compiled without Lapack")(fn) + + +# Skips a test on CPU if FFT is not available. +def skipCPUIfNoFFT(fn): + return skipCPUIf(not torch._C.has_spectral, "PyTorch is built without FFT support")(fn) + + +# Skips a test on CPU if MKL is not available. 
+def skipCPUIfNoMkl(fn): + return skipCPUIf(not TEST_MKL, "PyTorch is built without MKL support")(fn) + + +# Skips a test on CPU if MKL Sparse is not available (it's not linked on Windows). +def skipCPUIfNoMklSparse(fn): + return skipCPUIf(IS_WINDOWS or not TEST_MKL, "PyTorch is built without MKL support")(fn) + + +# Skips a test on CPU if mkldnn is not available. +def skipCPUIfNoMkldnn(fn): + return skipCPUIf(not torch.backends.mkldnn.is_available(), "PyTorch is built without mkldnn support")(fn) + + +# Skips a test on CUDA if MAGMA is not available. +def skipCUDAIfNoMagma(fn): + return skipCUDAIf('no_magma', "no MAGMA library detected")(skipCUDANonDefaultStreamIf(True)(fn)) + +def has_cusolver(): + return not TEST_WITH_ROCM + +def has_hipsolver(): + rocm_version = _get_torch_rocm_version() + # hipSOLVER is disabled on ROCM < 5.3 + return rocm_version >= (5, 3) + +# Skips a test on CUDA/ROCM if cuSOLVER/hipSOLVER is not available +def skipCUDAIfNoCusolver(fn): + return skipCUDAIf(not has_cusolver() and not has_hipsolver(), "cuSOLVER not available")(fn) + + +# Skips a test if both cuSOLVER and MAGMA are not available +def skipCUDAIfNoMagmaAndNoCusolver(fn): + if has_cusolver(): + return fn + else: + # cuSolver is disabled on cuda < 10.1.243, tests depend on MAGMA + return skipCUDAIfNoMagma(fn) + +# Skips a test if both cuSOLVER/hipSOLVER and MAGMA are not available +def skipCUDAIfNoMagmaAndNoLinalgsolver(fn): + if has_cusolver() or has_hipsolver(): + return fn + else: + # cuSolver is disabled on cuda < 10.1.243, tests depend on MAGMA + return skipCUDAIfNoMagma(fn) + +# Skips a test on CUDA when using ROCm. +def skipCUDAIfRocm(func=None, *, msg="test doesn't currently work on the ROCm stack"): + def dec_fn(fn): + reason = f"skipCUDAIfRocm: {msg}" + return skipCUDAIf(TEST_WITH_ROCM, reason=reason)(fn) + if func: + return dec_fn(func) + return dec_fn + +# Skips a test on CUDA when not using ROCm. +def skipCUDAIfNotRocm(fn): + return skipCUDAIf(not TEST_WITH_ROCM, "test doesn't currently work on the CUDA stack")(fn) + +# Skips a test on CUDA if ROCm is unavailable or its version is lower than requested. +def skipCUDAIfRocmVersionLessThan(version=None): + + def dec_fn(fn): + @wraps(fn) + def wrap_fn(self, *args, **kwargs): + if self.device_type == 'cuda': + if not TEST_WITH_ROCM: + reason = "ROCm not available" + raise unittest.SkipTest(reason) + rocm_version_tuple = _get_torch_rocm_version() + if rocm_version_tuple is None or version is None or rocm_version_tuple < tuple(version): + reason = f"ROCm {rocm_version_tuple} is available but {version} required" + raise unittest.SkipTest(reason) + + return fn(self, *args, **kwargs) + + return wrap_fn + return dec_fn + +# Skips a test on CUDA when using ROCm. +def skipCUDAIfNotMiopenSuggestNHWC(fn): + return skipCUDAIf(not TEST_WITH_MIOPEN_SUGGEST_NHWC, "test doesn't currently work without MIOpen NHWC activation")(fn) + +# Skips a test for specified CUDA versions, given in the form of a list of [major, minor]s. +def skipCUDAVersionIn(versions : List[Tuple[int, int]] = None): + def dec_fn(fn): + @wraps(fn) + def wrap_fn(self, *args, **kwargs): + version = _get_torch_cuda_version() + if version == (0, 0): # cpu or rocm + return fn(self, *args, **kwargs) + if version in (versions or []): + reason = f"test skipped for CUDA version {version}" + raise unittest.SkipTest(reason) + return fn(self, *args, **kwargs) + + return wrap_fn + return dec_fn + +# Skips a test for CUDA versions less than specified, given in the form of [major, minor]. 
+def skipCUDAIfVersionLessThan(versions : Tuple[int, int] = None): + def dec_fn(fn): + @wraps(fn) + def wrap_fn(self, *args, **kwargs): + version = _get_torch_cuda_version() + if version == (0, 0): # cpu or rocm + return fn(self, *args, **kwargs) + if version < versions: + reason = f"test skipped for CUDA versions < {version}" + raise unittest.SkipTest(reason) + return fn(self, *args, **kwargs) + + return wrap_fn + return dec_fn + +# Skips a test on CUDA if cuDNN is unavailable or its version is lower than requested. +def skipCUDAIfCudnnVersionLessThan(version=0): + + def dec_fn(fn): + @wraps(fn) + def wrap_fn(self, *args, **kwargs): + if self.device_type == 'cuda': + if self.no_cudnn: + reason = "cuDNN not available" + raise unittest.SkipTest(reason) + if self.cudnn_version is None or self.cudnn_version < version: + reason = f"cuDNN version {self.cudnn_version} is available but {version} required" + raise unittest.SkipTest(reason) + + return fn(self, *args, **kwargs) + + return wrap_fn + return dec_fn + +# Skips a test on CUDA if cuSparse generic API is not available +def skipCUDAIfNoCusparseGeneric(fn): + return skipCUDAIf(not TEST_CUSPARSE_GENERIC, "cuSparse Generic API not available")(fn) + +def skipCUDAIfNoHipsparseGeneric(fn): + return skipCUDAIf(not TEST_HIPSPARSE_GENERIC, "hipSparse Generic API not available")(fn) + +def skipCUDAIfNoSparseGeneric(fn): + return skipCUDAIf(not (TEST_CUSPARSE_GENERIC or TEST_HIPSPARSE_GENERIC), "Sparse Generic API not available")(fn) + +def skipCUDAIfNoCudnn(fn): + return skipCUDAIfCudnnVersionLessThan(0)(fn) + +def skipCUDAIfMiopen(fn): + return skipCUDAIf(torch.version.hip is not None, "Marked as skipped for MIOpen")(fn) + +def skipCUDAIfNoMiopen(fn): + return skipCUDAIf(torch.version.hip is None, "MIOpen is not available")(skipCUDAIfNoCudnn(fn)) + +def skipLazy(fn): + return skipLazyIf(True, "test doesn't work with lazy tensors")(fn) + +def skipMeta(fn): + return skipMetaIf(True, "test doesn't work with meta tensors")(fn) + +def skipXLA(fn): + return skipXLAIf(True, "Marked as skipped for XLA")(fn) + +def skipMPS(fn): + return skipMPSIf(True, "test doesn't work on MPS backend")(fn) + +def skipPRIVATEUSE1(fn): + return skipPRIVATEUSE1If(True, "test doesn't work on privateuse1 backend")(fn) + +# TODO: the "all" in the name isn't true anymore for quite some time as we have also have for example XLA and MPS now. +# This should probably enumerate all available device type test base classes. 
+def get_all_device_types() -> List[str]: + return ['cpu'] if not torch.cuda.is_available() else ['cpu', 'cuda'] diff --git a/env-llmeval/lib/python3.10/site-packages/torch/testing/_internal/common_dist_composable.py b/env-llmeval/lib/python3.10/site-packages/torch/testing/_internal/common_dist_composable.py new file mode 100644 index 0000000000000000000000000000000000000000..938de12e11e482fad9d17c6f733bc7bdce1b4760 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torch/testing/_internal/common_dist_composable.py @@ -0,0 +1,109 @@ +# Owner(s): ["oncall: distributed"] + +from typing import Tuple + +import torch +import torch.nn as nn + + +class UnitModule(nn.Module): + def __init__(self, device: torch.device): + super().__init__() + self.l1 = nn.Linear(100, 100, device=device) + self.seq = nn.Sequential( + nn.ReLU(), + nn.Linear(100, 100, device=device), + nn.ReLU(), + ) + self.l2 = nn.Linear(100, 100, device=device) + + def forward(self, x): + return self.l2(self.seq(self.l1(x))) + + +class CompositeModel(nn.Module): + def __init__(self, device: torch.device): + super().__init__() + self.l1 = nn.Linear(100, 100, device=device) + self.u1 = UnitModule(device) + self.u2 = UnitModule(device) + self.l2 = nn.Linear(100, 100, device=device) + + def forward(self, x): + return self.l2(self.u2(self.u1(self.l1(x)))) + + +class UnitParamModule(nn.Module): + def __init__(self, device: torch.device): + super().__init__() + self.l = nn.Linear(100, 100, device=device) + self.seq = nn.Sequential( + nn.ReLU(), + nn.Linear(100, 100, device=device), + nn.ReLU(), + ) + self.p = nn.Parameter(torch.randn((100, 100), device=device)) + + def forward(self, x): + return torch.mm(self.seq(self.l(x)), self.p) + + +class CompositeParamModel(nn.Module): + def __init__(self, device: torch.device): + super().__init__() + self.l = nn.Linear(100, 100, device=device) + self.u1 = UnitModule(device) + self.u2 = UnitModule(device) + self.p = nn.Parameter(torch.randn((100, 100), device=device)) + self.register_buffer( + "buffer", torch.randn((100, 100), device=device), persistent=True + ) + + def forward(self, x): + a = self.u2(self.u1(self.l(x))) + b = self.p + return torch.mm(a, b) + + +class FakeSequential(nn.Module): + # Define this class to achieve a desired nested wrapping using the module + # wrap policy with `nn.Sequential` + def __init__(self, *modules: Tuple[nn.Module, ...]) -> None: + super().__init__() + self._module_sequence = list(modules) + + def forward(self, x: torch.Tensor) -> torch.Tensor: + for module in self._module_sequence: + x = module(x) + return x + + +class NestedSequentialModel(nn.Module): + def __init__(self, device: torch.device) -> None: + super().__init__() + # This nested structure exercises traversal order to catch differences + # between valid traversals (e.g. BFS and DFS variations). 
+ self.seq1 = nn.Sequential( + nn.Linear(1, 1, device=device), + FakeSequential( + nn.Linear(1, 1, device=device), + nn.ReLU(), + FakeSequential( + nn.Linear(1, 1, device=device), + ), + nn.ReLU(), + ), + nn.Linear(1, 2, device=device), + ) + self.lin = nn.Linear(2, 2, device=device) + self.seq2 = nn.Sequential( + nn.ReLU(), + nn.Linear(2, 3, device=device), + FakeSequential( + nn.Linear(3, 2, bias=False, device=device), + nn.Linear(2, 4, bias=False, device=device), + ), + ) + + def forward(self, x: torch.Tensor) -> torch.Tensor: + return self.seq2(self.lin(self.seq1(x))) diff --git a/env-llmeval/lib/python3.10/site-packages/torch/testing/_internal/common_distributed.py b/env-llmeval/lib/python3.10/site-packages/torch/testing/_internal/common_distributed.py new file mode 100644 index 0000000000000000000000000000000000000000..04dc80321f8d5242c0878c2ffb0c7ccacbbe4e1d --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torch/testing/_internal/common_distributed.py @@ -0,0 +1,1255 @@ +import faulthandler +import logging +import multiprocessing +import os +import queue +import subprocess +import sys +import tempfile +import threading +import time +import traceback +import types +import unittest +from contextlib import contextmanager +from dataclasses import dataclass +from datetime import timedelta +from enum import Enum +from functools import partial, reduce, wraps +from io import StringIO +from typing import Dict, NamedTuple, Optional, Union +from unittest.mock import patch + +import torch +import torch._dynamo.test_case +import torch.cuda.nccl +import torch.distributed as c10d +import torch.nn as nn +from torch.testing._internal.common_utils import ( + FILE_SCHEMA, + find_free_port, + IS_SANDCASTLE, + retry_on_connect_failures, + skip_but_pass_in_sandcastle, + skip_but_pass_in_sandcastle_if, + TEST_WITH_ROCM, + TEST_WITH_TSAN, + TestCase, +) +from torch.testing._internal.distributed.multi_threaded_pg import ( + _install_threaded_pg, + _uninstall_threaded_pg, + ProcessLocalGroup, +) + +logging.basicConfig(level=logging.INFO) +logger = logging.getLogger(__name__) + + +class TestSkip(NamedTuple): + exit_code: int + message: str + + +TEST_SKIPS = { + "backend_unavailable": TestSkip( + 72, "Skipped because distributed backend is not available." 
+ ), + "small_worldsize": TestSkip(73, "Skipped due to small world size."), + "odd_worldsize": TestSkip(87, "Skipped due to odd world size."), + "no_cuda": TestSkip(74, "CUDA is not available."), + "multi-gpu-1": TestSkip(75, "Need at least 1 CUDA device"), + "multi-gpu-2": TestSkip(77, "Need at least 2 CUDA devices"), + "multi-gpu-3": TestSkip(80, "Need at least 3 CUDA devices"), + "multi-gpu-4": TestSkip(81, "Need at least 4 CUDA devices"), + "multi-gpu-5": TestSkip(82, "Need at least 5 CUDA devices"), + "multi-gpu-6": TestSkip(83, "Need at least 6 CUDA devices"), + "multi-gpu-7": TestSkip(84, "Need at least 7 CUDA devices"), + "multi-gpu-8": TestSkip(85, "Need at least 8 CUDA devices"), + "nccl": TestSkip(76, "c10d not compiled with NCCL support"), + "skipIfRocm": TestSkip(78, "Test skipped for ROCm"), + "no_peer_access": TestSkip(79, "Test skipped because no GPU peer access"), + "generic": TestSkip( + 86, "Test skipped at subprocess level, look at subprocess log for skip reason" + ), + "importerror": TestSkip(88, "Test skipped due to missing import"), +} + + +@dataclass +class DistTestCases: + # Backends that do not support a specific collective + skip_collective = {} + skip_collective["allgather_coalesced"] = {"nccl", "mpi", "ucc"} + skip_collective["reduce"] = set() + skip_collective["sendrecv anysource"] = {"nccl", "ucc"} + skip_collective["cpu barrier"] = {"nccl", "ucc"} + + # Sets showing that something is implemented + backend_feature = {} + backend_feature["gpu"] = {"nccl", "gloo", "ucc"} + backend_feature["cuda"] = {"nccl", "gloo", "ucc"} + backend_feature["ddp"] = {"nccl", "gloo", "ucc"} + backend_feature["subgroup"] = {"nccl", "gloo", "ucc"} + backend_feature["plugin"] = set() + + +def skip_if_no_gpu(func): + """Skips if the world size exceeds the number of GPUs, ensuring that if the + test is run, each rank has its own GPU via ``torch.cuda.device(rank)``.""" + + @wraps(func) + def wrapper(*args, **kwargs): + if not torch.cuda.is_available(): + sys.exit(TEST_SKIPS["no_cuda"].exit_code) + world_size = int(os.environ["WORLD_SIZE"]) + if torch.cuda.device_count() < world_size: + sys.exit(TEST_SKIPS[f"multi-gpu-{world_size}"].exit_code) + + return func(*args, **kwargs) + + return wrapper + + +def skip_if_small_worldsize(func): + @wraps(func) + def wrapper(*args, **kwargs): + if (os.environ["BACKEND"] != "mpi") and int(os.environ["WORLD_SIZE"]) <= 2: + sys.exit(TEST_SKIPS["small_worldsize"].exit_code) + + return func(*args, **kwargs) + + return wrapper + + +def skip_if_odd_worldsize(func): + @wraps(func) + def wrapper(*args, **kwargs): + if (os.environ["BACKEND"] != "mpi") and int(os.environ["WORLD_SIZE"]) % 2 == 1: + sys.exit(TEST_SKIPS["odd_worldsize"].exit_code) + + return func(*args, **kwargs) + + return wrapper + + +def require_n_gpus_for_nccl_backend(n, backend): + def decorator(func): + @wraps(func) + def wrapper(*args, **kwargs): + if backend == "nccl" and torch.cuda.device_count() < n: + sys.exit(TEST_SKIPS[f"multi-gpu-{n}"].exit_code) + else: + return func(*args, **kwargs) + + return wrapper + + return decorator + + +def import_transformers_or_skip(): + def decorator(func): + @wraps(func) + def wrapper(*args, **kwargs): + try: + from transformers import ( # noqa: F401 + AutoModelForMaskedLM, + BertConfig, + ) + + return func(*args, **kwargs) + except ImportError: + sys.exit(TEST_SKIPS["importerror"].exit_code) + + return wrapper + + return decorator + + +def skip_if_lt_x_gpu(x): + def decorator(func): + @wraps(func) + def wrapper(*args, **kwargs): + if 
torch.cuda.is_available() and torch.cuda.device_count() >= x: + return func(*args, **kwargs) + sys.exit(TEST_SKIPS[f"multi-gpu-{x}"].exit_code) + + return wrapper + + return decorator + + +# This decorator helps avoiding initializing cuda while testing other backends +def nccl_skip_if_lt_x_gpu(backend, x): + def decorator(func): + @wraps(func) + def wrapper(*args, **kwargs): + if backend != "nccl": + return func(*args, **kwargs) + if torch.cuda.is_available() and torch.cuda.device_count() >= x: + return func(*args, **kwargs) + sys.exit(TEST_SKIPS[f"multi-gpu-{x}"].exit_code) + + return wrapper + + return decorator + + +def verify_ddp_error_logged(model_DDP, err_substr): + # Verify error was logged in ddp_logging_data. + ddp_logging_data = model_DDP._get_ddp_logging_data() + assert "iteration" in ddp_logging_data + assert "has_error" in ddp_logging_data + assert "error" in ddp_logging_data + logging_err = ddp_logging_data["error"] + # Remove C++ stacktrace if needed. + actual = ( + err_substr + if err_substr.find("\nException raised from ") == -1 + else err_substr.split("\nException raised from ")[0] + ) + assert ( + actual in logging_err + ), f"Did not find expected {actual} in ddp logging data error: {logging_err}" + + +def with_nccl_blocking_wait(func): + """ + Convenience decorator to set/unset TORCH_NCCL_BLOCKING_WAIT flag. Note that use of + this decorator will override the setting of TORCH_NCCL_ASYNC_ERROR_HANDLING for + the particular test. After the test, both TORCH_NCCL_BLOCKING_WAIT and + TORCH_NCCL_ASYNC_ERROR_HANDLING will be restored to their original values. + """ + + @wraps(func) + def wrapper(*args, **kwargs): + # Save and unset TORCH_NCCL_ASYNC_ERROR_HANDLING + try: + cached_nccl_async_error_handling: Union[str, None] = os.environ[ + "TORCH_NCCL_ASYNC_ERROR_HANDLING" + ] + del os.environ["TORCH_NCCL_ASYNC_ERROR_HANDLING"] + except KeyError: + # TORCH_NCCL_ASYNC_ERROR_HANDLING was unset + cached_nccl_async_error_handling = None + + # Save val of TORCH_NCCL_BLOCKING_WAIT and set it. + try: + cached_nccl_blocking_wait: Union[str, None] = os.environ[ + "TORCH_NCCL_BLOCKING_WAIT" + ] + except KeyError: + cached_nccl_blocking_wait = None + finally: + os.environ["TORCH_NCCL_BLOCKING_WAIT"] = "1" + + try: + ret = func(*args, **kwargs) + return ret + finally: + # restore old values. + if cached_nccl_async_error_handling is not None: + os.environ[ + "TORCH_NCCL_ASYNC_ERROR_HANDLING" + ] = cached_nccl_async_error_handling + + if cached_nccl_blocking_wait is not None: + os.environ["TORCH_NCCL_BLOCKING_WAIT"] = cached_nccl_blocking_wait + + return wrapper + + +def with_dist_debug_levels(levels): + """ + Runs a test for each distributed debug level specified in levels. + """ + + def decorator(func): + @wraps(func) + def wrapper(*args, **kwargs): + old_level = os.environ.get("TORCH_DISTRIBUTED_DEBUG", None) + for level in levels: + os.environ["TORCH_DISTRIBUTED_DEBUG"] = level + c10d.set_debug_level_from_env() + ret = func(*args, **kwargs) + c10d.barrier() + if old_level is not None: + os.environ["TORCH_DISTRIBUTED_DEBUG"] = old_level + # Only returns test return for last test, but since these are + # unittests the return value is not really used and earlier tests + # would've raised had they failed. 
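+            # A typical (illustrative) use decorates a test method, e.g.:
+            #   @with_dist_debug_levels(["OFF", "INFO", "DETAIL"])
+            #   def test_something(self): ...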
+ return ret + + return wrapper + + return decorator + + +def requires_gloo(): + return skip_but_pass_in_sandcastle_if( + not c10d.is_gloo_available(), + "c10d was not compiled with the Gloo backend", + ) + + +def requires_nccl_version(version, msg): + if not c10d.is_nccl_available(): + return skip_but_pass_in_sandcastle( + "c10d was not compiled with the NCCL backend", + ) + else: + return skip_but_pass_in_sandcastle_if( + torch.cuda.nccl.version() < version, + "Requires NCCL version greater than or equal to: {}, found: {}, reason: {}".format( + version, torch.cuda.nccl.version(), msg + ), + ) + + +def requires_nccl(): + return skip_but_pass_in_sandcastle_if( + not c10d.is_nccl_available(), + "c10d was not compiled with the NCCL backend", + ) + +def requires_ucc(): + return skip_but_pass_in_sandcastle_if( + not c10d.is_ucc_available(), + "c10d was not compiled with the UCC backend", + ) + +def requires_mpi(): + return skip_but_pass_in_sandcastle_if( + not c10d.is_mpi_available(), + "c10d was not compiled with the MPI backend", + ) + + +def skip_if_rocm(func): + """Skips a test for ROCm""" + func.skip_if_rocm = True + + @wraps(func) + def wrapper(*args, **kwargs): + if not TEST_WITH_ROCM: + return func(*args, **kwargs) + sys.exit(TEST_SKIPS["skipIfRocm"].exit_code) + + return wrapper + + +def skip_if_win32(): + return skip_but_pass_in_sandcastle_if( + sys.platform == "win32", + "This unit test case is not supported on Windows platform", + ) + + +@retry_on_connect_failures +def create_tcp_store( + addr="localhost", + world_size=1, + is_master=True, + timeout=timedelta(minutes=5), + wait_for_workers=True, + jit_class=False, + use_libuv=False +): + """ + Creates a TCP store. Retries if the chosen port is already in use. + """ + port = find_free_port() + if jit_class: + timeout_millisecond = int(timeout / timedelta(milliseconds=1)) + return torch.classes.dist_c10d.TCPStore( + addr, port, world_size, is_master, timeout_millisecond + ) + else: + return c10d.TCPStore( + addr, port, world_size, is_master, wait_for_workers=wait_for_workers, use_libuv=use_libuv + ) + + +if TEST_WITH_TSAN: + # TSAN runs much slower. + TIMEOUT_DEFAULT = 500 +else: + TIMEOUT_DEFAULT = int(os.getenv('DISTRIBUTED_TESTS_DEFAULT_TIMEOUT', '300')) +TIMEOUT_OVERRIDE = {"test_ddp_uneven_inputs": 400} + + +# https://github.com/pytorch/pytorch/issues/75665 +if TEST_WITH_ROCM: + TIMEOUT_OVERRIDE["test_join_kwargs"] = 200 + + +def create_device(interface=None): + if sys.platform == "win32" or interface is None: + return c10d.ProcessGroupGloo.create_device(hostname="127.0.0.1") + else: + return c10d.ProcessGroupGloo.create_device(interface=interface) + + +def get_timeout(test_id) -> int: + return TIMEOUT_OVERRIDE.get(test_id.split(".")[-1], TIMEOUT_DEFAULT) + + +@contextmanager +def captured_output(): + new_out, new_err = StringIO(), StringIO() + old_out, old_err = sys.stdout, sys.stderr + try: + sys.stdout, sys.stderr = new_out, new_err + yield sys.stdout, sys.stderr + finally: + sys.stdout, sys.stderr = old_out, old_err + + +def simple_sparse_reduce_tests(rank: int, world_size: int, num_inputs: int = 1): + """ + Generate a number of basic test cases for sparse reduction. + These cover tensors with a varying number of sparse dimensions and a varying + number of dense dimensions. The only reduction operation we support is sum. + """ + + def generate(rank: int, world_size: int, sparse_dims: int = 1, dense_dims: int = 0): + # First sparse dimension is [0..rank]. 
+ # Subsequent dimensions are always 0, so we know there is + # a non-empty intersection between any two sparse tensors. + indices = torch.reshape(torch.arange(rank + 1), (1, rank + 1)) + shape = [world_size] + [2 for _ in range(dense_dims)] + for _ in range(sparse_dims - 1): + indices = torch.cat((indices, torch.zeros(1, rank + 1))) + shape.append(world_size) + values = torch.ones([rank + 1] + [2 for _ in range(dense_dims)]) + return torch.sparse_coo_tensor(indices, values, shape) + + def compute_sum(fn, world_size: int): + return reduce( + lambda a, b: a + b, [fn(rank, world_size) for rank in range(world_size)] + ) + + return [ + ( + [ + fn(num_inputs * rank + i, num_inputs * world_size) + for i in range(num_inputs) + ], + [compute_sum(fn, num_inputs * world_size) for i in range(num_inputs)], + ) + for fn in [ + partial(generate, sparse_dims=1), + partial(generate, sparse_dims=2), + partial(generate, sparse_dims=3), + partial(generate, dense_dims=1), + partial(generate, dense_dims=2), + partial(generate, dense_dims=3), + ] + ] + + +# HELPER FOR MULTIGPU TESTS +def init_multigpu_helper(world_size: int, backend: str): + """Multigpu tests are designed to simulate the multi nodes with multi + GPUs on each node. Nccl backend requires equal #GPUs in each process. + On a single node, all visible GPUs are evenly + divided to subsets, each process only uses a subset. + """ + nGPUs = torch.cuda.device_count() + visible_devices = range(nGPUs) + + # If rank is less than or equal to number of available GPU's + # then each rank can be mapped to corresponding GPU. + nGPUs_per_process = 1 + if world_size > nGPUs: + nGPUs_per_process = nGPUs // world_size + rank_to_GPU = { + i: list(visible_devices[i * nGPUs_per_process : (i + 1) * nGPUs_per_process]) + for i in range(world_size) + } + return rank_to_GPU + + +tmp_dir: Optional[tempfile.TemporaryDirectory] = None + + +def initialize_temp_directories(init_method: Optional[str] = None) -> None: + global tmp_dir + tmp_dir = tempfile.TemporaryDirectory() + os.environ["TEMP_DIR"] = tmp_dir.name + os.mkdir(os.path.join(tmp_dir.name, "barrier")) + os.mkdir(os.path.join(tmp_dir.name, "test_dir")) + init_dir_path = os.path.join(tmp_dir.name, "init_dir") + os.mkdir(init_dir_path) + # Set init method if specified. + if init_method is not None: + os.environ["INIT_METHOD"] = init_method + else: + os.environ["INIT_METHOD"] = FILE_SCHEMA + os.path.join( + init_dir_path, "shared_init_file" + ) + + +def cleanup_temp_dir() -> None: + if tmp_dir is not None: + tmp_dir.cleanup() + + +# Most tests operate with this worldsize +DEFAULT_WORLD_SIZE = 4 + +# [How does MultiProcessTestCase work?] +# Each MultiProcessTestCase instance uses 1 + `world_size()` processes, by +# default `world_size()` returns 4. Let's take `test_rpc_spawn.py` as an +# example which inherits from this class. Its `Setup()` methods calls into +# `MultiProcessTestCase._spawn_processes()` which spawns `world_size()` +# subprocesses. During the spawn, the main process passes the test name to +# subprocesses, and the name is acquired from self.id(). The subprocesses +# then use the provided test function name to retrieve the function attribute +# from the test instance and run it. The main process simply waits for all +# subprocesses to join. + + +class MultiProcessTestCase(TestCase): + MAIN_PROCESS_RANK = -1 + # This exit code is used to indicate that the test code had an error and + # exited abnormally. 
There are certain tests that might use sys.exit() to + # simulate failures and in those cases, we can't have an exit code of 0, + # but we still want to ensure we didn't run into any other errors. + TEST_ERROR_EXIT_CODE = 10 + + # do not early terminate for distributed tests. + def _should_stop_test_suite(self) -> bool: + return False + + @property + def world_size(self) -> int: + return DEFAULT_WORLD_SIZE + + def join_or_run(self, fn): + @wraps(fn) + def wrapper(self): + if self.rank == self.MAIN_PROCESS_RANK: + self._join_processes(fn) + else: + fn() + + return types.MethodType(wrapper, self) + + # The main process spawns N subprocesses that run the test. + # Constructor patches current instance test method to + # assume the role of the main process and join its subprocesses, + # or run the underlying test function. + def __init__(self, method_name: str = "runTest") -> None: + super().__init__(method_name) + fn = getattr(self, method_name) + setattr(self, method_name, self.join_or_run(fn)) + + def setUp(self) -> None: + super().setUp() + self.skip_return_code_checks = [] # type: ignore[var-annotated] + self.processes = [] # type: ignore[var-annotated] + self.rank = self.MAIN_PROCESS_RANK + self.file_name = tempfile.NamedTemporaryFile(delete=False).name + # pid to pipe consisting of error message from process. + self.pid_to_pipe = {} # type: ignore[var-annotated] + + def tearDown(self) -> None: + super().tearDown() + for p in self.processes: + p.terminate() + # Each Process instance holds a few open file descriptors. The unittest + # runner creates a new TestCase instance for each test method and keeps + # it alive until the end of the entire suite. We must thus reset the + # processes to prevent an effective file descriptor leak. + self.processes = [] + + def _current_test_name(self) -> str: + # self.id() == e.g. '__main__.TestDistributed.TestAdditive.test_get_rank' + return self.id().split(".")[-1] + + def _start_processes(self, proc) -> None: + self.processes = [] + for rank in range(int(self.world_size)): + parent_conn, child_conn = torch.multiprocessing.Pipe() + process = proc( + target=self.__class__._run, + name="process " + str(rank), + args=(rank, self._current_test_name(), self.file_name, child_conn), + ) + process.start() + logger.info("Started process %s with pid %s", rank, process.pid) + self.pid_to_pipe[process.pid] = parent_conn + self.processes.append(process) + + def _spawn_processes(self) -> None: + proc = torch.multiprocessing.get_context("spawn").Process + self._start_processes(proc) + + class Event(Enum): + GET_TRACEBACK = 1 + + @staticmethod + def _event_listener(parent_pipe, signal_pipe, rank: int): + logger.info("Starting event listener thread for rank %s", rank) + while True: + ready_pipes = multiprocessing.connection.wait([parent_pipe, signal_pipe]) + + if parent_pipe in ready_pipes: + + if parent_pipe.closed: + logger.info( + "Pipe closed for process %s, stopping event listener thread", rank + ) + return + + event = parent_pipe.recv() + logger.info("Received event %s on process %s", event, rank) + + if event == MultiProcessTestCase.Event.GET_TRACEBACK: + # Return traceback to the parent process. 
+ with tempfile.NamedTemporaryFile(mode="r+") as tmp_file: + faulthandler.dump_traceback(tmp_file) + # Flush buffers and seek to read from the beginning + tmp_file.flush() + tmp_file.seek(0) + parent_pipe.send(tmp_file.read()) + + logger.info("Process %s sent traceback", rank) + + if signal_pipe in ready_pipes: + return + + @classmethod + def _run(cls, rank: int, test_name: str, file_name: str, parent_pipe) -> None: + self = cls(test_name) + self.rank = rank + self.file_name = file_name + self.run_test(test_name, parent_pipe) + + def run_test(self, test_name: str, parent_pipe) -> None: + # Start event listener thread. + signal_recv_pipe, signal_send_pipe = torch.multiprocessing.Pipe(duplex=False) + event_listener_thread = threading.Thread( + target=MultiProcessTestCase._event_listener, + args=(parent_pipe, signal_recv_pipe, self.rank), + daemon=True, + ) + event_listener_thread.start() + if sys.platform != "win32" and sys.platform != "darwin": + # Register signal handler to dump stack traces on FATALs. + # Windows and MacOS do not support the signal handlers. + torch._C._set_print_stack_traces_on_fatal_signal(True) + # Show full C++ stacktraces when a Python error originating from C++ is raised. + os.environ["TORCH_SHOW_CPP_STACKTRACES"] = "1" + + # self.id() == e.g. '__main__.TestDistributed.test_get_rank' + # We're retrieving a corresponding test and executing it. + try: + getattr(self, test_name)() + except unittest.SkipTest as se: + logger.info( + "Process %s skipping test %s for following reason: %s", self.rank, test_name, str(se) + ) + sys.exit(TEST_SKIPS["generic"].exit_code) + except Exception as e: + logger.error( + "Caught exception: \n%s exiting " + "process %s with exit code: %s", + traceback.format_exc(), self.rank, MultiProcessTestCase.TEST_ERROR_EXIT_CODE + ) + # Send error to parent process. + parent_pipe.send(traceback.format_exc()) + sys.exit(MultiProcessTestCase.TEST_ERROR_EXIT_CODE) + finally: + if signal_send_pipe is not None: + signal_send_pipe.send(None) + + assert event_listener_thread is not None + event_listener_thread.join() + # Close pipe after done with test. + parent_pipe.close() + + def _get_timedout_process_traceback(self) -> None: + pipes = [] + for i, process in enumerate(self.processes): + if process.exitcode is None: + pipe = self.pid_to_pipe[process.pid] + try: + pipe.send(MultiProcessTestCase.Event.GET_TRACEBACK) + pipes.append((i, pipe)) + except ConnectionError as e: + logger.error( + "Encountered error while trying to get traceback for process %s: %s", i, e + ) + + # Wait for results. + for rank, pipe in pipes: + try: + # Wait for traceback + if pipe.poll(5): + if pipe.closed: + logger.info( + "Pipe closed for process %s, cannot retrieve traceback", rank + ) + continue + + traceback = pipe.recv() + logger.error( + "Process %s timed out with traceback: \n\n%s", rank, traceback + ) + else: + logger.error( + "Could not retrieve traceback for timed out process: %s", rank + ) + except ConnectionError as e: + logger.error( + "Encountered error while trying to get traceback for process %s: %s", rank, e + ) + + def _join_processes(self, fn) -> None: + timeout = get_timeout(self.id()) + start_time = time.time() + subprocess_error = False + try: + while True: + # check to see if any subprocess exited with an error early. + for (i, p) in enumerate(self.processes): + # This is the exit code processes exit with if they + # encountered an exception. 
+ if p.exitcode == MultiProcessTestCase.TEST_ERROR_EXIT_CODE: + print( + f"Process {i} terminated with exit code {p.exitcode}, terminating remaining processes." + ) + active_children = torch.multiprocessing.active_children() + for ac in active_children: + ac.terminate() + subprocess_error = True + break + if subprocess_error: + break + # All processes have joined cleanly if they all a valid exitcode + if all(p.exitcode is not None for p in self.processes): + break + # Check if we should time out the test. If so, we terminate each process. + elapsed = time.time() - start_time + if elapsed > timeout: + self._get_timedout_process_traceback() + print( + f"Timing out after {timeout} seconds and killing subprocesses." + ) + for p in self.processes: + p.terminate() + break + # Sleep to avoid excessive busy polling. + time.sleep(0.1) + + elapsed_time = time.time() - start_time + + if fn in self.skip_return_code_checks: + self._check_no_test_errors(elapsed_time) + else: + self._check_return_codes(elapsed_time) + finally: + # Close all pipes + for pipe in self.pid_to_pipe.values(): + pipe.close() + + def _check_no_test_errors(self, elapsed_time) -> None: + """ + Checks that we didn't have any errors thrown in the child processes. + """ + for i, p in enumerate(self.processes): + if p.exitcode is None: + raise RuntimeError( + f"Process {i} timed out after {elapsed_time} seconds" + ) + self.assertNotEqual(self.TEST_ERROR_EXIT_CODE, p.exitcode) + + def _check_return_codes(self, elapsed_time) -> None: + """ + Checks that the return codes of all spawned processes match, and skips + tests if they returned a return code indicating a skipping condition. + """ + # If no processes are spawned, there is nothing to check. + if not self.processes: + logger.warning("Note: no subprocesses were spawned, test was likely skipped.") + return + + first_process = self.processes[0] + # first, we check if there are errors in actual processes + # (via TEST_ERROR_EXIT CODE), and raise an exception for those. + # the reason we do this is to attempt to raise a more helpful error + # message than "Process x terminated/timed out" + # TODO: we should pipe the exception of the failed subprocess here. + # Currently, the actual exception is displayed as a logging output. + errored_processes = [ + (i, p) + for i, p in enumerate(self.processes) + if p.exitcode == MultiProcessTestCase.TEST_ERROR_EXIT_CODE + ] + if errored_processes: + error = "" + for i, process in errored_processes: + # Get error from pipe. + error_message = self.pid_to_pipe[process.pid].recv() + error += ( + "Process {} exited with error code {} and exception:\n{}\n".format( + i, MultiProcessTestCase.TEST_ERROR_EXIT_CODE, error_message + ) + ) + + raise RuntimeError(error) + # If no process exited uncleanly, we check for timeouts, and then ensure + # each process exited cleanly. + for i, p in enumerate(self.processes): + if p.exitcode is None: + raise RuntimeError( + f"Process {i} terminated or timed out after {elapsed_time} seconds" + ) + self.assertEqual( + p.exitcode, + first_process.exitcode, + msg="Expect process {} exit code to match Process 0 exit code of {}, but got {}".format( + i, first_process.exitcode, p.exitcode + ), + ) + for skip in TEST_SKIPS.values(): + if first_process.exitcode == skip.exit_code: + if IS_SANDCASTLE: + # Don't use unittest.skip to skip the test on sandcastle + # since it creates tasks for skipped tests assuming there + # is some follow-up needed. Instead just "pass" the test + # with an appropriate message. 
+ logger.info( + "Skipping %s on sandcastle for the following reason: %s", self.id(), skip.message + ) + return + else: + raise unittest.SkipTest(skip.message) + self.assertEqual( + first_process.exitcode, + 0, + msg=f"Expected zero exit code but got {first_process.exitcode} for pid: {first_process.pid}", + ) + + @property + def is_master(self) -> bool: + return self.rank == 0 + + +# Cannot use functools.cache as it requires python 3.9 +EFA_PROBE_RESULT = None + + +def has_efa() -> bool: + """ + If shell command `fi_info -p efa -t FI_EP_RDM` returns exit code 0 then we assume that the machine has + Libfabric EFA interfaces and EFA software components installed, + see https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/efa-start.html. + """ + global EFA_PROBE_RESULT + if EFA_PROBE_RESULT is not None: + return EFA_PROBE_RESULT + + try: + EFA_PROBE_RESULT = ( + subprocess.run(["fi_info", "-p", "efa", "-t", "FI_EP_RDM"], check=False).returncode == 0 + ) + except FileNotFoundError: + EFA_PROBE_RESULT = False + return EFA_PROBE_RESULT + + +def tp_transports(): + """ + If the machine has Libfabric EFA interfaces and EFA software components installed it may cause + 'RuntimeError: In operator() at tensorpipe/common/ibv.h:172 "": Operation not supported' if tensorpipe + uses InfiniBand transport, so we exclude it from tensorpipe transports, + see https://github.com/pytorch/pytorch/issues/73885 and https://github.com/pytorch/pytorch/issues/65022 + """ + return ["shm", "uv"] if has_efa() else None + + +def spawn_threads_and_init_comms( + func=None, timeout=TIMEOUT_DEFAULT, world_size=DEFAULT_WORLD_SIZE +): + """ + Wrapper to use with a test method + """ + if func is None: + return partial( + spawn_threads_and_init_comms, timeout=timeout, world_size=world_size + ) + + + def _run_test_method_with_multi_threads(world_size, callback): + world = _install_threaded_pg() + global_store = c10d.HashStore() + + def world_is_valid(): + return world == c10d.distributed_c10d._world + + def worker(rank, world_pg, store): + c10d.init_process_group( + backend="threaded", rank=rank, world_size=world_size, store=store + ) + try: + callback() + except BaseException as ex: + # Exceptions are handled in MultiThreadedTestCase + MultiThreadedTestCase.exception_queue.put((rank, sys.exc_info())) + ProcessLocalGroup.exception_handle(ex) # trigger _terminate event and awaken worker threads + finally: + if world_is_valid(): + c10d.destroy_process_group() + + threads = [] + for rank in range(world_size): + t = threading.Thread(target=worker, args=(rank, world, global_store)) + t.start() + threads.append(t) + + return threads + + + @wraps(func) + def wrapper(self, *args, **kwargs): + # TODO: get test name from kwargs + threads = _run_test_method_with_multi_threads(world_size, lambda: func(self, *args, **kwargs)) + # join and error handling + MultiThreadedTestCase._join_threads(threads, func) + + return wrapper + + +class MultiThreadedTestCase(TestCase): + """ + Test runner that runs all tests with the in-proc process group using + multiple threads with the threaded process group. + + Each test spawns world_size threads and run the test method in each thread. + + Difference from regular MultiProcess test runner: + Must explicitly defines SetUp and call self._spawn_threads() to run the tests. + Cannot use setUp / tearDown (must use perThreadSetup / perThreadShutdown) + to set up / tear down each thread when running each test. + No global state possible + How bad of a limitation is this? 
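+
+    A minimal usage sketch (the subclass and collective below are illustrative,
+    not part of this module):
+
+        class MyThreadedTest(MultiThreadedTestCase):
+            @property
+            def world_size(self) -> int:
+                return 2
+
+            def setUp(self):
+                super().setUp()
+                self._spawn_threads()
+
+            def test_all_reduce(self):
+                t = torch.ones(1) * (self.rank + 1)
+                c10d.all_reduce(t)
+                self.assertEqualOnRank(t, torch.ones(1) * 3, rank=0)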
+ """ + exception_queue = queue.Queue() + + MAIN_THREAD_RANK = -1 + + def join_or_run(self, fn): + @wraps(fn) + def wrapper(self): + if self.rank == self.MAIN_THREAD_RANK: + self._join_threads(self.threads, fn) + else: + fn() + + return types.MethodType(wrapper, self) + + def __init__(self, method_name: str = "runTest") -> None: + super().__init__(method_name) + test_fn = getattr(self, method_name, None) + setattr(self, method_name, self.join_or_run(test_fn)) + + def perThreadSetUp(self): + # super().setUp() # TestCase.setUp() calls torch.manual_seed() + pass + + def perThreadTearDown(self): + pass + + def setUp(self) -> None: + """ + setUp only set up things in the main thread, if you want to configure things + in the spawned threads, use perThreadSetUp + """ + super().setUp() + self.rank = self.MAIN_THREAD_RANK + self.threads = [] + # Show full C++ stacktraces when a Python error originating from C++ is raised. + os.environ["TORCH_SHOW_CPP_STACKTRACES"] = "1" + + def tearDown(self): + """ + tearDown only set up things in the main thread, if you want to configure things + in the spawned threads, use perThreadTearDown + """ + super().tearDown() + self.threads = [] + + def _spawn_threads(self): + """ + class method to spawn threads and run test, use this method in the SetUp of your TestCase + """ + test_name = self._current_test_name + # for each test case, we need to create thread local world, and a global store + world = _install_threaded_pg() + self.__class__.global_store = c10d.HashStore() + + def world_is_valid(): + return world == c10d.distributed_c10d._world + + if not world_is_valid(): + raise RuntimeError("Invalid world") + + for rank in range(self.world_size): + t = threading.Thread(target=self.__class__._run, args=(test_name, rank, self.world_size)) + t.start() + self.threads.append(t) + + @classmethod + def _run(cls, test_name, rank, world_size): + self = cls(test_name) + self.rank = rank + + # precision/rel_tol is a thread-local setting since it may be overridden per test, need to make + # every thread have the same value. This would be relevant when we use op db tests, where it + # needs those states to be set i.e. using instantiate_device_type_tests() + # TODO: figure out a better way to do this + if hasattr(self, "_tls"): + self._tls = threading.local() + self._tls.precision = TestCase._precision + self._tls.rel_tol = TestCase._rel_tol + + self.run_test_with_threaded_pg(test_name, rank, world_size) + + def run_test_with_threaded_pg(self, test_name, rank, world_size): + """ + Run the current test associated with `test_name` using the threaded process group. 
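+        This is invoked on each worker thread by ``_run`` (spawned from
+        ``_spawn_threads``); tests are not expected to call it directly.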
+ """ + c10d.init_process_group( + backend="threaded", rank=rank, world_size=world_size, store=self.__class__.global_store + ) + self.perThreadSetUp() + + try: + getattr(self, test_name)() + except BaseException as ex: + self.exception_queue.put((rank, sys.exc_info())) + ProcessLocalGroup.exception_handle(ex) # trigger _terminate event and awaken worker threads + finally: + c10d.destroy_process_group() + self.perThreadTearDown() + + + @classmethod + def _join_threads(cls, threads, fn): + timeout = TIMEOUT_DEFAULT + try: + for idx, thread in enumerate(threads): + thread.join(max(0, timeout)) + if thread.is_alive(): + MultiThreadedTestCase.exception_queue.put( + ( + idx, + ( + TimeoutError, + TimeoutError( + f"Rank failed to join in under {timeout} seconds" + ), + None, + ), + ) + ) + ProcessLocalGroup.reset() + failed_ranks = [] + while not cls.exception_queue.empty(): + failure = cls.exception_queue.get() + failed_ranks.append(failure) + finally: + _uninstall_threaded_pg() + + cls._check_return_codes(failed_ranks, timeout, fn) + + @classmethod + def _check_return_codes(cls, failed_ranks, timeout, fn): + # Print based on exceptions raised from threads + # SkipTest: print info for each thread + # TimeoutError: raise RuntimeError for any timed out thread + # Normal Exception: print error for each thread that raises exception + # and raise a RuntimeError + error_msg = "" + skip_code = -1 + for rank, exc_info in failed_ranks: + exc = exc_info[1] + if isinstance(exc, unittest.SkipTest): + logger.info( + "Thread %s skipping test %s for following reason: %s", rank, fn, str(exc) + ) + if skip_code < 0: + skip_code = TEST_SKIPS["generic"].exit_code + elif isinstance(exc, TimeoutError): + msg = f"Thread {rank} terminated or timed out after {timeout} seconds\n" + logger.error(msg) + raise RuntimeError(msg) + elif isinstance(exc, Exception): + msg = "".join(traceback.format_exception(*exc_info)) + logger.error( + "Caught exception: \n%s exiting thread %s", msg, rank + ) + error_msg += ( + f"Thread {rank} exited with exception:\n{msg}\n" + ) + elif isinstance(exc, SystemExit): + if type(exc.code) == int and skip_code < 0: + skip_code = exc.code + + # check exceptions + if len(error_msg) > 0: + raise RuntimeError(error_msg) + # check skip + if skip_code > 0: + for skip in TEST_SKIPS.values(): + if skip_code == skip.exit_code: + if IS_SANDCASTLE: + # "pass" the test with an appropriate message. + logger.info( + "Skipping %s on sandcastle for the following reason: %s", fn, skip.message + ) + return + else: + raise unittest.SkipTest(skip.message) + + @property + def world_size(self) -> int: + return DEFAULT_WORLD_SIZE + + @property + def _current_test_name(self) -> str: + # self.id() == e.g. 
'__main__.TestDistributed.TestAdditive.test_get_rank' + return self.id().split(".")[-1] + + def assertEqualOnRank(self, x, y, msg=None, *, rank=0): + """ + The reason why we have this util function instead of + self.assertEqual is all threads are sharing one CPU RNG + so the assertion result is only reliable on rank 0 + """ + if self.rank == rank: + self.assertEqual(x, y, msg) + + def assertNotEqualOnRank(self, x, y, msg=None, *, rank=0): + if self.rank == rank: + self.assertNotEqual(x, y) + + +class SaveForwardInputsModule(nn.Module): + def __init__( + self, + forward_inputs: Dict[nn.Module, torch.Tensor], + cast_forward_inputs: bool, + ) -> None: + super().__init__() + self.l = nn.Linear(100, 100) + self.forward_inputs = forward_inputs + self.cast_forward_inputs = cast_forward_inputs + + def forward(self, x: torch.Tensor) -> torch.Tensor: + self.forward_inputs[self] = x + return self.l(x.to(self.l.weight.dtype) if self.cast_forward_inputs else x) + + +class SaveForwardInputsModel(nn.Module): + def __init__( + self, + forward_inputs: Dict[nn.Module, torch.Tensor], + cast_forward_inputs: bool, + ) -> None: + super().__init__() + self.c1 = SaveForwardInputsModule(forward_inputs, cast_forward_inputs) + self.c2 = SaveForwardInputsModule(forward_inputs, cast_forward_inputs) + self.forward_inputs = forward_inputs + + def forward(self, x: torch.Tensor) -> torch.Tensor: + self.forward_inputs[self] = x + return self.c2(self.c1(x)) + +@contextmanager +def _dynamo_dist_per_rank_init(rank, world_size, init_pg=True): + # To avoid multiple inheritance from _dynamo.test_case.TestCase and MultiProcessTestCase, + # Just manually implement the most important part of the dynamo behavior to reset/clear. + torch.cuda.set_device(rank) + os.environ['MASTER_ADDR'] = 'localhost' + os.environ['MASTER_PORT'] = '6789' + if init_pg: + c10d.init_process_group("nccl", rank=rank, world_size=world_size) + torch._dynamo.reset() + torch._dynamo.utils.counters.clear() + try: + yield + finally: + torch._dynamo.reset() + torch._dynamo.utils.counters.clear() + if init_pg: + c10d.destroy_process_group() + + +class DynamoDistributedSingleProcTestCase(torch._dynamo.test_case.TestCase): + """ + Test harness for single-process dynamo distributed tests, + initializes dist process group. + + Prefer this for simple tests, as it's easier to debug. + """ + + @classmethod + def setUpClass(cls): + super().setUpClass() + # _exit_stack is set up in TestCase + cls._exit_stack.enter_context( + patch.dict( + os.environ, + { + "MASTER_ADDR": "localhost", + "MASTER_PORT": "12355", + }, + ) + ) + cls.rank = 0 + cls.device = f"cuda:{cls.rank}" + cls.device_ids = None if "cuda" in cls.device else [cls.rank] + c10d.init_process_group("nccl", rank=cls.rank, world_size=1) + + @classmethod + def tearDownClass(cls): + c10d.destroy_process_group() + super().tearDownClass() + + +class DynamoDistributedMultiProcTestCase(MultiProcessTestCase): + """ + Use this for tests that actually run on multiple GPUs. + + Decorate tests with @skip_if_lt_x_gpu(ngpu) + + Note: MultiProcTestCase spawns processes per test and is slow. + Prefer MultiThreadedTestCase for most tests. Perhaps use this one + sparingly for integration tests. 
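+
+    A minimal sketch of a derived test (the subclass and model below are
+    illustrative):
+
+        class MyDynamoDistTest(DynamoDistributedMultiProcTestCase):
+            @skip_if_lt_x_gpu(2)
+            def test_compiled_ddp(self):
+                with _dynamo_dist_per_rank_init(self.rank, self.world_size):
+                    model = nn.Linear(8, 8, device=f"cuda:{self.rank}")
+                    ddp = nn.parallel.DistributedDataParallel(model, device_ids=[self.rank])
+                    out = torch.compile(ddp)(torch.randn(4, 8, device=f"cuda:{self.rank}"))
+                    out.sum().backward()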
+ """ + def setUp(self): + super().setUp() + self._spawn_processes() + + def tearDown(self): + super().tearDown() + try: + os.remove(self.file_name) + except OSError: + pass + + @property + def world_size(self) -> int: + return torch.cuda.device_count() + + @classmethod + def _run(cls, rank: int, test_name: str, file_name: str, parent_pipe) -> None: + # The rest is copypasta from MultiProcessTestCase._run + self = cls(test_name) + self.rank = rank + self.file_name = file_name + self.run_test(test_name, parent_pipe) diff --git a/env-llmeval/lib/python3.10/site-packages/torch/testing/_internal/common_dtype.py b/env-llmeval/lib/python3.10/site-packages/torch/testing/_internal/common_dtype.py new file mode 100644 index 0000000000000000000000000000000000000000..8d7d2bff25cd99c925308cb0bc5109286ce4422e --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torch/testing/_internal/common_dtype.py @@ -0,0 +1,131 @@ +from typing import List + +import torch + + +# Functions and classes for describing the dtypes a function supports +# NOTE: these helpers should correspond to PyTorch's C++ dispatch macros + +# Verifies each given dtype is a torch.dtype +def _validate_dtypes(*dtypes): + for dtype in dtypes: + assert isinstance(dtype, torch.dtype) + return dtypes + +# class for tuples corresponding to a PyTorch dispatch macro +class _dispatch_dtypes(tuple): + def __add__(self, other): + assert isinstance(other, tuple) + return _dispatch_dtypes(tuple.__add__(self, other)) + +_empty_types = _dispatch_dtypes(()) +def empty_types(): + return _empty_types + +_floating_types = _dispatch_dtypes((torch.float32, torch.float64)) +def floating_types(): + return _floating_types + +_floating_types_and_half = _floating_types + (torch.half,) +def floating_types_and_half(): + return _floating_types_and_half + +def floating_types_and(*dtypes): + return _floating_types + _validate_dtypes(*dtypes) + +_floating_and_complex_types = _floating_types + (torch.cfloat, torch.cdouble) +def floating_and_complex_types(): + return _floating_and_complex_types + +def floating_and_complex_types_and(*dtypes): + return _floating_and_complex_types + _validate_dtypes(*dtypes) + +_double_types = _dispatch_dtypes((torch.float64, torch.complex128)) +def double_types(): + return _double_types + +_integral_types = _dispatch_dtypes((torch.uint8, torch.int8, torch.int16, torch.int32, torch.int64)) +def integral_types(): + return _integral_types + +def integral_types_and(*dtypes): + return _integral_types + _validate_dtypes(*dtypes) + +_all_types = _floating_types + _integral_types +def all_types(): + return _all_types + +def all_types_and(*dtypes): + return _all_types + _validate_dtypes(*dtypes) + +_complex_types = _dispatch_dtypes((torch.cfloat, torch.cdouble)) +def complex_types(): + return _complex_types + +def complex_types_and(*dtypes): + return _complex_types + _validate_dtypes(*dtypes) + +_all_types_and_complex = _all_types + _complex_types +def all_types_and_complex(): + return _all_types_and_complex + +def all_types_and_complex_and(*dtypes): + return _all_types_and_complex + _validate_dtypes(*dtypes) + +_all_types_and_half = _all_types + (torch.half,) +def all_types_and_half(): + return _all_types_and_half + +def custom_types(*dtypes): + """Create a list of arbitrary dtypes""" + return _empty_types + _validate_dtypes(*dtypes) + +# The functions below are used for convenience in our test suite and thus have no corresponding C++ dispatch macro + +# See AT_FORALL_SCALAR_TYPES_WITH_COMPLEX_AND_QINTS. 
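+# For example (illustrative), get_all_dtypes(include_complex=False, include_bfloat16=False)
+# is expected to return the integer dtypes, float16/float32/float64, and bool.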
+def get_all_dtypes(include_half=True, + include_bfloat16=True, + include_bool=True, + include_complex=True, + include_complex32=False, + include_qint=False, + ) -> List[torch.dtype]: + dtypes = get_all_int_dtypes() + get_all_fp_dtypes(include_half=include_half, include_bfloat16=include_bfloat16) + if include_bool: + dtypes.append(torch.bool) + if include_complex: + dtypes += get_all_complex_dtypes(include_complex32) + if include_qint: + dtypes += get_all_qint_dtypes() + return dtypes + +def get_all_math_dtypes(device) -> List[torch.dtype]: + return get_all_int_dtypes() + get_all_fp_dtypes(include_half=device.startswith('cuda'), + include_bfloat16=False) + get_all_complex_dtypes() + +def get_all_complex_dtypes(include_complex32=False) -> List[torch.dtype]: + return [torch.complex32, torch.complex64, torch.complex128] if include_complex32 else [torch.complex64, torch.complex128] + + +def get_all_int_dtypes() -> List[torch.dtype]: + return [torch.uint8, torch.int8, torch.int16, torch.int32, torch.int64] + + +def get_all_fp_dtypes(include_half=True, include_bfloat16=True) -> List[torch.dtype]: + dtypes = [torch.float32, torch.float64] + if include_half: + dtypes.append(torch.float16) + if include_bfloat16: + dtypes.append(torch.bfloat16) + return dtypes + + +def get_all_qint_dtypes() -> List[torch.dtype]: + return [torch.qint8, torch.quint8, torch.qint32, torch.quint4x2, torch.quint2x4] + + +float_to_corresponding_complex_type_map = { + torch.float16: torch.complex32, + torch.float32: torch.complex64, + torch.float64: torch.complex128, +} diff --git a/env-llmeval/lib/python3.10/site-packages/torch/testing/_internal/common_fsdp.py b/env-llmeval/lib/python3.10/site-packages/torch/testing/_internal/common_fsdp.py new file mode 100644 index 0000000000000000000000000000000000000000..90e9faa5ac37db777ea7dad050f2b93e39822609 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torch/testing/_internal/common_fsdp.py @@ -0,0 +1,1219 @@ +# Owner(s): ["oncall: distributed"] + +import itertools +import os +import re +import sys +from abc import ABC, abstractmethod +from contextlib import nullcontext +from copy import deepcopy +from enum import auto, Enum +from typing import Any, Callable, Dict, List, Optional, Tuple, Type, Union +from unittest import mock + +import torch +import torch.distributed as dist +import torch.nn as nn +from torch.distributed.fsdp import CPUOffload, FullyShardedDataParallel as FSDP +from torch.distributed.fsdp._common_utils import TrainingState +from torch.distributed.fsdp._init_utils import NO_RESHARD_AFTER_FORWARD_STRATEGIES +from torch.distributed.fsdp.fully_sharded_data_parallel import ( + BackwardPrefetch, + MixedPrecision, + ShardingStrategy, +) +from torch.distributed.fsdp.sharded_grad_scaler import ShardedGradScaler +from torch.distributed.fsdp.wrap import always_wrap_policy, ModuleWrapPolicy, wrap +from torch.nn import TransformerDecoderLayer, TransformerEncoderLayer +from torch.nn.parallel.distributed import DistributedDataParallel as DDP +from torch.testing._internal.common_distributed import ( + MultiProcessTestCase, + MultiThreadedTestCase, + TEST_SKIPS, +) +from torch.testing._internal.common_utils import FILE_SCHEMA, get_cycles_per_ms + + +class FSDPInitMode(Enum): + # No FSDP wrapping + NO_FSDP = auto() + # FSDP recursive wrapping + RECURSIVE = auto() + # TODO: FSDP non-recursive wrapping + # NONRECURSIVE = auto() + + +class CUDAInitMode(Enum): + # Move model to CUDA before passing to the FSDP constructor + CUDA_BEFORE = auto() + # Move model to CUDA after 
passing to the FSDP constructor + CUDA_AFTER = auto() + # Keep on CPU + CUDA_NEVER = auto() + + +class FSDPTestModel(nn.Module, ABC): + """This defines the interface expected from all models used commonly for + FSDP unit tests.""" + + @abstractmethod + def get_input(self, device) -> Tuple[torch.Tensor, ...]: + """Returns an input for the model as as tuple.""" + ... + + @abstractmethod + def get_loss(self, input, output) -> torch.Tensor: + """Returns the loss given the input and output.""" + ... + + @abstractmethod + def run_backward(self, loss) -> None: + """Runs the backward pass (e.g. including ``loss.backward()``).""" + ... + + @staticmethod + @abstractmethod + def init( + group: dist.ProcessGroup, + fsdp_init_mode: FSDPInitMode, + *init_args: Any, + cuda_init_mode: CUDAInitMode, + fsdp_kwargs: Optional[Dict[str, Any]] = None, + deterministic: bool = False, + **init_kwargs: Any, + ) -> nn.Module: + """Initializes an instance of this model.""" + ... + + +def _assert_module_states( + model: nn.Module, + process_group: dist.ProcessGroup, + assert_fn: Callable, +): + """ + All-gathers module states across ranks and calls ``assert_fn`` on each pair + of corresponding states from rank 0 and a nonzero rank. For example, if + ``assert_fn`` is ``self.assertEqual()``, then this checks that all module + states are equal across ranks. + """ + # Include names for debugging convenience + named_module_states = [ + (param_name, param.detach().cpu()) + for param_name, param in model.named_parameters() + ] + named_module_states += [ + (buffer_name, buffer.detach().cpu()) + for buffer_name, buffer in model.named_buffers() + ] + world_size = dist.get_world_size(process_group) + olist = [None for _ in range(world_size)] + dist.all_gather_object(olist, named_module_states, group=process_group) + rank0_states = olist[0] + for state in olist[1:]: + for (_, p1), (_, p2) in zip(rank0_states, state): + assert_fn(p1, p2) + + +def _zero_model( + model: nn.Module, + zero_buffers: bool = False, + summon_full=True, +): + """Zeros the parameters and optionally buffers of ``model`` in place.""" + ctx = FSDP.summon_full_params(model) if summon_full else nullcontext() + with ctx: + for param in model.parameters(): + with torch.no_grad(): + param.zero_() + if zero_buffers: + for buffer in model.buffers(): + with torch.no_grad(): + buffer.zero_() + + +def _get_state_dict(model, cpu_offload=False, half=False): + if not cpu_offload: + model = model.cuda() + if half: + model.half() + + return model.state_dict() + + +def subtest_name(test_name_mapping, *args): + return "_".join( + [test_name_mapping[str(s)] if s is not None else "none" for s in args] + ) + + +def _broadcast_state_dict(rank, state_dict): + # For non-FSDP roots, some parts of the model state on rank 0 may + # not be on CPU, so we move everything to CPU to avoid issues like: + # https://github.com/pytorch/pytorch/issues/77113. + for param_name, param in state_dict.items(): + if param.device != torch.device("cpu"): + state_dict[param_name] = param.cpu() + + olist = [state_dict if rank == 0 else None] + dist.broadcast_object_list(olist) + state_dict = olist[0] + # Ensure that the state is on CUDA + for param_name in state_dict.keys(): + state_dict[param_name] = state_dict[param_name].cuda() + return state_dict + + +def get_full_params(model: nn.Module, recurse: bool = True): + """ + Returns the full unsharded parameters of ``model``. Any FSDP-managed + parameters offloaded to CPU are moved to GPU in the returned list. 
+ + Args: + recurse (bool): If ``False``, only unshards the parameters immediate to + ``model``; if ``True``, recurses through the module hierarchy + rooted at ``model``. + """ + with FSDP.summon_full_params(model, recurse=recurse): + return deepcopy(list(model.parameters())) + + +def _maybe_cuda(model: nn.Module, move_to_cuda: bool): + return model.cuda() if move_to_cuda else model + + +def _maybe_wrap_fsdp(model: nn.Module, wrap_fsdp: bool, *args, **kwargs): + return model if not wrap_fsdp else FSDP(model, *args, **kwargs) + + +class DummyProcessGroup: + def __init__(self, rank: int, size: int): + self._rank = rank + self._size = size + + def rank(self) -> int: + return self._rank + + def size(self) -> int: + return self._size + + def allreduce(self, *args, **kwargs): + dist_wait = mock.Mock() + + def get_future(): + future = torch.futures.Future() + future.set_result(1) + return future + + dist_wait.get_future = get_future + return dist_wait + + +class TransformerWithSharedParams(FSDPTestModel): + def __init__( + self, + group: dist.ProcessGroup, + cuda_init_mode: CUDAInitMode, + add_bn: bool, + deterministic: bool, + ): + super().__init__() + self.rank = group.rank() + self.world_size = group.size() + if deterministic: + torch.manual_seed(0) + d_vocab = 23 + d_model = 16 + + self.embed_tokens = nn.Embedding(d_vocab, d_model) + self.transformer = nn.Transformer( + d_model=d_model, + num_encoder_layers=2, + num_decoder_layers=2, + dim_feedforward=8, + dropout=0.1, + ) + self.output_proj = nn.Linear(d_model, d_vocab) + + # share the embedding and output projection weights + self.output_proj.weight = self.embed_tokens.weight + self.register_buffer( + "vocab_bias", self.embed_tokens.weight.new_ones((d_model,)) + ) + self.register_buffer( + "long_buffer", + torch.zeros_like(self.vocab_bias, dtype=torch.long), + ) # type: ignore[arg-type] + + self.bs = 2 + self.bn = torch.nn.BatchNorm1d(self.bs) if add_bn else torch.nn.Identity() + if cuda_init_mode == CUDAInitMode.CUDA_BEFORE: + self = self.cuda() + if deterministic: + self.eval() + + def get_input(self, device): + torch.manual_seed(1 + self.rank) # keep everything deterministic + src = torch.arange(12, device=device).view(6, self.bs) # T x B + tgt = torch.arange(self.bs * 4, device=device).view(4, self.bs) # T x B + return (src, tgt) + + def forward(self, src_ids, tgt_ids): + src = self.embed_tokens(src_ids) + src = src + self.vocab_bias + self.long_buffer.type_as(src) # type: ignore[operator] + tgt = self.embed_tokens(tgt_ids) + tgt = self.bn(tgt) + x = self.transformer(src, tgt) + return self.output_proj(x) + + def get_loss(self, input, output): + _, tgt = input + return nn.functional.cross_entropy( + output.view(-1, output.size(-1)), tgt.view(-1), reduction="sum" + ) + + def run_backward(self, loss): + loss.backward() + + @staticmethod + def init( + group: dist.ProcessGroup, + fsdp_init_mode: FSDPInitMode, + cuda_init_mode: CUDAInitMode, + fsdp_kwargs: Optional[Dict[str, Any]] = None, + deterministic: bool = False, + add_bn: bool = True, + ) -> Union[nn.Module, FSDP]: + """ + Initializes a :class:`TransformerWithSharedParams` instance. + + Args: + fsdp_init_mode (FSDPInitMode): If ``NO_FSDP``, then does not wrap + any modules with FSDP. If ``RECURSIVE``, then wraps with + top-level FSDP. By default, the top-level FSDP uses the + ``ModuleWrapPolicy`` for encoder and decoder layers, but a + different auto wrap policy may be specified via + ``fsdp_kwargs``. + cuda_init_mode (CUDAInitMode): Determines model movement to CUDA. 
+ fsdp_kwargs (Optional[Dict[str, Any]]): Optional keyword arguments + forwarded to the FSDP constructor. + deterministic (bool): Whether to make the model deterministic + across constructions. + add_bn (bool): Whether to include batch norm in the model. + """ + + if fsdp_kwargs is None: + fsdp_kwargs = {} + if fsdp_init_mode == FSDPInitMode.NO_FSDP: + if isinstance(group, tuple): + pg = group[0] + else: + pg = group + return TransformerWithSharedParams( + pg, cuda_init_mode, add_bn, deterministic + ) + elif fsdp_init_mode == FSDPInitMode.RECURSIVE: + # Default to the `ModuleWrapPolicy` + if "auto_wrap_policy" not in fsdp_kwargs: + auto_wrap_policy = ModuleWrapPolicy( + { + TransformerEncoderLayer, + TransformerDecoderLayer, + } + ) + else: + auto_wrap_policy = fsdp_kwargs.pop("auto_wrap_policy") + + if ( + "sharding_strategy" in fsdp_kwargs + and fsdp_kwargs["sharding_strategy"] + in {ShardingStrategy.HYBRID_SHARD, ShardingStrategy._HYBRID_SHARD_ZERO2} + and not isinstance(group, tuple) + ): + fsdp_pg = None + else: + fsdp_pg = group + + if isinstance(group, tuple): + tformer_pg = group[0] + else: + tformer_pg = group + + m = TransformerWithSharedParams( + tformer_pg, cuda_init_mode, add_bn, deterministic + ) + fsdp_model = FSDP( + m, + fsdp_pg, + auto_wrap_policy=auto_wrap_policy, + **fsdp_kwargs, + ) + if cuda_init_mode == CUDAInitMode.CUDA_AFTER: + fsdp_model = fsdp_model.cuda() + return fsdp_model + raise ValueError(f"Unsupported FSDP init mode: {fsdp_init_mode}") + + def get_ignored_modules(self): + return [self.transformer] + + +class NestedWrappedModule(FSDPTestModel): + def __init__( + self, + group: dist.ProcessGroup, + wrap_fsdp: bool, + cuda_init_mode: CUDAInitMode, + deterministic: bool, + **fsdp_kwargs, + ): + super().__init__() + self.rank = group.rank() + self.world_size = group.size() + move_to_cuda = cuda_init_mode == CUDAInitMode.CUDA_BEFORE + + def _maybe_wrap(layer): + if wrap_fsdp: + return FSDP(layer, group, **fsdp_kwargs) + return layer + + if deterministic: + torch.manual_seed(0) + self.module = nn.Sequential( + _maybe_cuda(nn.Linear(8, 4), move_to_cuda), + _maybe_wrap( + nn.Sequential( + _maybe_wrap(_maybe_cuda(nn.Linear(4, 16), move_to_cuda)), + _maybe_cuda(nn.Linear(16, 16), move_to_cuda), + ), + ), + _maybe_wrap(_maybe_cuda(nn.Linear(16, 4), move_to_cuda)), + _maybe_cuda(nn.Linear(4, 8), move_to_cuda), + ) + + def get_input(self, device): + torch.manual_seed(1 + self.rank) # keep everything deterministic + return (torch.rand(4, 8, device=device),) + + def forward(self, x): + return self.module(x) + + def get_loss(self, input, output): + loss = output.sum() + return loss + + def run_backward(self, loss): + loss.backward() + + @staticmethod + def init( + group: dist.ProcessGroup, + fsdp_init_mode: FSDPInitMode, + cuda_init_mode: CUDAInitMode, + fsdp_kwargs: Optional[Dict[str, Any]] = None, + deterministic: bool = False, + ) -> nn.Module: + """ + Initializes a :class:`NestedWrappedModule` instance. + + Args: + fsdp_init_mode (FSDPInitMode): If ``NO_FSDP``, then does not wrap + any modules with FSDP. If ``RECURSIVE``, then wraps some nested + modules with FSDP but not the top-level module. The model may + later be wrapped with a top-level FSDP external to this method + if desired. + cuda_init_mode (CUDAInitMode): Determines model movement to CUDA. + fsdp_kwargs (Optional[Dict[str, Any]]): Optional keyword arguments + forwarded to the FSDP constructor. + deterministic (bool): Whether to make the model deterministic + across constructions. 
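+
+        A minimal call sketch (the process-group lookup shown is one option and is
+        illustrative):
+
+            model = NestedWrappedModule.init(
+                dist.distributed_c10d._get_default_group(),
+                FSDPInitMode.RECURSIVE,
+                CUDAInitMode.CUDA_BEFORE,
+                deterministic=True,
+            )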
+ """ + if fsdp_kwargs is None: + fsdp_kwargs = {} + if fsdp_init_mode == FSDPInitMode.NO_FSDP: + return NestedWrappedModule( + group, + wrap_fsdp=False, + cuda_init_mode=cuda_init_mode, + deterministic=deterministic, + ) + elif fsdp_init_mode == FSDPInitMode.RECURSIVE: + # Does not wrap with top-level FSDP + fsdp_model = NestedWrappedModule( + group, + wrap_fsdp=True, + cuda_init_mode=cuda_init_mode, + deterministic=deterministic, + **fsdp_kwargs, + ) + if cuda_init_mode == CUDAInitMode.CUDA_AFTER: + fsdp_model = fsdp_model.cuda() + return fsdp_model + raise ValueError(f"Unsupported FSDP init mode: {fsdp_init_mode}") + + +class AlwaysWrapNestedWrappedModule(NestedWrappedModule): + @staticmethod + def init( + group: dist.ProcessGroup, + fsdp_init_mode: FSDPInitMode, + cuda_init_mode: CUDAInitMode, + fsdp_kwargs: Optional[Dict[str, Any]] = None, + deterministic: bool = False, + ): + """ + Initializes a :class:`NestedWrappedModule` instance, but unlike + :meth:`NestedWrappedModule.init`, for the ``RECURSIVE`` init mode, this + wraps with top-level FSDP and the ``always_wrap_policy()`` auto wrap + policy. + """ + super_ = super(AlwaysWrapNestedWrappedModule, AlwaysWrapNestedWrappedModule) + model = super_.init( + group=group, + fsdp_init_mode=FSDPInitMode.NO_FSDP, + cuda_init_mode=cuda_init_mode, + fsdp_kwargs=fsdp_kwargs, + deterministic=deterministic, + ) + if fsdp_init_mode == FSDPInitMode.NO_FSDP: + return model + elif fsdp_init_mode == FSDPInitMode.RECURSIVE: + fsdp_model = FSDP(model, auto_wrap_policy=always_wrap_policy, **fsdp_kwargs) + if cuda_init_mode == CUDAInitMode.CUDA_AFTER: + fsdp_model = fsdp_model.cuda() + return fsdp_model + + +class NonUniformReqGradNWM(NestedWrappedModule): + def __init__( + self, + group: dist.ProcessGroup, + wrap_fsdp: bool, + cuda_init_mode: CUDAInitMode, + deterministic: bool, + **fsdp_kwargs, + ): + super(NestedWrappedModule, self).__init__() + # This `__init__` only differs from `NestedWrappedModule.__init__` in that + # the last two `nn.Linear` layers are FSDP wrapped in a `nn.Sequential` + # container. This arrangement results in all elements of the last two parameters + # residing on a single rank. Freezing all parameters except those two allows us + # to verify that `ShardedGradScaler` accommodates situations where some ranks + # have no (non-zero sized) parameter shards. 
+ self.rank = group.rank() + self.world_size = group.size() + move_to_cuda = cuda_init_mode == CUDAInitMode.CUDA_BEFORE + + def _maybe_wrap(layer): + if wrap_fsdp: + return FSDP(layer, group, **fsdp_kwargs) + return layer + + if deterministic: + torch.manual_seed(0) + self.module = nn.Sequential( + _maybe_cuda(nn.Linear(8, 4), move_to_cuda), + _maybe_wrap( + nn.Sequential( + _maybe_wrap(_maybe_cuda(nn.Linear(4, 16), move_to_cuda)), + _maybe_cuda(nn.Linear(16, 16), move_to_cuda), + ), + ), + _maybe_wrap( + nn.Sequential( + _maybe_cuda(nn.Linear(16, 4), move_to_cuda), + _maybe_cuda(nn.Linear(4, 8), move_to_cuda), + ), + ), + ) + + @staticmethod + def _set_nonuniform_req_grad(model, req_grad_mask) -> None: + for n, p in model.named_parameters(): + if not re.match(req_grad_mask, n): + p.requires_grad_(False) + + @staticmethod + def init( + group: dist.ProcessGroup, + fsdp_init_mode: FSDPInitMode, + cuda_init_mode: CUDAInitMode, + fsdp_kwargs: Optional[Dict[str, Any]] = None, + deterministic: bool = False, + ): + """ + Initializes a :class:`NestedWrappedModule` instance, but unlike + :meth:`NestedWrappedModule.init`, it wraps a second :class:`torch.nn.Sequential` + container to enable the desired non-uniform ``requires_grad`` + ``use_orig_params=True`` tests. For both ``RECURSIVE`` and ``NO_FSDP`` + init modes, freezes all parameters except the last two to validate + ``ShardedGradScaler`` support for ranks with no (non-zero sized) local shards in + FSDP ``use_orig_params=True`` mode. + """ + # The parameters that should remain unfrozen are in `module.2.1`. The regex + # pattern below matches the relevant parameter names both with and without + # an interstitial FSDP module indicator (`_fsdp_wrapped_module`) present. + req_grad_pattern = re.compile(r"module\.2.*\.1.*") + if fsdp_init_mode == FSDPInitMode.NO_FSDP: + ddp_model = NonUniformReqGradNWM( + group, + wrap_fsdp=False, + cuda_init_mode=cuda_init_mode, + deterministic=deterministic, + ) + NonUniformReqGradNWM._set_nonuniform_req_grad(ddp_model, req_grad_pattern) + return ddp_model + elif fsdp_init_mode == FSDPInitMode.RECURSIVE: + if fsdp_kwargs is None: + fsdp_kwargs = {} + fsdp_model = NonUniformReqGradNWM( + group, + wrap_fsdp=True, + cuda_init_mode=cuda_init_mode, + deterministic=deterministic, + **fsdp_kwargs, + ) + if cuda_init_mode == CUDAInitMode.CUDA_AFTER: + fsdp_model = fsdp_model.cuda() + NonUniformReqGradNWM._set_nonuniform_req_grad(fsdp_model, req_grad_pattern) + return fsdp_model + raise ValueError(f"Unsupported FSDP init mode: {fsdp_init_mode}") + + +class ModuleWithDelay(FSDPTestModel): + """This class wraps a :class:`FSDPTestModel` to optionally add a delay + after computing the loss and/or before the gradient reduction.""" + + def __init__( + self, + module: nn.Module, + delay_after_loss_ms: int, + delay_before_reduction_ms: int, + ): + super().__init__() + self.delay_after_loss_ms = delay_after_loss_ms + self.delay_before_reduction_ms = delay_before_reduction_ms + self.module = module + + def get_input(self, device): + return self.module.get_input(device) + + def forward(self, x): + return self.module(x) + + def get_loss(self, input, output): + loss = self.module.get_loss(input, output) + if self.delay_after_loss_ms > 0: + torch.cuda._sleep(int(self.delay_after_loss_ms * get_cycles_per_ms())) + return loss + + def run_backward(self, loss): + orig_reduce_scatter = torch.distributed.reduce_scatter_tensor + + def _delayed_reduce_scatter(*args, **kwargs): + if self.delay_before_reduction_ms > 0: + torch.cuda._sleep( + 
int(self.delay_before_reduction_ms * get_cycles_per_ms()) + ) + return orig_reduce_scatter(*args, **kwargs) + + with mock.patch( + "torch.distributed.reduce_scatter_tensor", _delayed_reduce_scatter + ): + self.module.run_backward(loss) + + @staticmethod + def init( + module_class: Type[FSDPTestModel], + *model_args: Any, + delay_after_loss_ms: int, + delay_before_reduction_ms: int, + **model_kwargs: Any, + ): + """ + Args: + module_class (Type[FSDPTestModel]): Wrapped module class to which + to add delays. + model_args: Positional arguments forwarded to the ``module_class`` + ``init()``. + delay_after_loss_ms (int): Delay after computing the loss/before + the optimizer step (in ms). + delay_before_reduction_ms (int): Delay before reduce-scattering + gradients (in ms). + model_kwargs: Keyword arguments forwarded to the ``module_class`` + ``init()``. + """ + return ModuleWithDelay( + module_class.init(*model_args, **model_kwargs), + delay_after_loss_ms, + delay_before_reduction_ms, + ) + + +class NestedWrappedModuleWithDelay(ModuleWithDelay): + @staticmethod + def init( + group: dist.ProcessGroup, + fsdp_init_mode: FSDPInitMode, + cuda_init_mode: CUDAInitMode = CUDAInitMode.CUDA_AFTER, + fsdp_kwargs: Optional[Dict[str, Any]] = None, + deterministic: bool = False, + delay_after_loss_ms: int = 0, + delay_before_reduction_ms: int = 0, + ): + return super(NestedWrappedModuleWithDelay, NestedWrappedModuleWithDelay).init( + NestedWrappedModule, + group=group, + fsdp_init_mode=fsdp_init_mode, + cuda_init_mode=cuda_init_mode, + fsdp_kwargs=fsdp_kwargs, + deterministic=deterministic, + delay_after_loss_ms=delay_after_loss_ms, + delay_before_reduction_ms=delay_before_reduction_ms, + ) + + +class DummyDDP(nn.Module): + def __init__(self, module): + super().__init__() + self.module = module + + def forward(self, *args, **kwargs): + return self.module(*args, **kwargs) + + +class MixtureOfExperts(NestedWrappedModule): + def __init__( + self, + group: dist.ProcessGroup, + wrap_fsdp: bool, + cuda_init_mode: CUDAInitMode, + delay_before_free_ms: int, + deterministic: bool, + **fsdp_kwargs, + ): + super().__init__( + group=group, + wrap_fsdp=wrap_fsdp, + cuda_init_mode=cuda_init_mode, + deterministic=deterministic, + ) + self.group = group + self.delay_before_free_ms = delay_before_free_ms + self.wrap_fsdp = wrap_fsdp + self.move_to_cuda = cuda_init_mode == CUDAInitMode.CUDA_BEFORE + if deterministic: + # Give each rank different expert parameters + torch.manual_seed(42 + self.rank) + d_expert = 23 + d_shared = 12 + d_input = 8 + expert = _maybe_cuda(nn.Linear(d_expert, d_shared), self.move_to_cuda) + + self.num_expert_params = sum([p.numel() for p in expert.parameters()]) + for p in expert.parameters(): + p.expert = True # type: ignore[attr-defined] + + if deterministic: + # Keep all other parameters the same across ranks + torch.manual_seed(0) + + shared = _maybe_cuda(nn.Linear(d_shared, d_expert), self.move_to_cuda) + + if wrap_fsdp: + # we create a process group of size 1 for the expert params + expert_group = torch.distributed.new_group( + [group.rank()] + ) # world size 1 means no shard + expert = FSDP(expert, expert_group, **fsdp_kwargs) # type: ignore[assignment] + shared = FSDP(shared, group, **fsdp_kwargs) # type: ignore[assignment] + + self.module = nn.Sequential( + _maybe_cuda(nn.Linear(d_input, d_shared), self.move_to_cuda), + shared, + expert, + _maybe_cuda(nn.Linear(d_shared, d_input), self.move_to_cuda), + ) + + def forward(self, x): + if self.delay_before_free_ms > 0: + expert = 
self.module[2] + if isinstance(expert, FSDP): + orig_reshard = torch.distributed.fsdp._runtime_utils._reshard + + def _delayed_reshard(*args, **kwargs): + torch.cuda._sleep( + int(self.delay_before_free_ms * get_cycles_per_ms()) + ) + return orig_reshard(*args, **kwargs) + + # This patch covers any `import torch..._reshard` uses. + with mock.patch( + "torch.distributed.fsdp._runtime_utils._reshard", _delayed_reshard + ): + return self.module(x) + + return self.module(x) + + def run_backward(self, loss): + loss.backward() + # Manually reduce gradients if not wrapped in FullyShardedDataParallel + if not self.wrap_fsdp: + with torch.no_grad(): + for p in self.parameters(): + if hasattr(p, "expert"): + continue # these params don't need grad reduction + p.grad.div_(self.world_size) + torch.distributed.all_reduce(p.grad, group=self.group) + + @staticmethod + def init( + group: dist.ProcessGroup, + fsdp_init_mode: FSDPInitMode, + cuda_init_mode: CUDAInitMode, + fsdp_kwargs: Optional[Dict[str, Any]] = None, + deterministic: bool = False, + delay_before_free_ms: int = 0, + ): + """ + Initializes a :class:`MixtureOfExperts` instance. + + Args: + fsdp_init_mode (FSDPInitMode): If ``NO_FSDP``, then does not wrap + any modules with FSDP. If ``RECURSIVE``, then wraps some nested + modules with FSDP, including the expert and shared layers, but + not the top-level module. The model may later be wrapped with a + top-level FSDP external to this method if desired. + cuda_init_mode (CUDAInitMode): Determines model movement to CUDA. + fsdp_kwargs (Optional[Dict[str, Any]]): Optional keyword arguments + forwarded to the FSDP constructor. + deterministic (bool): Whether to make the model deterministic + across constructions. + delay_before_free_ms (int): Delay before resharding expert + parameters in the forward pass (in ms). + """ + if fsdp_kwargs is None: + fsdp_kwargs = {} + if fsdp_init_mode == FSDPInitMode.NO_FSDP: + return MixtureOfExperts( + group, + wrap_fsdp=False, + cuda_init_mode=cuda_init_mode, + delay_before_free_ms=delay_before_free_ms, + deterministic=deterministic, + ) + elif fsdp_init_mode == FSDPInitMode.RECURSIVE: + # Does not wrap with top-level FSDP + fsdp_model = MixtureOfExperts( + group, + wrap_fsdp=True, + cuda_init_mode=cuda_init_mode, + delay_before_free_ms=delay_before_free_ms, + deterministic=deterministic, + **fsdp_kwargs, + ) + if cuda_init_mode == CUDAInitMode.CUDA_AFTER: + fsdp_model = fsdp_model.cuda() + return fsdp_model + raise ValueError(f"Unsupported FSDP init mode: {fsdp_init_mode}") + + +def run_subtests( + cls_inst, + subtest_config: Dict[str, List[Any]], + test_fn: Callable, + *test_args, + **test_kwargs: Any, +): + """ + Runs a test function given by ``test_fn`` as a subtest according to the + configurations specified by ``subtest_config``. This amortizes the + costly setup overhead (including process spawn and initializing the + process group) over the subtests. + + Args: + subtest_config (Dict[str, List[Any]]): A mapping from subtest + keyword argument name to a list of its possible values. + test_fn (Callable): A callable that runs the actual test. + test_args: Positional arguments to pass to ``test_fn``. + test_kwargs: Keyword arguments to pass to ``test_fn``. 
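+
+    A minimal illustrative call (``self`` is a test case instance and
+    ``self._test_impl`` is a hypothetical helper accepting these keyword
+    arguments; the ``run_subtests`` methods on the test classes below simply
+    forward to this function)::
+
+        run_subtests(
+            self,
+            {"use_orig_params": [False, True], "forward_prefetch": [False, True]},
+            self._test_impl,  # run once per combination of the listed values
+        )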
+    """
+    # Convert the config mapping to a list to have a fixed order
+    subtest_config_items: List[Tuple[str, List[Any]]] = list(subtest_config.items())
+    subtest_config_keys: List[str] = [item[0] for item in subtest_config_items]
+    subtest_config_values: List[List[Any]] = [item[1] for item in subtest_config_items]
+    for values in itertools.product(*subtest_config_values):
+        # Map each keyword to its chosen value
+        subtest_kwargs = dict(zip(subtest_config_keys, values))
+        with cls_inst.subTest(**subtest_kwargs):
+            test_fn(*test_args, **test_kwargs, **subtest_kwargs)
+        dist.barrier()
+
+
+class FSDPTestMultiThread(MultiThreadedTestCase):
+    @property
+    def world_size(self):
+        return torch.cuda.device_count() if torch.cuda.is_available() else 4
+
+    def setUp(self):
+        super().setUp()
+        self._spawn_threads()
+
+    def run_subtests(self, *args, **kwargs):
+        return run_subtests(self, *args, **kwargs)
+
+
+class FSDPTest(MultiProcessTestCase):
+    def setUp(self):
+        super().setUp()
+        # Set TORCH_NCCL_DESYNC_DEBUG=0 to disable the NCCL `workCleanupLoop()`,
+        # which can cause unit test flakiness:
+        # https://github.com/pytorch/pytorch/issues/90848
+        os.environ["TORCH_NCCL_DESYNC_DEBUG"] = "0"
+        self._spawn_processes()
+
+    @property
+    def world_size(self):
+        return min(torch.cuda.device_count(), 8) if torch.cuda.is_available() else 4
+
+    @property
+    def process_group(self):
+        return dist.distributed_c10d._get_default_group()
+
+    @property
+    def init_method(self):
+        return f"{FILE_SCHEMA}{self.file_name}"
+
+    def _check_cpu_offload(self, fsdp_model, cpu_offload):
+        self.assertEqual(cpu_offload, fsdp_model.cpu_offload)
+
+    def _check_backward_prefetch(self, fsdp_model, backward_prefetch):
+        self.assertEqual(backward_prefetch, fsdp_model.backward_prefetch)
+
+    def _check_forward_prefetch(self, fsdp_model, forward_prefetch):
+        self.assertEqual(forward_prefetch, fsdp_model.forward_prefetch)
+
+    def run_subtests(self, *args, **kwargs):
+        return run_subtests(self, *args, **kwargs)
+
+    @classmethod
+    def _run(cls, rank, test_name, file_name, pipe):
+        self = cls(test_name)
+        self.rank = rank
+        self.file_name = file_name
+
+        print(f"dist init r={self.rank}, world={self.world_size}")
+
+        # Use the gloo backend when CUDA is unavailable so that
+        # 'init_process_group()' still succeeds; tests that actually require
+        # GPUs are skipped if there are not enough of them.
+        backend = "nccl" if torch.cuda.is_available() else "gloo"
+
+        try:
+            dist.init_process_group(
+                init_method=self.init_method,
+                backend=backend,
+                world_size=int(self.world_size),
+                rank=self.rank,
+            )
+        except RuntimeError as e:
+            if "recompile" in e.args[0]:
+                sys.exit(TEST_SKIPS["backend_unavailable"].exit_code)
+
+            raise
+
+        if torch.cuda.is_available() and torch.cuda.device_count():
+            torch.cuda.set_device(self.rank % torch.cuda.device_count())
+
+        # Execute a barrier before running the test to ensure that every
+        # process has finished initialization and that a test immediately
+        # exiting due to a skip does not cause flakiness.
+ dist.barrier() + + self.run_test(test_name, pipe) + + dist.barrier() + + dist.destroy_process_group() + + def _train_for_several_steps( + self, + model: nn.Module, + num_steps: int, + autocast: bool, + lr: float = 0.01, + fsdp_cpu_offload: Optional[CPUOffload] = None, + save_model: bool = False, + mixed_precision: Optional[MixedPrecision] = None, + enable_sharded_grad_scaler: bool = False, + use_pure_fp16: bool = False, + sharded_grad_scaler_kwargs: Optional[Dict[str, Any]] = None, + ): + cpu_offload_params = fsdp_cpu_offload and fsdp_cpu_offload.offload_params + + model_device = next(model.parameters()).device + if sharded_grad_scaler_kwargs is None: + sharded_grad_scaler_kwargs = {} + sharded_grad_scaler = ShardedGradScaler( + enabled=enable_sharded_grad_scaler, **sharded_grad_scaler_kwargs + ) + # use SGD with momentum instead of Adam, since Adam is scale invariant + # and this makes it bad for tests + optim = torch.optim.SGD(model.parameters(), lr=lr, momentum=0.9) + for _ in range(num_steps): + optim.zero_grad() + with torch.cuda.amp.autocast(enabled=autocast): + # Inputs always cuda regardless of cpu offloading, or model.device + input = model.module.get_input(torch.device("cuda")) + if use_pure_fp16 or (mixed_precision and not isinstance(model, FSDP)): + if isinstance(input, torch.Tensor): + input = input.half() + else: + input = tuple(x.half() for x in input) + output = model(*input) + # Post-forward, if CPU offloading model param should be on CPU. + if ( + cpu_offload_params + and isinstance(model, FSDP) + # If not resharding after forward, the parameters are still + # exposed as unsharded views into the GPU flat parameter + and model.sharding_strategy + not in NO_RESHARD_AFTER_FORWARD_STRATEGIES + ): + for p in model.parameters(): + # Params should always be on CPU + self.assertEqual(p.device, torch.device("cpu")) + + loss = model.module.get_loss(input, output).to(model_device) + loss = sharded_grad_scaler.scale(loss) + + if not mixed_precision and not use_pure_fp16: + assert ( + loss.dtype == torch.float32 + ), "loss data type should be float32, as the original \ + parameter data type is float32." + else: + if use_pure_fp16: + self.assertEqual(loss.dtype, torch.float16) + # FSDP loss is fp16, DDP AMP loss is fp32 + elif isinstance(model, FSDP): + self.assertEqual(loss.dtype, mixed_precision.param_dtype) + else: + self.assertEqual(loss.dtype, torch.float32) + model.module.run_backward(loss) + # Post-backward, if CPU offloading model params should be on CPU. + if cpu_offload_params and isinstance(model, FSDP): + for p in model.parameters(): + # Params should always be on CPU + self.assertEqual(p.device, torch.device("cpu")) + # Unscale the gradients and step + sharded_grad_scaler.step(optim) + # Update the scale factor + sharded_grad_scaler.update() + # if save_model, simulate save + load. + if save_model: + state_dict = {k: v.clone() for k, v in model.state_dict().items()} + # Zero params, if save/load state_dict did not work properly, this + # would break the parity test with DDP. 
+ _zero_model(model) + model.load_state_dict(state_dict) + + if isinstance(model, FSDP): + model._assert_state(TrainingState.IDLE) + return loss.detach() + + def _test_fsdp_parity( + self, + model_class: Type[FSDPTestModel], + fsdp_init_mode: FSDPInitMode, + cuda_init_mode: CUDAInitMode, + ref_init_fn: Optional[Callable] = None, + num_iters: int = 2, + save_model: bool = True, + cpu_offload: CPUOffload = CPUOffload(), + backward_prefetch: Optional[BackwardPrefetch] = None, + sharding_strategy: Optional[ShardingStrategy] = None, + mixed_precision: Optional[MixedPrecision] = None, + forward_prefetch: bool = False, + use_orig_params: bool = False, + enable_sharded_grad_scaler: bool = False, + use_pure_fp16: bool = False, + init_kwargs: Optional[Dict[str, Any]] = None, + sharded_grad_scaler_kwargs: Optional[Dict[str, Any]] = None, + **fsdp_kwargs, + ): + """ + Tests FSDP training against a reference, which defaults to DDP but + may be customized with ``ref_init_fn``. + + Args: + model_class (Type[FSDPTestModel]): A model class that inherits from + ``FSDPTestModel``, which defines the expected interface. + fsdp_init_mode (FSDPInitMode): The mode to initialize the + FSDP-wrapped model. This should not be ``NO_FSDP``. + ref_init_fn (Optional[Callable]): A callable to invoke that wraps a + non-wrapped model to construct the reference model, where this + wrapper should provide data parallel semantics. If ``None``, + then the callable defaults to the DDP constructor. + """ + assert ( + fsdp_init_mode != FSDPInitMode.NO_FSDP + ), "Expects an FSDP init mode that wraps with FSDP" + if init_kwargs is None: + init_kwargs = {} + lr = 1e-2 + rank = self.process_group.rank() + # Establish reference behavior with DDP + model = model_class.init( + self.process_group, + FSDPInitMode.NO_FSDP, + CUDAInitMode.CUDA_BEFORE, + deterministic=True, + **init_kwargs, + ) + if ref_init_fn is None: + ref_model = DDP(model, device_ids=[rank], output_device=rank) + else: + ref_model = ref_init_fn(model) + if use_pure_fp16: + ref_model = ref_model.half() + ref_loss = self._train_for_several_steps( + ref_model, + num_iters, + autocast=mixed_precision is not None, + lr=lr, + fsdp_cpu_offload=cpu_offload, + mixed_precision=mixed_precision, + enable_sharded_grad_scaler=enable_sharded_grad_scaler, + use_pure_fp16=use_pure_fp16, + sharded_grad_scaler_kwargs=sharded_grad_scaler_kwargs, + ) + ddp_params = list(ref_model.parameters()) + # Check against FSDP behavior + fsdp_kwargs.update( + { + "cpu_offload": cpu_offload, + "backward_prefetch": backward_prefetch, + "sharding_strategy": sharding_strategy, + "mixed_precision": mixed_precision, + "forward_prefetch": forward_prefetch, + "use_orig_params": use_orig_params, + } + ) + try: + fsdp_model = model_class.init( + self.process_group, + fsdp_init_mode, + cuda_init_mode, + fsdp_kwargs, + deterministic=True, + **init_kwargs, + ) + except Exception as e: + raise ValueError(f"Initializing {model_class} raised error {str(e)}") from e + if not isinstance(fsdp_model, FSDP): + # Enforce that we wrap with top-level FSDP since we are comparing + # assuming a data parallel reference and some test models may not + # do so in their `init()` method + fsdp_model = FSDP(fsdp_model, self.process_group, **fsdp_kwargs) + if use_pure_fp16: + # Change the model parameter dtype after FSDP initialization + fsdp_model = fsdp_model.half() + if cuda_init_mode == CUDAInitMode.CUDA_AFTER: + fsdp_model = fsdp_model.cuda() + offload_params = cpu_offload is not None and cpu_offload.offload_params + # Offloading 
parameters with `CUDA_AFTER` should raise an error during + # lazy initialization due to the parameter devices not being CPU; + # otherwise, all parameter devices should be CPU + expects_device_error = ( + offload_params and cuda_init_mode == CUDAInitMode.CUDA_AFTER + ) + expects_cpu_device = ( + offload_params and cuda_init_mode != CUDAInitMode.CUDA_AFTER + ) + if expects_cpu_device: + cpu_device = torch.device("cpu") + for param in fsdp_model.parameters(): + self.assertEqual(param.device, cpu_device) + context = ( + self.assertRaisesRegex( + RuntimeError, + "An FSDP-managed module with parameter CPU offloading enabled " + "has parameters on cuda", + ) + if expects_device_error + else nullcontext() + ) + with context: + fsdp_loss = self._train_for_several_steps( + fsdp_model, + num_iters, + autocast=False, + lr=lr, + fsdp_cpu_offload=cpu_offload, + save_model=save_model, + mixed_precision=mixed_precision, + enable_sharded_grad_scaler=enable_sharded_grad_scaler, + use_pure_fp16=use_pure_fp16, + sharded_grad_scaler_kwargs=sharded_grad_scaler_kwargs, + ) + # No need to check for parameter and loss parity if expecting an error + if expects_device_error: + return + # Check parameter devices are CPU if offloading to CPU before calling + # `get_full_params()`, which will cast the parameters to FP32 + if offload_params: + for param in fsdp_model.parameters(): + self.assertEqual(param.device, cpu_device) + fsdp_loss = fsdp_loss.cuda() + fsdp_unsharded_params = get_full_params(fsdp_model) + # Do not check dtype since the reference DDP loss may not be the same + # dtype as the FSDP loss in the case of mixed precision + torch.testing.assert_close(ref_loss, fsdp_loss, check_dtype=False) + # Do not check for parameter parity if using mixed precision since (1) + # the DDP parameters are in FP16 (from `half()`) while the FSDP + # parameters are in FP32 (from `summon_full_params()`) and (2) DDP runs + # the optimizer in FP16 while FSDP runs it in FP32 + # TODO: Disable checking the parameters for pure FP16 due to floating + # point inaccuracy. 
Note that this means that the backward pass is not + # checked: https://github.com/pytorch/pytorch/issues/90784 + if mixed_precision is None and not use_pure_fp16: + self.assertEqual( + ddp_params, + fsdp_unsharded_params, + exact_device=True, + msg="FSDP did not match DDP", + ) + + +class SkipModule(nn.Module): + def __init__(self): + super().__init__() + self.lin = nn.Linear(10, 10, bias=False) + + def forward(self, x): + return self.lin(x) + + +class NestedLinear(nn.Module): + def __init__(self, fsdp_wrap): + super().__init__() + if fsdp_wrap: + self.nested_linear = wrap(nn.Linear(10, 10, bias=False).cuda()) + else: + self.nested_linear = nn.Linear(10, 10, bias=False).cuda() + + def forward(self, x): + return self.nested_linear(x) + + +class SkipModel(nn.Module): + def __init__(self, double_nest): + super().__init__() + self.linear = nn.Linear(10, 10, bias=False).cuda() + self.linear_skip = SkipModule().cuda() + self.nested_linear = wrap(NestedLinear(fsdp_wrap=double_nest)) + + def forward(self, x): + x = self.linear(x) + x = self.linear_skip(x) + x = self.nested_linear(x) + return x diff --git a/env-llmeval/lib/python3.10/site-packages/torch/testing/_internal/common_jit.py b/env-llmeval/lib/python3.10/site-packages/torch/testing/_internal/common_jit.py new file mode 100644 index 0000000000000000000000000000000000000000..25b7bd8be0511fecfc79d37fd9f7ef5b84f821df --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torch/testing/_internal/common_jit.py @@ -0,0 +1,321 @@ +# Torch +import torch +import torch.cuda +import torch.jit +import torch.jit._logging +import torch.jit.frontend +import torch.jit.quantized + +# Testing utils +from torch.testing._internal.common_dtype import floating_and_complex_types_and +from torch.testing._internal.common_utils import TestCase, \ + freeze_rng_state, TemporaryFileName, enable_profiling_mode_for_profiling_tests, is_iterable_of_tensors +from torch.testing._internal.common_utils import enable_profiling_mode # noqa: F401 + +# Standard library +from itertools import chain +from typing import List, Union +from torch._C import TensorType + +import io + +def check_output_types(self, func, ref_outputs, args, kwargs): + graph = getattr(func, 'last_graph', None) + types = [o.type() for o in graph.outputs()] + self.assertTrue(len(types) == 1) + t = types[0] + torch._C._jit_assert_is_instance(ref_outputs, t) + +# Test names in this set are only checked for a single derivative +nn_functional_single_grad = frozenset('test_nn_' + name for name in [ + 'pdist', + 'multilabel_margin_loss', + 'max_unpool3d', + 'multi_margin_loss', + 'binary_cross_entropy', + 'binary_cross_entropy_size_average', + 'ctc_loss', + 'grid_sample', +]) + +def check_against_reference(self, func, reference_func, output_func, args, kwargs=None, + allow_unused=True, check_types=True, no_grad=False, no_gradgrad=False): + """Verifies a function performs identically to some reference implementation. + + Commonly, this is used to verify that a JIT implementation + (output_func) matches the behavior of the eager implementation + (reference_func). 
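+
+    A minimal illustrative call (``fn`` and its input are hypothetical, and
+    ``self`` is a ``JitCommonTestCase`` instance; ``check_types=True`` expects
+    ``func`` to expose a ``last_graph`` attribute, as used by
+    ``check_output_types`` above, so this sketch disables that check)::
+
+        def fn(x):
+            return x * 2
+
+        inputs = (torch.randn(3, requires_grad=True),)
+        check_against_reference(self, torch.jit.script(fn), fn,
+                                lambda outputs: outputs, inputs,
+                                check_types=False)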
+ """ + kwargs = kwargs if kwargs else {} + + def allSum(vs): + if isinstance(vs, torch.Tensor): + vs = (vs,) + return sum((i + 1) * v.sum().abs() if v.dtype.is_complex else (i + 1) * v.sum() + for i, v in enumerate(vs) + if v is not None and v.dtype in floating_and_complex_types_and(torch.half, torch.bfloat16)) + + def clone_tensor(t, preserve_requires_grad): + require_grad = preserve_requires_grad and t.requires_grad + return t.detach().clone().requires_grad_(require_grad) + + def clone_inputs(preserve_requires_grad: bool): + inputs: List[Union[torch.Tensor, List[torch.Tensor]]] = [] + + for arg in args: + if isinstance(arg, torch.Tensor): + inputs.append(clone_tensor(arg, preserve_requires_grad)) + elif is_iterable_of_tensors(arg): + inputs.append([clone_tensor(t, preserve_requires_grad) for t in arg]) + else: + inputs.append(arg) + + return inputs + + # Returns tensors in args that requires_grad, including tensors in TensorList args + def get_recording_tensors(args): + recording_tensors: List[torch.Tensor] = [] + + for arg in args: + if isinstance(arg, torch.Tensor) and arg.requires_grad: + recording_tensors.append(arg) + elif is_iterable_of_tensors(arg): + recording_tensors.extend(filter(lambda t: t.requires_grad, arg)) + + return recording_tensors + + # test no gradients case + nograd_inputs = clone_inputs(preserve_requires_grad=False) + outputs = self.runAndSaveRNG(reference_func, nograd_inputs, kwargs) + with enable_profiling_mode_for_profiling_tests(): + outputs_test = self.runAndSaveRNG(func, nograd_inputs, kwargs) + self.assertEqual(outputs, outputs_test) + + if check_types: + check_output_types(self, func, outputs_test, nograd_inputs, kwargs) + + if no_grad: + # skip grad tests + return + + with enable_profiling_mode_for_profiling_tests(): + # test single grad case + recording_inputs = clone_inputs(preserve_requires_grad=True) + recording_tensors = get_recording_tensors(recording_inputs) + outputs = output_func(self.runAndSaveRNG(reference_func, recording_inputs, kwargs)) + grads = torch.autograd.grad(allSum(outputs), recording_tensors, + allow_unused=allow_unused) + outputs_test = output_func(self.runAndSaveRNG(func, recording_inputs, kwargs)) + grads_test = torch.autograd.grad(allSum(outputs_test), recording_tensors, + allow_unused=allow_unused) + self.assertEqual(outputs, outputs_test) + self.assertEqual(grads, grads_test) + # test the grad grad case + if self._testMethodName in nn_functional_single_grad or no_gradgrad: + return + + outputs = output_func(self.runAndSaveRNG(reference_func, recording_inputs, kwargs)) + l1 = allSum(outputs) + grads = torch.autograd.grad(l1, recording_tensors, create_graph=True, + allow_unused=allow_unused) + + l2 = (allSum(grads) * l1) + grads2 = torch.autograd.grad(l2, recording_tensors, allow_unused=allow_unused) + recording_inputs = clone_inputs(preserve_requires_grad=True) + recording_tensors = get_recording_tensors(recording_inputs) + outputs_test = output_func(self.runAndSaveRNG(func, recording_inputs, kwargs)) + l1_test = allSum(outputs_test) + grads_test = torch.autograd.grad( + l1_test, recording_tensors, create_graph=True, allow_unused=allow_unused) + + l2_test = (allSum(grads_test) * l1_test) + grads2_test = torch.autograd.grad(l2_test, recording_tensors, allow_unused=allow_unused) + + self.assertEqual(outputs, outputs_test) + self.assertEqual(grads, grads_test) + for g2, g2_test in zip(grads2, grads2_test): + if g2 is None and g2_test is None: + continue + self.assertEqual(g2, g2_test, atol=5e-4, rtol=1e-4) + +class 
JitCommonTestCase(TestCase): + def createFunctionFromGraph(self, trace): + graph = trace if isinstance(trace, torch._C.Graph) else trace.graph() + return torch._C._create_function_from_graph("forward", graph) + + def assertExportImport(self, trace, inputs): + m = self.createFunctionFromGraph(trace) + self.assertExportImportModule(m, inputs) + + def assertExportImportModule(self, m, inputs): + m_import = self.getExportImportCopy(m) + a = self.runAndSaveRNG(m, inputs) + b = self.runAndSaveRNG(m_import, inputs) + self.assertEqual(a, b, "Results of original model and " + "exported/imported version of model differed") + + def runAndSaveRNG(self, func, inputs, kwargs=None): + kwargs = kwargs if kwargs else {} + with freeze_rng_state(): + results = func(*inputs, **kwargs) + return results + + def getExportImportCopy(self, m, also_test_file=True, map_location=None): + buffer = io.BytesIO() + torch.jit.save(m, buffer) + buffer.seek(0) + imported = torch.jit.load(buffer, map_location=map_location) + + if not also_test_file: + return imported + + with TemporaryFileName() as fname: + torch.jit.save(imported, fname) + return torch.jit.load(fname, map_location=map_location) + + def autoDiffErrorMessage(self, should_autodiff_node, nodes_not_in_diff_graph, + fusion_nodes_not_found, non_fusible_nodes_being_fused, + fusion_nodes_found, nodes_in_diff_graph): + err_msg = "\nFailure in testing nodes' autodifferentiation. " + if should_autodiff_node: + err_msg += "One or more nodes were expected to be autodiffed, " \ + "but were not found in specified fusible/nonfusible " \ + "DifferentiableGraph groups. \nSpecifically:" + # The node is intended to appear in a differentiable graph but doesn't + diff_nodes_missing = [] + # The node is intended to appear in a differentiable graph + # outside of a fusion group but instead is in a fusion group + diff_nodes_in_fusion = [] + # The node is intended to appear in a fusion group but doesn't + fusion_nodes_missing = [] + # The node is intended to appear in a fusion group but instead + # is just in an outer differentiable graph + fusion_nodes_in_diff = [] + for node in nodes_not_in_diff_graph: + if node in non_fusible_nodes_being_fused: + diff_nodes_in_fusion.append(node) + else: + diff_nodes_missing.append(node) + for node in fusion_nodes_not_found: + if node in nodes_in_diff_graph: + fusion_nodes_in_diff.append(node) + else: + fusion_nodes_missing.append(node) + if len(diff_nodes_missing) > 0: + err_msg += f"\n {diff_nodes_missing} were not in one of the " \ + "DifferentiableGraphs when they were expected to be. " \ + "Did you intend for these nodes to be autodiffed? " \ + "If not, remove them from the list of nonfusible nodes." + if len(diff_nodes_in_fusion) > 0: + err_msg += f"\n {diff_nodes_in_fusion} were found in one of the FusionGroups " \ + "when they were expected to be just in a DifferentiableGraph. If it was " \ + "intended for these nodes to be in FusionGroups, reclassify these nodes as " \ + "fusible nodes. If these nodes were not intended to be fused, your " \ + "autodifferentiation logic might be wrong." + if len(fusion_nodes_missing) > 0: + err_msg += f"\n {fusion_nodes_missing} were not in one of the FusionGroups " \ + "of the DifferentiableGraphs when they were expected to be. " \ + "They were also not found in an outer DifferentiableGraph. Did you " \ + "intend for these nodes to be autodifferentiated? If not, you should " \ + "remove these nodes from the test's fusible nodes. Otherwise your " \ + "autodifferentiation logic might be wrong." 
+ if len(fusion_nodes_in_diff) > 0: + err_msg += f"\n {fusion_nodes_in_diff} were not in one of the FusionGroups " \ + "of the DifferentiableGraphs when they were expected to be, " \ + "instead they were found just in an outer DifferentiableGraph. " \ + "Did you intend for these nodes to be fused? If not, you should " \ + "move these nodes into the test's nonfusible nodes. Otherwise your " \ + "autodifferentiation logic might be wrong." + else: + err_msg += "One or more nodes were not expected to be autodiffed " \ + "but were found in a DifferentiableGraph or in a FusionGroup " \ + "of a DifferentiableGraph. Did you intend for these nodes to be " \ + "autodiffed? If so, change this test to expect autodifferentiation. " \ + "\nSpecifically:" + if len(fusion_nodes_found) > 0: + err_msg += f"\n {fusion_nodes_found} were not expected to be in " \ + "one of the DifferentiableGraphs, but appeared in a FusionGroup " \ + "of a DifferentiableGraph. " + if len(nodes_in_diff_graph) > 0: + err_msg += f"\n {nodes_in_diff_graph} were not expected to " \ + "be in one of the DifferentiableGraphs but were." + return err_msg + + def assertAutodiffNode(self, graph, should_autodiff_node, nonfusible_nodes, fusible_nodes): + diff_nodes = graph.findAllNodes('prim::DifferentiableGraph') + diff_subgraphs = [node.g('Subgraph') for node in diff_nodes] + + # Note: currently no tests have fusible_nodes + fusion_nodes = list(chain.from_iterable([g.findAllNodes('prim::FusionGroup') for g in diff_subgraphs])) + fusion_subgraphs = [node.g('Subgraph') for node in fusion_nodes] + + # For any non-fusible node, it must show up in one of the DifferentiableGraphs. + nodes_in_diff_graph = [] + nodes_not_in_diff_graph = [] + non_fusible_nodes_being_fused = [] + for node in nonfusible_nodes: + if any(g.findNode(node) is not None for g in diff_subgraphs): + nodes_in_diff_graph.append(node) + else: + nodes_not_in_diff_graph.append(node) + if any(g.findNode(node) is not None for g in fusion_subgraphs): + non_fusible_nodes_being_fused.append(node) + found_all_nonfusible_nodes = len(nodes_in_diff_graph) == len(nonfusible_nodes) + + # For any fusible node, it must show up in one of the FusionGroups in one of the DifferentiableGraphs. 
+        fusion_nodes_found = []
+        fusion_nodes_not_found = []
+        for node in fusible_nodes:
+            if any(g.findNode(node) is not None for g in fusion_subgraphs):
+                fusion_nodes_found.append(node)
+            else:
+                fusion_nodes_not_found.append(node)
+        found_all_fusible_nodes = len(fusion_nodes_found) == len(fusible_nodes)
+
+        if should_autodiff_node is not None:
+            err_msg = self.autoDiffErrorMessage(should_autodiff_node,
+                                                nodes_not_in_diff_graph,
+                                                fusion_nodes_not_found,
+                                                non_fusible_nodes_being_fused,
+                                                fusion_nodes_found,
+                                                nodes_in_diff_graph)
+            self.assertEqual(should_autodiff_node,
+                             found_all_nonfusible_nodes and found_all_fusible_nodes, err_msg)
+
+    def checkShapeAnalysis(self, out_sizes: Union[List[int], List[List[int]]],
+                           traced_graph, assert_propagation, constant_prop=True):
+        # Re-propagate the input shapes provided by tracing.
+        prev_symbolic_shapes_test_enabled = torch._C._jit_symbolic_shapes_test_mode_enabled()
+        for enable_test_mode in [True, False]:
+            # Test both allowing and disallowing the substitution of complete
+            # shapes as constants; disallowing constants helps stress test the
+            # partial evaluation and substitution pipeline.
+            torch._C._jit_set_symbolic_shapes_test_mode(enable_test_mode)
+            torch._C._jit_erase_non_input_shape_information(traced_graph)
+            if constant_prop:
+                torch._C._jit_pass_constant_propagation(traced_graph)
+            torch._C._jit_pass_propagate_shapes_on_graph(traced_graph)
+            # Add sizes to the default tensor type to avoid checking anything
+            # out of scope and to sidestep extra information the tracer leaves
+            # in other parts of the tensor type.
+            output = next(traced_graph.outputs()).type()
+
+            def test_type(type, actual_size):
+                sizes = type.symbolic_sizes()
+                out_type = TensorType.get().with_sizes(sizes)
+                actual_type = TensorType.get().with_sizes(actual_size)
+
+                # Always check that the actual shape is a subtype of the output type
+                self.assertTrue(actual_type.isSubtypeOf(out_type))
+
+                # Then, if the assertion flag is provided, check that shape
+                # analysis succeeded
+                if assert_propagation:
+                    self.assertEqual(out_type.sizes(), actual_size)
+
+            if output.isSubtypeOf(torch._C.TensorType.get()):
+                test_type(output, out_sizes)
+            else:
+                tuple_elements = output.elements()
+                for i in range(len(tuple_elements)):
+                    test_type(tuple_elements[i], out_sizes[i])
+
+        torch._C._jit_set_symbolic_shapes_test_mode(prev_symbolic_shapes_test_enabled)
diff --git a/env-llmeval/lib/python3.10/site-packages/torch/testing/_internal/common_methods_invocations.py b/env-llmeval/lib/python3.10/site-packages/torch/testing/_internal/common_methods_invocations.py
new file mode 100644
index 0000000000000000000000000000000000000000..3b934eac19b4e4dc4ec55dcdd5855d325d0cc71c
--- /dev/null
+++ b/env-llmeval/lib/python3.10/site-packages/torch/testing/_internal/common_methods_invocations.py
@@ -0,0 +1,22375 @@
+from functools import wraps, partial
+from itertools import product, chain, islice
+import itertools
+import functools
+import copy
+import operator
+import random
+import unittest
+import math
+import enum
+
+import torch
+import numpy as np
+from torch import inf, nan
+
+from typing import Any, Dict, List, Tuple, Union, Sequence
+from torch.testing import make_tensor
+from torch.testing._internal.common_dtype import (
+    _dispatch_dtypes, floating_types, floating_types_and, complex_types, floating_and_complex_types,
+    floating_and_complex_types_and, all_types_and_complex_and, all_types_and, all_types_and_complex, integral_types_and,
+    all_types, empty_types, complex_types_and, integral_types, custom_types
+)
+from 
torch.testing._internal.common_device_type import \ + (onlyCPU, onlyCUDA, onlyNativeDeviceTypes, disablecuDNN, skipCUDAIfNoMagma, skipCUDAIfNoMagmaAndNoCusolver, + skipCUDAIfNoCusolver, skipCPUIfNoLapack, skipCPUIfNoFFT, skipCUDAIf, precisionOverride, + skipCPUIfNoMklSparse, + toleranceOverride, tol) +from torch.testing._internal.common_cuda import ( + PLATFORM_SUPPORTS_FLASH_ATTENTION, SM53OrLater, SM80OrLater, SM90OrLater, with_tf32_off, TEST_CUDNN, + _get_torch_cuda_version, _get_torch_rocm_version, +) +from torch.testing._internal.common_utils import ( + make_fullrank_matrices_with_distinct_singular_values, + TEST_WITH_ROCM, IS_WINDOWS, IS_MACOS, TEST_SCIPY, + torch_to_numpy_dtype_dict, TEST_WITH_ASAN, + GRADCHECK_NONDET_TOL, freeze_rng_state, slowTest, TEST_WITH_SLOW, + TEST_WITH_TORCHINDUCTOR +) + +import torch._refs as refs # noqa: F401 +import torch._refs.nn.functional +import torch._refs.special +import torch._refs.linalg +import torch._prims as prims # noqa: F401 +from torch.utils import _pytree as pytree + + +from packaging import version + +from torch.testing._internal.opinfo.core import ( # noqa: F401 + L, + M, + S, + XS, + _NOTHING, + _getattr_qual, + DecorateInfo, + SampleInput, + ErrorInput, + AliasInfo, + NumericsFilter, + OpInfo, + _generate_reduction_inputs, + _generate_reduction_kwargs, + sample_inputs_reduction, + ReductionOpInfo, + reference_inputs_elementwise_binary, + make_error_inputs_elementwise_binary, + generate_elementwise_binary_tensors, + generate_elementwise_binary_arbitrarily_strided_tensors, + generate_elementwise_binary_small_value_tensors, + generate_elementwise_binary_large_value_tensors, + generate_elementwise_binary_extremal_value_tensors, + generate_elementwise_binary_broadcasting_tensors, + generate_elementwise_binary_with_scalar_samples, + generate_elementwise_binary_with_scalar_and_type_promotion_samples, + generate_elementwise_binary_noncontiguous_tensors, + sample_inputs_elementwise_binary, + BinaryUfuncInfo, + sample_inputs_elementwise_unary, + generate_elementwise_unary_tensors, + generate_elementwise_unary_small_value_tensors, + generate_elementwise_unary_large_value_tensors, + generate_elementwise_unary_extremal_value_tensors, + reference_inputs_elementwise_unary, + UnaryUfuncInfo, + sample_inputs_spectral_ops, + SpectralFuncType, + SpectralFuncInfo, + ShapeFuncInfo, + sample_inputs_foreach, + ForeachFuncInfo, + gradcheck_wrapper_hermitian_input, + gradcheck_wrapper_triangular_input, + gradcheck_wrapper_triangular_input_real_positive_diagonal, + gradcheck_wrapper_masked_operation, + gradcheck_wrapper_masked_pointwise_operation, + clone_sample, +) +from torch.testing._internal.opinfo.refs import ( # NOQA: F401 + _find_referenced_opinfo, + _inherit_constructor_args, + PythonRefInfo, + ReductionPythonRefInfo, + ElementwiseUnaryPythonRefInfo, + ElementwiseBinaryPythonRefInfo, +) +from torch.testing._internal.opinfo.utils import ( + np_unary_ufunc_integer_promotion_wrapper, + reference_reduction_numpy, + prod_numpy +) +from torch.testing._internal import opinfo +from torch.testing._internal.opinfo.definitions.linalg import ( + sample_inputs_linalg_cholesky, + sample_inputs_linalg_cholesky_inverse, + sample_inputs_cross, + sample_inputs_linalg_qr_geqrf, + sample_inputs_linalg_invertible, + sample_inputs_lu_solve, + sample_inputs_legacy_solve, + sample_inputs_svd, + sample_inputs_linalg_det_logdet_slogdet, + sample_inputs_linalg_lu, + sample_inputs_diagonal_diag_embed, + error_inputs_diagonal_diag_embed, +) +from 
torch.testing._internal.opinfo.definitions.special import ( + sample_inputs_i0_i1, + sample_inputs_polygamma, + reference_polygamma, +) +from torch.testing._internal.opinfo.definitions._masked import ( + sample_inputs_softmax_variant, +) +from torch.testing._internal.opinfo.definitions.sparse import ( + error_inputs_sparse_like_fns, + sample_inputs_sparse_like_fns, + error_inputs_sparse_mul, + sample_inputs_sparse_mul, + error_inputs_sparse_reduction_sum, + sample_inputs_sparse_reduction_sum +) + +if TEST_SCIPY: + from scipy import stats + import scipy.spatial + import scipy.special + + +# test if a tensor is close to an integer +def close_to_int(x, eps=0.1): + if x.is_complex(): + y = torch.abs(torch.view_as_complex(torch.frac(torch.view_as_real(x)))) + else: + y = torch.abs(torch.frac(x)) + return (y < eps) | (y > (1 - eps)) + + +def sample_inputs_slice(op_info, device, dtype, requires_grad, **kwargs): + + make_input = partial(make_tensor, device=device, dtype=dtype, + low=None, high=None, requires_grad=requires_grad) + + yield SampleInput(make_input(3), 0) + + yield SampleInput(make_input(20, 30, 40), dim=1, start=1, end=-2) + + yield SampleInput(make_input(20, 30, 40), dim=1, start=1, end=-2, step=3) + + yield SampleInput(make_input(20, 30, 40), dim=0, start=-10, end=-2, step=2) + + +def sample_inputs_tensor_split(op_info, device, dtype, requires_grad, **kwargs): + make_input = partial(make_tensor, device=device, dtype=dtype, + low=None, high=None, requires_grad=requires_grad) + + args_cases = ( + # Cases with tensor indices. + (torch.tensor([1, 2, 3]),), + (torch.tensor(1),), + (torch.tensor([1, 2, 3]), 1), + (torch.tensor([1, 4, 2, 5, 3, 6])[::2], 1), + # Cases with list of indices. + ((2, 4),), + ((2, 4), 1), + ((2, 4), -1), + # Cases with integer section. 
+ (3,), + (3, 1), + (3, -1), + ) + + for args in args_cases: + yield SampleInput(make_input((S, S, S)), args=args) + + +def sample_inputs_hsplit(op_info, device, dtype, requires_grad, **kwargs): + make_arg = partial(make_tensor, dtype=dtype, device=device, + low=None, high=None, requires_grad=requires_grad) + yield SampleInput(make_arg(6), 2) + yield SampleInput(make_arg(S, S, S), [1, 2, 3]) + +def sample_inputs_vsplit(op_info, device, dtype, requires_grad, **kwargs): + make_arg = partial(make_tensor, dtype=dtype, device=device, + low=None, high=None, requires_grad=requires_grad) + yield SampleInput(make_arg(6, S), 2) + yield SampleInput(make_arg(S, S, S), [1, 2, 3]) + +def sample_inputs_dsplit(op_info, device, dtype, requires_grad, **kwargs): + make_arg = partial(make_tensor, dtype=dtype, device=device, + low=None, high=None, requires_grad=requires_grad) + yield SampleInput(make_arg(S, S, S), [1, 2, 3]) + yield SampleInput(make_arg(S, S, 6), 2) + +def error_inputs_hsplit(op_info, device, **kwargs): + make_arg = partial(make_tensor, dtype=torch.float32, device=device) + err_msg1 = ("torch.hsplit requires a tensor with at least 1 dimension, " + "but got a tensor with 0 dimensions!") + yield ErrorInput(SampleInput(make_arg(()), 0), error_regex=err_msg1) + + err_msg2 = (f"torch.hsplit attempted to split along dimension 1, " + f"but the size of the dimension {S} " + f"is not divisible by the split_size 0!") + yield ErrorInput(SampleInput(make_arg((S, S, S)), 0), error_regex=err_msg2) + + # Incorrect type for indices_or_section argument + err_msg3 = ("received an invalid combination of arguments.") + yield ErrorInput( + SampleInput(make_arg((S, S, S)), "abc"), + error_type=TypeError, error_regex=err_msg3) + +def error_inputs_vsplit(op_info, device, **kwargs): + make_arg = partial(make_tensor, dtype=torch.float32, device=device) + err_msg1 = ("torch.vsplit requires a tensor with at least 2 dimension, " + "but got a tensor with 1 dimensions!") + yield ErrorInput(SampleInput(make_arg(S), 0), error_regex=err_msg1) + + err_msg2 = (f"torch.vsplit attempted to split along dimension 0, " + f"but the size of the dimension {S} " + f"is not divisible by the split_size 0!") + yield ErrorInput(SampleInput(make_arg(S, S, S), 0), + error_regex=err_msg2) + + # Incorrect type for indices_or_section argument + err_msg3 = ("received an invalid combination of arguments.") + yield ErrorInput(SampleInput(make_arg(S, S, S), "abc"), + error_type=TypeError, error_regex=err_msg3) + +def error_inputs_dsplit(op_info, device, **kwargs): + make_arg = partial(make_tensor, dtype=torch.float32, device=device) + err_msg1 = ("torch.dsplit requires a tensor with at least 3 dimension, " + "but got a tensor with 1 dimensions!") + yield ErrorInput(SampleInput(make_arg(S), 0), error_regex=err_msg1) + + err_msg2 = (f"torch.dsplit attempted to split along dimension 2, " + f"but the size of the dimension {S} " + f"is not divisible by the split_size 0!") + yield ErrorInput(SampleInput(make_arg(S, S, S), 0), error_regex=err_msg2) + + +def sample_inputs_as_strided(op_info, device, dtype, requires_grad, **kwargs): + make_arg = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad) + + # input shape, output shape, output stride, output storage offset + test_cases = ( + ((1,), (1,), (1,), 0), + ((3, 3), (2, 2), (1, 2), 0), + ((3, 3), (2, 2), (1, 2), 1), + ((16,), (2, 2, 2, 2), (1, 1, 1, 1), 0), + ((16,), (2, 1, 1, 2), (1, 7, 7, 1), 0), + ) + + for input_shape, output_shape, stride, storage_offset in test_cases: + 
input_t = make_arg(input_shape) + kwargs = dict(storage_offset=storage_offset) + yield SampleInput(input_t, args=(output_shape, stride), kwargs=kwargs) + +def sample_inputs_as_strided_partial_views(op_info, device, dtype, requires_grad, **kwargs): + def make_arg(): + base = make_tensor((20,), device=device, dtype=dtype) + return base[5:15].requires_grad_(requires_grad) + + # as_strided on offset, partial views + yield SampleInput(make_arg(), (2, 2), (1, 2)) + yield SampleInput(make_arg(), (2, 2), (1, 2), storage_offset=0) + yield SampleInput(make_arg(), (2, 2), (1, 2), storage_offset=10) + +def sample_inputs_as_strided_scatter(op_info, device, dtype, requires_grad, **kwargs): + make_arg = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad) + + # input shape, output shape, output stride, output storage offset + test_cases = [ + ((1,), (), (), 0), + ((1,), (1,), (1,), 0), + ((3, 3), (2, 2), (1, 2), 0), + ((3, 3), (2, 2), (1, 2), 1), + ((3, 3), (2, 2), (2, 1), 0), + # Scatter to larger dimensions + ((16,), (2, 2, 2, 2), (8, 4, 2, 1), 0), + # Scatter to larger dimensions with strides inverted + ((16,), (2, 1, 1, 2), (1, 2, 4, 8), 0), + ] + + for input_shape, output_shape, stride, storage_offset in test_cases: + input_t = make_arg(input_shape) + input_src = make_arg(output_shape) + yield SampleInput(input_t, input_src, output_shape, stride, storage_offset=storage_offset) + + +def error_inputs_as_strided_scatter(op_info, device, **kwargs): + make_arg = partial(make_tensor, device=device, dtype=torch.float32, requires_grad=False) + + # Create a small tensor and try to scatter it out of bounds + input_t = make_arg([4, 4]) + input_src = make_arg([2, 2]) + yield ErrorInput( + SampleInput(input_t, input_src, [2, 2], [200, 200], storage_offset=0), + error_regex="itemsize 4 requiring a storage size of 1604 are out of bounds for storage of size 64" + ) + + +def sample_inputs_combinations(op_info, device, dtype, requires_grad, **kwargs): + inputs = ( + (0,), + (0, 1), + (0, 1, 2, 3), + ) + + rvals = [1, 2, 4] + + products = product(inputs, rvals, [False, True]) + + for input_data, r, with_replacement in products: + input_t = torch.tensor(input_data, device=device, dtype=dtype, requires_grad=requires_grad) + yield SampleInput(input_t, r=r, with_replacement=with_replacement) + +def sample_inputs_cartesian_prod(op_info, device, dtype, requires_grad, **kwargs): + make_arg = partial(torch.tensor, device=device, dtype=dtype, requires_grad=requires_grad) + + # constructs 1-D tensors with varying number of elements + a = make_arg((0,)) + b = make_arg((0, 1)) + c = make_arg((0, 1, 2, 3)) + + # sample with only 1 tensor + yield SampleInput(a) + + # sample with 2 tensors + yield SampleInput(a, b) + + # sample with 3 tensors + yield SampleInput(a, b, c) + +def sample_inputs_cosine_similarity(op_info, device, dtype, requires_grad, **kwargs): + make_arg = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad) + + # Ordered as input_shape, dict of dim and eps + cases: Tuple[tuple, dict] = ( # type: ignore[assignment] + ((S, S), {'dim': 1}), + ((S, 2), {'dim': -1}), + ((S,), {'dim': 0, 'eps': 0.5}), + ((), {'dim': 0}), + ((S, S, M), {'dim': 2}), + ((S, S), {}) + ) + + for input_shape, kwargs in cases: + yield SampleInput(make_arg(input_shape), args=(make_arg(input_shape),), kwargs=kwargs) + # Test for Broadcasting + yield SampleInput(make_arg((1, 2, 3)), args=(make_arg((2, 1, 3)),), kwargs={'dim': -1}) + yield SampleInput(make_arg((1, 2, 3)), args=(make_arg((2, 1, 3)),), 
kwargs={'dim': -2}) + yield SampleInput(make_arg((2, 3)), args=(make_arg((2, 1, 3)),), kwargs={'dim': -1}) + + +def sample_inputs_item(op_info, device, dtype, requires_grad, **kwargs): + make_arg = partial(make_tensor, dtype=dtype, device=device, requires_grad=False) + + cases = ( + (), + (()), + (1), + ((1,)), + ) + + for shape in cases: + yield SampleInput(make_arg(shape)) + +def error_inputs_item(op, device, **kwargs): + make_arg = partial(make_tensor, dtype=torch.float32, device=device, requires_grad=False) + + cases = ( + (M), + ((S,)), + (S, S), + (S, M, L), + ) + + for shape in cases: + yield ErrorInput( + SampleInput(make_arg(shape)), error_type=RuntimeError, + error_regex="elements cannot be converted to Scalar") + + +def sample_inputs_batch_norm(op_info, device, dtype, requires_grad, **kwargs): + make_arg = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad) + make_arg_without_requires_grad = partial(make_tensor, device=device, dtype=dtype, requires_grad=False) + + # Ordered as: input shape, kwargs for training, momentum, eps + cases: Tuple[Tuple[int], dict] = ( # type: ignore[assignment] + ((S, S, S), {'training': True, 'momentum': 0.5, 'eps': 0.6}), + ((3, 2, 4), {'training': False, 'momentum': -1.2}), + ((3, 1), {'training': True, 'momentum': 0.0}), + ((0,), {'training': True}), + ((0,), {'training': False}), + ((3, 2, 3, 4), {'training': True, 'momentum': -1.0, 'eps': 0.5}), + ((3, 2, 3, 4), {'training': False, 'momentum': -1.0, 'eps': 0.5}), + ((2, 1), {}), + ) + + for input_shape, kwargs in cases: + # args: running mean, running var, weight and bias should necessarily be of shape: (channels,) + channels = input_shape[1] if len(input_shape) > 1 else 0 + weight = make_arg(channels) if channels > 0 else None + bias = make_arg(channels) if channels > 0 else None + running_mean = make_arg_without_requires_grad(channels, low=0) + running_var = make_arg_without_requires_grad(channels, low=0) + + yield SampleInput( + make_arg(input_shape), + args=( + running_mean, + running_var, + weight, + bias + ), + kwargs=kwargs + ) + + # Checking for permutations of weights and biases as `None` + weights = [channels, None, None] + biases = [None, channels, None] + is_training = [True, False, False] + + for weight, bias, training in zip(weights, biases, is_training): + yield SampleInput( + make_arg(input_shape), + args=( + running_mean, + running_var, + make_arg(channels), + make_arg(channels) + ), + kwargs={'training': training} + ) + + # Test case for no optional kwargs + # running_mean and running_var are required in evaluation mode (training: False) but not in training mode + yield SampleInput(make_arg((1, 2, 3)), args=(None, None, None, None), kwargs={'training': True}) + +def sample_inputs_softmax_backward_data(op_info, device, dtype, requires_grad, **kwargs): + make_arg = partial( + make_tensor, device=device, dtype=dtype, requires_grad=requires_grad + ) + cases = [ + ((S,), 0), + ((S, S), 0), + ((S, M, S), -1), + ] + input_dtypes = [dtype] + if dtype == torch.float and device == 'cuda': + input_dtypes += [torch.float16] + + for (shape, dim), input_dtype in product(cases, input_dtypes): + input = make_arg(shape) + output = torch.nn.functional.softmax(input, dim=dim, dtype=input_dtype) + yield SampleInput(make_arg(shape), output, dim, input_dtype) + +def sample_inputs_native_batch_norm(op_info, device, dtype, requires_grad, **kwargs): + samples = sample_inputs_batch_norm(op_info, device, dtype, requires_grad, **kwargs) + for sample in samples: + # 
torch.native_batch_norm does not support 0 numel tensors + # IndexError: Dimension out of range (expected to be in range of [-1, 0], but got 1) + if sample.input.numel() == 0: + continue + args = sample.args + training = sample.kwargs.get('training', True) + momentum = sample.kwargs.get('momentum', 0.5) + eps = sample.kwargs.get('eps', 1e-5) + yield SampleInput(sample.input, args=(args[2], args[3], args[0], args[1], training, momentum, eps)) + + +def sample_inputs__native_batch_norm_legit(op_info, device, dtype, requires_grad, **kwargs): + samples = sample_inputs_batch_norm(op_info, device, dtype, requires_grad, **kwargs) + for sample in samples: + # torch.native_batch_norm does not support 0 numel tensors + # IndexError: Dimension out of range (expected to be in range of [-1, 0], but got 1) + if sample.input.numel() == 0: + continue + args = sample.args + training = sample.kwargs.get('training', True) + momentum = sample.kwargs.get('momentum', 0.5) + eps = sample.kwargs.get('eps', 1e-5) + if args[0] is not None and args[1] is not None: + yield SampleInput(sample.input, args=(args[2], args[3], args[0], args[1], training, momentum, eps)) + else: + yield SampleInput(sample.input, args=(args[2], args[3], training, momentum, eps)) + + +def sample_inputs_nn_activation_relu(op_info, device, dtype, requires_grad, **kwargs): + make_arg = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad) + + cases = ( + (()), + ((S, )), + ((S, S)), + ((S, M, S)) + ) + + for shape in cases: + yield SampleInput(make_arg(shape)) + +def sample_inputs_prelu(op_info, device, dtype, requires_grad, **kwargs): + op_kwargs = op_info.sample_kwargs(device, dtype, None)[0] + yield from sample_inputs_elementwise_unary(op_info, device, dtype, requires_grad, + op_kwargs=op_kwargs) + + make_arg = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad) + + cases = ( + (()), + ((S, )), + ((S, S)), + ((S, M, S)) + ) + + for shape in cases: + for weight in [-1., 0., 0.8, 1.]: + weight_tensor = torch.tensor(weight, device=device, dtype=dtype, requires_grad=requires_grad) + yield SampleInput(make_arg(shape), args=(weight_tensor,)) + + channel_size = shape[1] if len(shape) >= 2 else 1 + yield SampleInput(make_arg(shape), args=(make_arg((channel_size,)),)) + + weight_tensor = torch.tensor(1., device=device, dtype=dtype, requires_grad=requires_grad) + + yield SampleInput(make_arg((S, S)), kwargs=dict(weight=weight_tensor,)) + yield SampleInput(make_arg((S, S)), kwargs=dict(weight=make_arg((S,)),)) + +def reference_inputs_prelu(op, device, dtype, requires_grad, **kwargs): + yield from sample_inputs_prelu(op, device, dtype, requires_grad, **kwargs) + yield from reference_inputs_elementwise_unary(op, device, dtype, requires_grad, **kwargs) + +def sample_kwargs_prelu_scalar_weight(device, dtype, input): + weight = torch.rand(tuple(), device=device, dtype=dtype) + # NumPy does not support bfloat16, so we default to float32 (only for NumPy) in that case + if dtype == torch.bfloat16: + weight_cpu = weight.to(dtype=torch.float32, device="cpu") + else: + weight_cpu = weight.cpu() + np_weight = weight_cpu.numpy() + return ({'weight': weight}, {'weight': np_weight}) + +def error_inputs_prelu(op, device): + # Weight has numel != 1, but self.ndim is zero-dim tensor + inp = make_tensor(tuple(), device=device, dtype=torch.float32) + weight = make_tensor((2,), device=device, dtype=torch.float32) + yield ErrorInput(SampleInput(inp, kwargs={'weight': weight}), + error_regex="Not allow zero-dim input tensor.") 
+ + # Weight has numel != 1, but numel does not match channel size + inp = make_tensor((2, 8, 3), device=device, dtype=torch.float32) + weight = make_tensor((9,), device=device, dtype=torch.float32) + yield ErrorInput(SampleInput(inp, kwargs={'weight': weight}), + error_regex="Mismatch of parameter numbers and input channel size.") + + # Weight is neither a scalar nor 1-D tensor + inp = make_tensor((2, 8, 3), device=device, dtype=torch.float32) + weight = make_tensor((2, 4), device=device, dtype=torch.float32) + yield ErrorInput(SampleInput(inp, kwargs={'weight': weight}), + error_regex="prelu: Expected `weight` to be a scalar or 1D tensor, but got: ndim = 2") + + # src and index tensors must have the same # of dimensions +def sample_inputs_norm(op_info, device, dtype, requires_grad, **kwargs): + make_arg = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad) + + # ord = inf is tested in inputs_norm_inf as it fails on some tests + cases = [ + ((S, S), (2,), '2'), + ((S, S), (0,), '0'), + ((S, S), (0.5,), '0_5'), + ((S, S), (1,), '1'), + ((S, S), (3,), '3'), + ((S, S), (-1,), 'neg_1'), + ((S, S), (-2,), 'neg_2'), + ((S, S), (-0.5,), 'neg_0_5'), + ((S, S), (-1.5,), 'neg_1_5'), + ] + + cases_nonzero_input = ( + ((S, S, S), (1.5,), '1_5_default'), + ((S, S, S), (1.5, 1), '1_5_dim'), + ((S, S, S), (1.5, -1), '1_5_neg_dim'), + ((S, S, S), (1.5, 1, True), 'keepdim_1_5_dim'), + ((S, S, S), (1.5, -1, True), 'keepdim_1_5_neg_dim'), + ) + + cases_posdim = ( + ((S, S), (-2, 1,), 'neg_2_dim'), + ((S, S), (-1, 1,), 'neg_1_dim'), + ((S, S), (0, 1,), '0_dim'), + ((S, S), (1, 1,), '1_dim'), + ((S, S), (2, 1,), '2_dim'), + ((S, S), (3, 1,), '3_dim'), + ((S, S, S), (2, 1), '2_dim'), + ((S, S, S), (3, 1), '3_dim'), + ((S, S, S), (2, 1, True), 'keepdim_2_dim'), + ((S, S, S), (3, 1, True), 'keepdim_3_dim'), + ((), (2, 0), '2_dim_scalar'), + ((), (3, 0), '3_dim_scalar'), + ((), (2, 0, True), 'keepdim_2_dim_scalar'), + ((), (3, 0, True), 'keepdim_3_dim_scalar'), + ) + + cases_negdim = ((shape, args[:1] + (-args[1],) + args[2:], name.replace("_dim", "_neg_dim")) + for shape, args, name in cases_posdim) + + for shape, args, name in itertools.chain(cases, cases_posdim, cases_negdim): + yield SampleInput(make_arg(shape), args=args, name=name) + + for shape, args, name in cases_nonzero_input: + yield SampleInput(make_arg(shape, exclude_zero=True), args=args, name=name) + + +def sample_inputs_norm_fro(op_info, device, dtype, requires_grad, **kwargs): + make_arg = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad) + + cases = ( + ((S, S), (), 'default'), + ((S, S), ('fro',), 'fro_default'), + ((S, S), ('fro', [0, 1],), 'fro'), + ) + + for shape, args, name in cases: + yield SampleInput(make_arg(shape), args=args, name=name) + + +def sample_inputs_norm_nuc(op_info, device, dtype, requires_grad, **kwargs): + make_arg = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad) + + cases = ( + ((S, S), ('nuc',), 'nuc'), + ((S, S, S), ('nuc', [1, 2]), 'nuc_batched'), + ) + + for shape, args, name in cases: + yield SampleInput(make_arg(shape), args=args, name=name) + + +def sample_inputs_norm_inf(op_info, device, dtype, requires_grad, **kwargs): + make_arg = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad) + + cases = ( + ((S, S), (-inf,), '-inf'), + ((S, S), (inf,), 'inf'), + ((S, S), (inf, 1,), 'inf_2_dim'), + ((S, S), (inf, -1,), 'inf_2_neg_dim'), + ) + + for shape, args, name in cases: + yield SampleInput(make_arg(shape), 
args=args, name=name) + + +def sample_inputs_equal(op, device, dtype, requires_grad, **kwargs): + make_arg = partial( + make_tensor, device=device, dtype=dtype, requires_grad=requires_grad) + + shapes = ( + ((), ()), + ((S,), ()), + ((), (S,)), + ((S, 1), (S,)), + ((M, S), ()), + ((S, S), (S, S)) + ) + + for shape_lhs, shape_rhs in shapes: + lhs = make_arg(shape_lhs) + rhs = make_arg(shape_rhs) + broadcasts_input = shape_lhs != torch.broadcast_shapes(shape_lhs, shape_rhs) + + yield SampleInput(lhs, args=(rhs,), broadcasts_input=broadcasts_input) + if shape_lhs == shape_rhs: + yield SampleInput(lhs, args=(lhs.clone().detach_(),)) + + + +def sample_inputs_jiterator(op, device, dtype, requires_grad, **kwargs): + make_arg = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad) + + shapes = ( + ((), ()), + ((S,), ()), + ((S, 1), (S,)), + ((M, S), ()), + ((S, M, S), (M, S)), + ((S, M, S), (S, M, S)), + ((M, 1, S), (M, S)), + ((M, 1, S), (1, M, S)), + ((0, 1, 3), (0, 10, 3)) + ) + + num_inputs = kwargs.get('num_inputs') + sample_kwargs = kwargs.get('sample_kwargs', {}) + + for shape_lhs, shape_rhs in shapes: + lhs = make_arg(shape_lhs) + + args = [] + for i in range(num_inputs - 1): + args.append(make_arg(shape_rhs)) + broadcasts_input = (shape_lhs != torch.broadcast_shapes(shape_lhs, shape_rhs)) + + yield SampleInput(lhs, args=tuple(args), kwargs=sample_kwargs, broadcasts_input=broadcasts_input) + +def sample_inputs_broadcast_shapes(op, device, dtype, requires_grad, **kwargs): + shapes = ( + ((), ()), + ((S,), ()), + ((S, 1), (S,)), + ((S, 1), S), + ((M, S), ()), + ((S, M, S), (M, S)), + ((S, M, S), (S, M, S)), + ((M, 1, S), (M, S)), + ((M, 1, S), (1, M, S)), + ((0, 1, 3), (0, 10, 3)) + ) + + for shape in shapes: + inp, *arg0 = shape + yield SampleInput(inp, args=tuple(arg0)) + +def sample_inputs_add_sub(op, device, dtype, requires_grad, **kwargs): + yield from sample_inputs_elementwise_binary(op, device, dtype, requires_grad, **kwargs) + + # Adds alpha kwarg cases + make_arg = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad) + lhs = make_arg((S, S), **op.lhs_make_tensor_kwargs) + rhs = make_arg((S, S), **op.rhs_make_tensor_kwargs) + if dtype is not torch.bool: + yield SampleInput(lhs, args=(rhs,), kwargs={'alpha': 2}) + else: + yield SampleInput(lhs, args=(rhs,), kwargs={'alpha': True}) + neg_alpha = -3.125 if (dtype.is_floating_point or dtype.is_complex) else -3 + lhs = make_arg((S, S), **op.lhs_make_tensor_kwargs) + rhs = make_arg((S, S), **op.rhs_make_tensor_kwargs) + if dtype is not torch.bool: + yield SampleInput(lhs, args=(rhs,), kwargs={'alpha': neg_alpha}) + else: + yield SampleInput(lhs, args=(rhs,), kwargs={'alpha': False}) + +def error_inputs_arange(op, device, **kwargs): + yield ErrorInput(SampleInput(0, args=(3, 0)), error_type=RuntimeError, error_regex='step must be nonzer') + yield ErrorInput(SampleInput(0, args=(-3, 2)), error_type=RuntimeError, error_regex='bound inconsistent with step sign') + yield ErrorInput(SampleInput(0, args=(3, -2)), error_type=RuntimeError, error_regex='bound inconsistent with step sign') + yield ErrorInput(SampleInput(0, args=(float('inf'), 2)), error_type=RuntimeError, error_regex='unsupported range') + yield ErrorInput(SampleInput(float('-inf'), args=(1, 2)), error_type=RuntimeError, error_regex='unsupported range') + +def sample_inputs_arange(op, device, dtype, requires_grad, **kwargs): + int_samples = ( + # positive direction + (-1, 2, 2), + # negative direction + (2, -3, -1), + # start == end + 
(1, 1, 1), + (1, 1, -1), + # divides evenly + (0, -8, -4), + (1, 5, 2), + # bool + (False, True, True), + # default step + (0, 1, None), + # default start + (None, 3, None), + ) + + def to_float(start, end, step): + start = start + 0.1 if start is not None else None + end = end + 0.1 + step = float(step) if step is not None else None + return start, end, step + + float_samples = ( + # includes endpoint + (0., -8. - 1e-6, -4.), + (1., 5. + 1e-6, 2.), + (0., -8., -4.), + (1., 5., 2.), + *(to_float(start, end, step) for (start, end, step) in int_samples), + ) + + large_samples = ( + (0, 10000, None), + ) + + samples = int_samples + float_samples + if dtype not in (torch.int8, torch.uint8): + samples += large_samples + + for start, end, step in samples: + if start is None: + assert step is None + # Pass end as positional arg + yield SampleInput(end, kwargs={"dtype": dtype, "device": device}) + # (Similar to) calling torch.arange(end=3) + yield SampleInput(0, kwargs={"end": end, "dtype": dtype, "device": device}) + elif step is None: + yield SampleInput(start, args=(end,), kwargs={"dtype": dtype, "device": device}) + else: + yield SampleInput(start, args=(end, step), kwargs={"dtype": dtype, "device": device}) + + yield SampleInput(2) + yield SampleInput(1, args=(3, 1)) + +def sample_inputs_randn(op, device, dtype, requires_grad, **kwargs): + make_arg = partial(make_tensor, dtype=dtype, device=device, requires_grad=False) + + shapes = ( + (M,), + (S, S) + ) + + for shape in shapes: + yield SampleInput(input=shape, kwargs=dict(dtype=dtype, device=device, requires_grad=requires_grad)) + +def sample_inputs_normal(op, device, dtype, requires_grad, **kwargs): + + make_arg = partial(make_tensor, dtype=dtype, device=device, requires_grad=False) + samples = ( + ((S, S), 0, 5), + ((S, S, S), -2, 0.5), + ) + for shape, mean, std in samples: + yield SampleInput(make_arg(shape), args=(mean, std)) + +def error_inputs_normal(op, device, **kwargs): + t = torch.zeros([10], device=device) + invalid_std = -1 + yield ErrorInput( + SampleInput(t, args=(0, invalid_std)), + error_type=RuntimeError, + error_regex=fr"normal expects std >= 0.0, but found std {invalid_std}", + ) + +def sample_inputs_cauchy(op, device, dtype, requires_grad, **kwargs): + make_arg = partial(make_tensor, dtype=dtype, device=device, requires_grad=False) + samples = ( + ((M,), 0, 0.5), + ((S, S), 0, 1), + ((S, S, S), -2, 1), + ) + for shape, median, gamma in samples: + yield SampleInput(make_arg(shape), args=(median, gamma)) + + +def error_inputs_cauchy(op, device, **kwargs): + t = torch.zeros([10], device=device) + invalid_scale = 0 + yield ErrorInput( + SampleInput(t, args=(0, invalid_scale,)), + error_type=RuntimeError, + error_regex=fr"cauchy_ expects sigma > 0.0, but found sigma={invalid_scale}", + ) + + +def sample_inputs_exponential(op, device, dtype, requires_grad, **kwargs): + + make_arg = partial(make_tensor, dtype=dtype, device=device, requires_grad=False) + samples = ( + ((M,), 0.5), + ((S, S), 1), + ((S, S, S), 1.5), + ) + for shape, rate in samples: + yield SampleInput(make_arg(shape), args=(rate,)) + + +def error_inputs_exponential(op, device, **kwargs): + t = torch.zeros([10], device=device) + invalid_rate = 0 + yield ErrorInput( + SampleInput(t, args=(invalid_rate,)), + error_type=RuntimeError, + error_regex=fr"exponential_ expects lambda > 0.0, but found lambda={invalid_rate}", + ) + + +def sample_inputs_geometric(op, device, dtype, requires_grad, **kwargs): + + make_arg = partial(make_tensor, dtype=dtype, device=device, 
requires_grad=False) + samples = ( + ((M,), 0.2), + ((S, S), 0.5), + ((S, S, S), 0.8), + ) + for shape, rate in samples: + yield SampleInput(make_arg(shape), args=(rate,)) + + +def error_inputs_geometric(op, device, **kwargs): + t = torch.zeros([10], device=device) + neg_prob = -1 + yield ErrorInput( + SampleInput(t, args=(neg_prob,)), + error_type=RuntimeError, + error_regex=fr"geometric_ expects p to be in \(0, 1\), but got p={neg_prob}", + ) + + +def sample_inputs_log_normal(op, device, dtype, requires_grad, **kwargs): + + make_arg = partial(make_tensor, dtype=dtype, device=device, requires_grad=False) + samples = ( + ((M,), 0, 0.25), + ((S, S), 0.5, 1), + ((S, S, S), 0, 0.5), + ) + for shape, mean, std in samples: + yield SampleInput(make_arg(shape), args=(mean, std)) + + +def error_inputs_log_normal(op, device, **kwargs): + t = torch.zeros([10], device=device) + invalid_std = 0 + yield ErrorInput( + SampleInput(t, args=(0, invalid_std)), + error_type=RuntimeError, + error_regex=fr"log_normal_ expects std > 0.0, but found std={invalid_std}", + ) + + +def sample_inputs_uniform(op, device, dtype, requires_grad, **kwargs): + + make_arg = partial(make_tensor, dtype=dtype, device=device, requires_grad=False) + samples = ( + ((M,), -100, 100), + ((S, S), 0, 1), + ((S, S, S), 1, 2), + ) + for shape, hi, lo in samples: + yield SampleInput(make_arg(shape), args=(hi, lo)) + +def sample_inputs_ones_zeros(op, device, dtype, requires_grad, **kwargs): + # this is a bit messy, as we want the args to be tuples + # so if we pass size as a tuple, we have a tuple containing a tuple + sizes = ( + (M,), + (S, S), + ) + for size in sizes: + yield SampleInput(size, kwargs={'dtype': dtype, 'device': device}) + +def sample_inputs_full(op, device, dtype, requires_grad, **kwargs): + def get_val(dtype): + return make_tensor([], dtype=dtype, device="cpu").item() + + sizes = ( + (M,), + (S, S), + ) + fill_values = [get_val(dtype), get_val(torch.int)] + + for size, fill_value in product(sizes, fill_values): + yield SampleInput(size, fill_value, dtype=dtype, device=device) + + +def error_inputs_uniform(op, device, **kwargs): + t = torch.zeros([10], device=device) + yield ErrorInput( + SampleInput(t, args=(3, -1)), + error_type=RuntimeError, + error_regex=r"uniform_ expects to return a \[from, to\) range, but found from=3 > to=-1", + ) + + +def error_inputs_linspace(op, device, **kwargs): + yield ErrorInput(SampleInput(0, args=(3, -1)), error_type=RuntimeError, error_regex='number of steps must be non-negative') + yield ErrorInput( + SampleInput(0, args=(3, 1.)), + error_type=TypeError, + error_regex="received an invalid combination of arguments - got \\(int, int, float", + ) + yield ErrorInput( + SampleInput(torch.tensor([1, 1], device=device), args=(torch.tensor([3, 3], device=device), 1)), + error_type=RuntimeError, + error_regex="only supports 0-dimensional start and end tensors" + ) + + +def sample_inputs_linspace(op, device, dtype, requires_grad, **kwargs): + ends = (-3, 0, 1, 4, 50) + starts = (-2., 0, 4.3, 50) + nsteps = (0, 1, 50) + # Extra case to replicate off-by-one issue on CUDA + cases = list(product(starts, ends, nsteps)) + [(0, 7, 50)] + for start, end, nstep in cases: + if dtype == torch.uint8 and (end < 0 or start < 0): + continue + yield SampleInput(start, args=(end, nstep), kwargs={"dtype": dtype, "device": device}) + + yield SampleInput(1, args=(3, 1)) + + +def sample_inputs_linspace_tensor_overload(op, device, dtype, requires_grad, **kwargs): + ends = (-3, 0, 1, 4, 50) + starts = (-2., 0, 4.3, 
50) + nsteps = (0, 1, 50) + is_start_end_tensors = ((True, True), (True, False), (False, True)) + make_arg = partial(torch.tensor, device=device, requires_grad=False) + + # Extra case to replicate off-by-one issue on CUDA + cases = list(product(starts, ends, nsteps, is_start_end_tensors)) + [(0, 7, 50, (True, True))] + for start, end, nstep, (is_start_tensor, is_end_tensor) in cases: + if dtype == torch.uint8 and (end < 0 or start < 0): + continue + + tensor_options = {"dtype": dtype, "device": device} + if is_start_tensor: + start = make_arg(start, dtype=torch.float32 if isinstance(start, float) else torch.int64) + if is_end_tensor: + end = make_arg(end, dtype=torch.float32 if isinstance(end, float) else torch.int64) + + yield SampleInput(start, args=(end, nstep), kwargs=tensor_options) + + yield SampleInput(1, args=(3, 1)) + + +def sample_inputs_logspace(op, device, dtype, requires_grad, **kwargs): + ends = (-3, 0, 1.2, 2, 4) + starts = (-2., 0, 1, 2, 4.3) + nsteps = (0, 1, 2, 4) + bases = (2., 1.1) if dtype in (torch.int8, torch.uint8) else (None, 2., 3., 1.1, 5.) + for start, end, nstep, base in product(starts, ends, nsteps, bases): + if dtype == torch.uint8 and end < 0 or start < 0: + continue + if nstep == 1 and isinstance(start, float) and not (dtype.is_complex or dtype.is_floating_point): + # https://github.com/pytorch/pytorch/issues/82242 + continue + if base is None: + yield SampleInput(start, args=(end, nstep), kwargs={"dtype": dtype, "device": device}) + else: + yield SampleInput(start, args=(end, nstep, base), kwargs={"dtype": dtype, "device": device}) + + yield SampleInput(1, args=(3, 1, 2.)) + + +def sample_inputs_logspace_tensor_overload(op, device, dtype, requires_grad, **kwargs): + ends = (-3, 0, 1.2, 2, 4) + starts = (-2., 0, 1, 2, 4.3) + nsteps = (0, 1, 2, 4) + bases = (2., 1.1) if dtype in (torch.int8, torch.uint8) else (None, 2., 3., 1.1, 5.) 
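+    # Each scalar (start, end) case below is also exercised with one or both endpoints passed as 0-dim tensors.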
+ is_start_end_tensors = ((True, True), (True, False), (False, True)) + make_arg = partial(torch.tensor, device=device) + for start, end, nstep, base, (is_start_tensor, is_end_tensor) in product(starts, ends, nsteps, bases, is_start_end_tensors): + if dtype == torch.uint8 and end < 0 or start < 0: + continue + if nstep == 1 and isinstance(start, float) and not (dtype.is_complex or dtype.is_floating_point): + # https://github.com/pytorch/pytorch/issues/82242 + continue + + tensor_options = {"dtype": dtype, "device": device} + + if (is_start_tensor): + start = make_arg(start, dtype=torch.float32 if isinstance(start, float) else torch.int64) + if (is_end_tensor): + end = make_arg(end, dtype=torch.float32 if isinstance(end, float) else torch.int64) + + if base is None: + yield SampleInput(start, args=(end, nstep), kwargs=tensor_options) + else: + yield SampleInput(start, args=(end, nstep, base), kwargs=tensor_options) + + yield SampleInput(1, args=(3, 1, 2.)) + + +def sample_inputs_isclose(op, device, dtype, requires_grad, **kwargs): + yield from sample_inputs_elementwise_binary(op, device, dtype, requires_grad, **kwargs) + + # Creates additional inputs to test the rtol, atol, and equal_nan params + rtols = [0., 1e-7] + atols = [0., 1e-7] + equal_nans = [False, True] + + products = product(rtols, atols, equal_nans) + + make_arg = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad) + for rtol, atol, equal_nan in products: + lhs = make_arg((S, S), **op.lhs_make_tensor_kwargs) + rhs = make_arg((S, S), **op.rhs_make_tensor_kwargs) + + yield SampleInput(lhs, args=(rhs,), + kwargs=dict(rtol=rtol, atol=atol, equal_nan=equal_nan)) + + +def error_inputs_isclose(op, device, **kwargs): + make_float_arg = partial(make_tensor, device=device, dtype=torch.float, requires_grad=False) + + yield ErrorInput(SampleInput(make_float_arg(()), args=(make_float_arg(()),), kwargs={'rtol': -0.4}), + error_type=RuntimeError, + error_regex='rtol must be greater than or equal to zero') + + yield ErrorInput(SampleInput(make_float_arg(()), args=(make_float_arg(()),), kwargs={'atol': -0.4}), + error_type=RuntimeError, + error_regex='atol must be greater than or equal to zero') + + +def sample_inputs_t(op_info, device, dtype, requires_grad, **kwargs): + make_arg = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad) + yield SampleInput(make_arg((1, 2))) + yield SampleInput(make_arg((2,))) + yield SampleInput(make_arg(())) + + +def sample_inputs_mm(op_info, device, dtype, requires_grad, **kwargs): + make_arg = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad) + + def make_arg_conj(size): + return make_arg(size).conj().requires_grad_(requires_grad) + + first_shape, second_shape = (S, M), (M, S) + + yield SampleInput(make_arg(first_shape), args=(make_arg(second_shape),)) + + if dtype.is_complex: + yield SampleInput(make_arg(first_shape), args=(make_arg_conj(second_shape),)) + + +def sample_inputs_addmm(op_info, device, dtype, requires_grad, **kwargs): + alpha_val = kwargs.get('alpha', 2 + 3j if dtype.is_complex else 0.6) + beta_val = kwargs.get('beta', 1 + 2j if dtype.is_complex else 0.2) + tests_list = [ + ((2, 3), (2, 2), (2, 3), False) + ] + tests_with_lhs_broadcasting = [ + ((1,), (2, 2), (2, 3), True), + ((), (2, 2), (2, 3), True) + ] + test_cases = tests_list + tests_with_lhs_broadcasting # type: ignore[operator] + + kwargs = dict(alpha=alpha_val, beta=beta_val) + make_arg = partial(make_tensor, dtype=dtype, device=device, 
requires_grad=requires_grad) + for shape_a, shape_b, shape_c, broadcasts_input in test_cases: + yield SampleInput( + make_arg(shape_a), + make_arg(shape_b), + make_arg(shape_c), + **kwargs, + ).with_metadata(broadcasts_input=broadcasts_input) + + if dtype.is_complex: + shape = (3, 3) + yield SampleInput( + make_arg(shape), + make_arg(shape, requires_grad=False).mH.requires_grad_(requires_grad), + make_arg(shape), + **kwargs, + ) + yield SampleInput( + make_arg(shape), + make_arg(shape), + make_arg(shape, requires_grad=False).mH.requires_grad_(requires_grad), + **kwargs, + ) + +def sample_inputs_sparse_sampled_addmm(op_info, device, dtype, requires_grad, **kwargs): + alpha = 2 + 3j if dtype.is_complex else 0.6 + beta = 1 + 2j if dtype.is_complex else 0.2 + make_arg = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad) + + # sparse.sampled_addmm performs: alpha * (A @ B) * sparse_ones_like(C) + beta * C + for m, n, k in itertools.product([0, 5], repeat=3): + yield SampleInput( + torch.eye(m, n, device=device, dtype=dtype) + .to_sparse_csr() + .requires_grad_(requires_grad), + make_arg((m, k)), + make_arg((k, n)), + alpha=alpha, + beta=beta, + ) + +def sample_inputs_sparse_mm_reduce(op_info, device, dtype, requires_grad, **kwargs): + make_arg = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad) + + reductions = ["sum", "mean", "amax", "amin"] + for m, k, reduce in product([5, 7], [3, 11], reductions): + yield SampleInput( + torch.eye(m, m) + .to(device=device, dtype=dtype) + .to_sparse_csr() + .requires_grad_(requires_grad), + make_arg((m, k)), + reduce, + ) + + +def sample_inputs_mv(self, device, dtype, requires_grad, **kwargs): + make_arg = partial(make_tensor, dtype=dtype, device=device, low=None, high=None, requires_grad=requires_grad) + yield SampleInput(make_arg(S, M), make_arg(M)) + +def sample_inputs_bmm(self, device, dtype, requires_grad, **kwargs): + make_arg = partial(make_tensor, dtype=dtype, device=device, low=None, high=None, requires_grad=requires_grad) + yield SampleInput(make_arg(M, S, M), make_arg(M, M, S)) + +def sample_inputs_dot_vdot(self, device, dtype, requires_grad, **kwargs): + make_arg = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad) + + def make_arg_conj(size): + return make_arg(size).conj().requires_grad_(requires_grad) + + yield SampleInput(make_arg((S, )), make_arg((S, ))) + if dtype.is_complex: + # dot/vdot for (conj(input), conj(arg_tensor)) and (conj(input), arg_tensor) + # is tested in test_conj_view (which tests operations with only conjugated input tensor + # -- not conjugated arg tensors) + yield SampleInput(make_arg((S, )), make_arg_conj((S, ))) + + +def error_inputs_dot_vdot(op_info, device, is_ref=False, **kwargs): + make_input = partial(make_tensor, device=device, dtype=torch.float32) + + if not is_ref: + yield ErrorInput(SampleInput(make_input(1), args=(make_input(3, dtype=torch.float16),)), + error_regex='dot : expected both vectors to have same dtype') + yield ErrorInput(SampleInput(make_input(1, 1), args=(make_input(3),)), + error_regex='1D tensors expected') + yield ErrorInput(SampleInput(make_input(9), args=(make_input(3),)), + error_regex='inconsistent tensor size') + if device != "cpu" and not is_ref: + yield ErrorInput(SampleInput(make_input(3), args=(make_input(3, device="cpu"),)), + error_regex='Expected all tensors to be on the same device') + + +def sample_inputs_addmv(op_info, device, dtype, requires_grad, **kwargs): + make_arg = partial(make_tensor, 
dtype=dtype, device=device, requires_grad=requires_grad) + + test_cases = (((S,), (S, M), (M,), 1, 1, False), + ((S,), (S, M), (M,), 0.2, 0.6, False), + ) + + test_cases_with_broadcast = (((1,), (S, M), (M,), 1, 1, True), + ((1,), (S, M), (M,), 0.2, 0.6, True), + ((), (S, M), (M,), 1, 1, True), + ((), (S, M), (M,), 0.2, 0.6, True), + ) + + cases = test_cases + test_cases_with_broadcast + + # addmv performs: beta * M + alpha * (mat @ vec) + for size, mat, vec, beta, alpha, broadcasts_input in cases: + yield SampleInput(make_arg(size), args=(make_arg(mat), make_arg(vec)), + kwargs=dict(beta=beta, alpha=alpha), broadcasts_input=broadcasts_input) + +def sample_inputs_addbmm(op_info, device, dtype, requires_grad, **kwargs): + make_arg = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad) + + # input_shape, batch1_shape, batch2_shape, beta_val, alpha_val, is_broadcasting + test_cases = [((S, M), (S, S, S), (S, S, M), 1, 1, False), + ((1,), (S, S, S), (S, S, M), 1, 1, True), + ((S, M), (S, S, S), (S, S, M), 0.6, 0.2, False), + ((1,), (S, S, S), (S, S, M), 0.6, 0.2, True), + ((), (S, S, S), (S, S, M), 1, 1, True), + ((), (S, S, S), (S, S, M), 0.6, 0.2, True), + ] + + for input_shape, batch1_shape, batch2_shape, beta, alpha, is_broadcasting in test_cases: + if dtype.is_complex: + beta_complex, alpha_complex = beta * (1 + 2j), alpha * (2 + 3j) + yield SampleInput(make_arg(input_shape), args=(make_arg(batch1_shape), make_arg(batch2_shape)), + kwargs=dict(beta=beta_complex, alpha=alpha_complex), broadcasts_input=is_broadcasting) + yield SampleInput(make_arg(input_shape), args=(make_arg(batch1_shape), make_arg(batch2_shape)), + kwargs=dict(beta=beta, alpha=alpha), broadcasts_input=is_broadcasting) + +def sample_inputs_addcmul_addcdiv(op_info, device, dtype, requires_grad, **kwargs): + make_arg = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad) + test_cases = [(((S, S), (S, S), (S, S)), False), + (((S, S), (S, 1), (1, S)), False), + (((1,), (S, S, 1), (1, S)), True), + (((), (), ()), False), + (((S, S), (), ()), True), + (((), (S, S, 1), (1, S)), True) + ] + + for input_args, broadcasts_input in test_cases: + # addcdiv should accept inputs with zero value + # Currently, it throws ZeroDivisionError when the denominator is zero + # TODO: exclude_zeros can be removed after https://github.com/pytorch/pytorch/issues/73638 is fixed + args = tuple(make_arg(arg, exclude_zero=True) if isinstance(arg, tuple) else arg + for arg in input_args) + yield SampleInput(*args).with_metadata(broadcasts_input=broadcasts_input) + + # addcdiv should accept inputs with zero value + # Currently, it throws ZeroDivisionError when the denominator is zero + # TODO: exclude_zeros can be removed after https://github.com/pytorch/pytorch/issues/73638 is fixed + args = tuple(make_arg(arg, exclude_zero=True) if isinstance(arg, tuple) else arg + for arg in input_args) + yield SampleInput( + *args, value=3.14 if dtype.is_floating_point or dtype.is_complex else 3 + ).with_metadata(broadcasts_input=broadcasts_input) + +def reference_inputs_addcmul_addcdiv(op_info, device, dtype, requires_grad, **kwargs): + yield from sample_inputs_addcmul_addcdiv( + op_info, device, dtype, requires_grad, **kwargs) + + # type promotion cases + supported_dtypes = op_info.supported_dtypes(device) + make_arg = partial(make_tensor, device=device, requires_grad=requires_grad) + + types = ( + (torch.float64, torch.complex128), + (torch.bfloat16, torch.float32), + ) + + values = ( + None, + True, False, + 3.14, 
3, + 1.0, 1, + 0.0, 0, + -3.14, -3, + 3.14 + 2.71j, + ) + + for (type2, type3), value in product(types, values): + if (type2 not in supported_dtypes or + type3 not in supported_dtypes): + continue + + # RuntimeError: value cannot be converted without overflow + if (type(value) is complex and + type2 is not torch.complex128): + continue + + arg1 = make_arg([5, 5], dtype=dtype) + arg2 = make_arg([5, 5], dtype=type2) + arg3 = make_arg([1, 5], dtype=type3) + + # TypeError: addcdiv(): argument 'value' must be Number, not NoneType + if value is not None: + yield SampleInput(arg1, args=(arg2, arg3), kwargs=dict(value=value)) + else: + yield SampleInput(arg1, args=(arg2, arg3)) + +def sample_inputs_baddbmm(op_info, device, dtype, requires_grad, **kwargs): + test_cases = [((S, S, M), (S, S, S), (S, S, M), 1, 1, False), + ((1,), (S, S, S), (S, S, M), 1, 1, True), + ((S, S, M), (S, S, S), (S, S, M), 0.6, 0.2, False), + ((1,), (S, S, S), (S, S, M), 0.6, 0.2, True), + ((), (S, S, S), (S, S, M), 1, 1, True), + ((), (S, S, S), (S, S, M), 0.6, 0.2, True), + ] + make_arg = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad, low=None, high=None) + for (input_shape, batch1_shape, batch2_shape, alpha, beta, broadcasts_input) in test_cases: + yield SampleInput( + make_arg(input_shape), + make_arg(batch1_shape), + make_arg(batch2_shape), + beta=beta, + alpha=alpha + ).with_metadata(broadcasts_input=broadcasts_input) + + if dtype.is_complex: + yield SampleInput( + make_arg(input_shape), + make_arg(batch1_shape), + make_arg(batch2_shape), + beta=beta * (1 + 2j), + alpha=alpha * (2 + 3j), + ).with_metadata(broadcasts_input=broadcasts_input) + + if dtype.is_complex: + shapes = [(S, S, S), (S, M, S), (S, S, M)] + args = tuple(make_arg(s) for s in shapes) + yield SampleInput( + args[0].transpose_(-1, 1), + args[1].transpose(-1, 1).conj().requires_grad_(requires_grad), + args[2].transpose(-1, 1).conj().requires_grad_(requires_grad), + beta=beta * (1 + 2j), + alpha=alpha * (2 + 3j), + ) + +# TODO: add reduction kwargs +def sample_inputs_multilabel_soft_margin_loss(op_info, device, dtype, requires_grad, **kwargs): + _make_tensor = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad) + + shapes = ( + (S,), + (S, S), + ) + + for shape in shapes: + # Produce one with weight and one without. 
+ yield SampleInput(_make_tensor(shape), args=(_make_tensor(shape, requires_grad=False),), kwargs={}) + yield SampleInput(_make_tensor(shape), args=(_make_tensor(shape, requires_grad=False),), + kwargs={'weight': _make_tensor(shape, requires_grad=False)}) + +def sample_inputs_addr(op_info, device, dtype, requires_grad, **kwargs): + make_arg = partial( + make_tensor, device=device, dtype=dtype, requires_grad=requires_grad, low=None, high=None + ) + yield SampleInput(make_arg(S, M), make_arg(S), make_arg(M)) + + yield SampleInput(make_arg(), make_arg(S), make_arg(M)).with_metadata(broadcasts_input=True) + + if dtype.is_complex: + alpha, beta = 0.1 + 0.3j, 0.4 + 0.6j + elif dtype.is_floating_point: + alpha, beta = 0.2, 0.6 + else: + alpha, beta = 2, 3 + + yield SampleInput(make_arg(S, M), make_arg(S), make_arg(M), beta=beta, alpha=alpha) + + yield SampleInput( + make_arg(), + make_arg(S), + make_arg(M), + beta=beta, + alpha=alpha, + ).with_metadata(broadcasts_input=True) + + # These samples fail gradcheck + if dtype.is_floating_point and not requires_grad: + tensor_options = dict(device=device, dtype=dtype, requires_grad=requires_grad) + yield SampleInput( + torch.tensor([[math.nan]], **tensor_options), + torch.tensor([0.0], **tensor_options), + torch.tensor([0.0], **tensor_options), + beta=0.0, + alpha=0.0, + ).with_metadata(broadcasts_input=True) + + yield SampleInput( + torch.tensor([[0.0]], **tensor_options), + torch.tensor([math.nan], **tensor_options), + torch.tensor([math.nan], **tensor_options), + beta=0.0, + alpha=0.0, + ).with_metadata(broadcasts_input=True) + +def sample_inputs_zero_(op_info, device, dtype, requires_grad, **kwargs): + make_arg = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad) + + cases = ((), (S, S, S), (S,)) + + for shape in cases: + yield SampleInput(make_arg(shape)) + +def sample_inputs_multi_margin_loss(op_info, device, dtype, requires_grad, **kwargs): + _make_tensor = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad) + make_target = partial(_make_tensor, dtype=torch.long, requires_grad=False) + make_weight = partial(_make_tensor, requires_grad=False) + + inputs = ( + ((), make_target([], low=0, high=1), {}), + ((S,), make_target([], low=0, high=S), {"p": 1}), + ((S,), make_target([1], low=0, high=S), {"p": 2}), + ((S, M), make_target([S], low=0, high=M), {"margin": 1.0}), + ((S, M), make_target([S], low=0, high=M), {"margin": -3.14}), + ((M, S), make_target([M], low=0, high=S), {"weight": None}), + ((M, S), make_target([M], low=0, high=S), {"weight": make_weight([S], low=-10., high=10.)}), + ((M, S), make_target([M], low=0, high=S), {"reduction": "none"}), + ((M, S), make_target([M], low=0, high=S), {"reduction": "mean"}), + ((M, S), make_target([M], low=0, high=S), {"reduction": "sum"}), + ) + + for input_shape, target, kwargs in inputs: + yield SampleInput(_make_tensor(input_shape), args=(target,), kwargs=kwargs) + + +def reference_inputs_multi_margin_loss(op_info, device, dtype, requires_grad, **kwargs): + yield from sample_inputs_multi_margin_loss(op_info, device, dtype, requires_grad, **kwargs) + _make_tensor = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad) + make_target = partial(_make_tensor, dtype=torch.long, requires_grad=False) + make_weight = partial(_make_tensor, requires_grad=False) + + inputs = ( + ((), make_target([], low=0, high=1)), + ((S,), make_target([], low=0, high=S)), + ((S,), make_target([1], low=0, high=S)), + ((M, S), make_target([M], low=0, 
high=S)), + ) + ps = (1, 2) + margins = (0, 7, -3.14) + weights = (False, True) + reductions = (None, "none", "mean", "sum") + + for (input_shape, target), p, margin, weight, reduction in product(inputs, ps, margins, weights, reductions): + input = _make_tensor(input_shape) + weight_shape = [input.size(-1)] if input.ndim > 0 else [1] + weight = make_weight(weight_shape, low=-10., high=10.) if weight else None + kwargs = {"p": p, "margin": margin, "weight": weight} + if reduction is not None: + kwargs["reduction"] = reduction + yield SampleInput(input, args=(target,), kwargs=kwargs) + + +def error_inputs_multi_margin_loss(op, device, **kwargs): + make_input = partial(make_tensor, device=device, dtype=torch.float32) + # invalid reduction + yield ErrorInput(SampleInput(make_input(5, 4), args=(make_input(5,),), kwargs={'reduction': 'abc'}), + error_type=ValueError, error_regex='abc is not a valid value for reduction') + # invalid input + yield ErrorInput(SampleInput(make_input(5, 0), args=(make_input(5,),), kwargs={}), + error_type=RuntimeError, + error_regex=r'Expected non-empty vector or matrix with optional 0-dim batch size, but got: \[5, 0\]') + yield ErrorInput(SampleInput(make_input(0,), args=(make_input(5,),), kwargs={}), + error_type=RuntimeError, + error_regex=r'Expected non-empty vector or matrix with optional 0-dim batch size, but got: \[0\]') + # invalid target + yield ErrorInput(SampleInput(make_input(5, 4), args=(make_input(5, 4),), kwargs={}), + error_type=RuntimeError, error_regex=r'inconsistent target size, expected 5 but got \[5, 4\]') + # invalid target dtype + yield ErrorInput(SampleInput(make_input(5, 4), args=(make_input(5,),), kwargs={}), + error_type=RuntimeError, error_regex='expected scalar type Long but found Float') + # invalid weight + yield ErrorInput(SampleInput(make_input(5, 4), args=(make_input(5,),), kwargs={'weight': make_input(())}), + error_type=ValueError, error_regex='weight must be one-dimensional') + yield ErrorInput(SampleInput(make_input(5, 4), args=(make_input(5,),), kwargs={'weight': make_input(5, 4)}), + error_type=ValueError, error_regex='weight must be one-dimensional') + yield ErrorInput(SampleInput(make_input(5, 4), args=(make_input(5,),), kwargs={'weight': make_input(5,)}), + error_type=RuntimeError, error_regex=r'inconsistent weight size, expected 4 but got \[5\]') + # invalid p + yield ErrorInput(SampleInput(make_input(5, 4), args=(make_input(5,),), kwargs={'p': 3}), + error_type=ValueError, error_regex='only p == 1 and p == 2 supported') + + +def sample_inputs_logsumexp(self, device, dtype, requires_grad, **kwargs): + inputs = ( + ((), (0,), True), + ((S, S), (1,), True), + ((S, S), (1,), False), + ((S, S), (-2,), False), + ((S, S), (0, 1), False), + ) + # Test large inputs to check numerical stability + lows = (None, 1e3, 1e6) if dtype in (torch.float32, torch.float64) else (None,) + for low in lows: + high = low * 2 if low is not None else None + for shape, dim, keepdim in inputs: + t = make_tensor(shape, dtype=dtype, device=device, + low=low, high=high, + requires_grad=requires_grad) + yield SampleInput(t, dim, keepdim) + +def reference_inputs_logsumexp(op, device, dtype, requires_grad, **kwargs): + yield from sample_inputs_logsumexp(op, device, dtype, requires_grad, **kwargs) + + # https://github.com/pytorch/pytorch/issues/91843 + t = torch.tensor([20, 30, 100], dtype=dtype, device=device, requires_grad=requires_grad) + yield SampleInput(t, 0, False) + + t = torch.tensor((), dtype=dtype, device=device, requires_grad=requires_grad) + 
yield SampleInput(t, 0, False) + + # tests masking + # https://github.com/pytorch/pytorch/pull/91860#pullrequestreview-1241344073 + t = torch.tensor(float("inf")) + yield SampleInput(t, 0, True) + +def sample_inputs_like_fns(self, device, dtype, requires_grad, **kwargs): + inputs = [ + ((), {}), + ((S, S), {}), + ((0, S, 0), {}), + ((S,), {'dtype': dtype, 'device': device}), + # Hard-code some dtypes/devices. We want to test cases where the + # (dtype, device) is different from the input's (dtype, device) + ((S,), {'dtype': torch.double}), + ((S,), {'device': 'cpu'}), + ((S,), {'dtype': torch.double, 'device': 'cpu'}), + ] + if torch.cuda.is_available(): + inputs.append(((S,), {'device': 'cuda'})) + + for shape, kwargs in inputs: + t = make_tensor(shape, dtype=dtype, device=device, + low=None, high=None, + requires_grad=requires_grad) + yield SampleInput(t, **kwargs) + +def reference_inputs_like_fns(op, device, dtype, requires_grad, **kwargs): + yield from sample_inputs_like_fns(op, device, dtype, requires_grad, **kwargs) + + # shape + cases = ( + (), (0,), (1, 0), (1, 1, 4, 5), (5, 3, 0, 1), (1, 4, 3, 1, 1) + ) + + make_arg = partial(make_tensor, dtype=dtype, device=device, requires_grad=requires_grad) + for shape in cases: + yield SampleInput(make_arg(shape)) + yield SampleInput(make_arg(shape).transpose(0, -1)) + yield SampleInput(make_arg(shape, noncontiguous=True)) + yield SampleInput(make_arg(shape, noncontiguous=True).transpose(0, -1)) + +def sample_inputs_multilabel_margin_loss(op_info, device, dtype, requires_grad, **kwargs): + _make_tensor = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad) + make_target = partial(_make_tensor, dtype=torch.long, requires_grad=False) + + inputs = ( + ([], make_target([], low=0, high=1), {}), + ([S], make_target([S], low=0, high=S), {}), + ([M, S], make_target([M, S], low=0, high=S), {}), + ([M, S], make_target([M, S], low=0, high=S), {"reduction": "none"}), + ([M, S], make_target([M, S], low=0, high=S), {"reduction": "mean"}), + ([M, S], make_target([M, S], low=0, high=S), {"reduction": "sum"}), + ) + + for shape, target, kwargs in inputs: + yield SampleInput(_make_tensor(shape), args=(target,), kwargs=kwargs) + + +def reference_inputs_multilabel_margin_loss(op_info, device, dtype, requires_grad, **kwargs): + yield from sample_inputs_multilabel_margin_loss(op_info, device, dtype, requires_grad, **kwargs) + _make_tensor = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad) + make_target = partial(_make_tensor, dtype=torch.long, requires_grad=False) + make_target_tensor = partial(torch.tensor, device=device, dtype=torch.long, requires_grad=False) + + inputs = ( + # random tests including -1 target labels + ([], make_target([], low=-1, high=1)), + ([S], make_target([S], low=-1, high=S)), + ([M, S], make_target([M, S], low=-1, high=S)), + # repeated target labels and -1 (labels after the first -1 are ignored) + ([], make_target_tensor(-1)), + ([7], make_target_tensor([2, 0, 6, -1, 4, -1, 6])), + ([4, 5], make_target_tensor([[4, -1, 0, -1, 2], [0, 0, 4, 1, 4], [-1, 3, -1, 1, 0], [4, 3, 2, 1, 0]])), + ) + reductions = (None, "none", "mean", "sum") + + for (shape, target), reduction in product(inputs, reductions): + kwargs = {} + if reduction is not None: + kwargs["reduction"] = reduction + yield SampleInput(_make_tensor(shape), args=(target,), kwargs=kwargs) + + +def error_inputs_multilabel_margin_loss(op, device, **kwargs): + make_input = partial(make_tensor, device=device, dtype=torch.float32) + # 
invalid reduction + yield ErrorInput(SampleInput(make_input(5, 4), args=(make_input(5, 4),), kwargs={'reduction': 'abc'}), + error_type=ValueError, error_regex='abc is not a valid value for reduction') + # invalid input + yield ErrorInput(SampleInput(make_input(5, 0), args=(make_input(5, 4),), kwargs={}), + error_type=RuntimeError, + error_regex=r'Expected non-empty vector or matrix with optional 0-dim batch size, but got: \[5, 0\]') + yield ErrorInput(SampleInput(make_input(0,), args=(make_input(0,),), kwargs={}), + error_type=RuntimeError, + error_regex=r'Expected non-empty vector or matrix with optional 0-dim batch size, but got: \[0\]') + # invalid target + yield ErrorInput(SampleInput(make_input(5, 4), args=(make_input(4,),), kwargs={}), + error_type=RuntimeError, + error_regex=r'inconsistent target size: \[4\] for input of size: \[5, 4\]') + yield ErrorInput(SampleInput(make_input(5, 4), args=(make_input((),),), kwargs={}), + error_type=RuntimeError, + error_regex=r'inconsistent target size: \[\] for input of size: \[5, 4\]') + + +def get_independent_tensor(tensor): + return tensor.clone().requires_grad_(tensor.requires_grad) + +def sample_inputs_randint(self, device, dtype, requires_grad, **kwargs): + low = 2 + high = 10 + + for sample in sample_inputs_like_fns(self, device, dtype, requires_grad, **kwargs): + sample.kwargs.setdefault('device', device) + # With high + yield SampleInput(high, sample.input.shape, *sample.args, **sample.kwargs) + # With low and high + yield SampleInput(low, high, sample.input.shape, *sample.args, **sample.kwargs) + +def sample_inputs_randint_like(self, device, dtype, requires_grad, **kwargs): + low = 2 + high = 10 + + for sample in sample_inputs_like_fns(self, device, dtype, requires_grad, **kwargs): + # With high + yield SampleInput( + sample.input, + high, + *sample.args, + **sample.kwargs) + # With low and high + yield SampleInput( + get_independent_tensor(sample.input), + low, + high, + *sample.args, + **sample.kwargs) + +def sample_inputs_margin_ranking_loss(op_info, device, dtype, requires_grad, **kwargs): + _make_tensor = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad) + + shapes = ( + (), + (S,), + (S, S), + (S, S, S), + ) + + margins = (0., 1.) 
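+    # The loop below pairs every (margin, reduction) combination with each input shape.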
+    reductions = ('sum', 'mean', 'none')
+
+    for shape in shapes:
+        for margin, reduction in product(margins, reductions):
+            kwargs = {'margin': margin, 'reduction': reduction}
+            yield SampleInput(_make_tensor(shape),
+                              args=(_make_tensor(shape, requires_grad=False),
+                                    _make_tensor(shape, requires_grad=False)),
+                              kwargs=kwargs)
+
+def reference_inputs_margin_ranking_loss(op, device, dtype, requires_grad, **kwargs):
+    yield from sample_inputs_margin_ranking_loss(op, device, dtype, requires_grad, **kwargs)
+    make_input = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad)
+
+    for reduction in ('sum', 'mean', 'none'):
+        if dtype.is_floating_point:  # only supports ints and floats
+            # NaN propagation
+            inp1 = make_input((10, ))
+            inp1[2] = float('nan')
+            inp2 = make_input((10, ))
+            inp2[4] = float('nan')
+            target = make_input((10, ))
+            target[9] = float('nan')
+            yield SampleInput(inp1, args=(inp2, target), kwargs={'reduction': reduction})
+
+            # Inf handling
+            inp1 = make_input((10, ))
+            inp1[1] = float('inf')
+            inp2 = make_input((10, ))
+            inp2[4] = float('inf')
+            target = make_input((10, ))
+            target[7] = float('inf')
+            yield SampleInput(inp1, args=(inp2, target), kwargs={'reduction': reduction})
+
+            # Broadcasting
+            inp1 = make_input((5, 2))
+            inp2 = make_input((5, 1))
+            target = make_input((1, 2))
+            yield SampleInput(inp1, args=(inp2, target), kwargs={'reduction': reduction})
+
+def error_inputs_margin_ranking_loss(op, device, **kwargs):
+    make_input = partial(make_tensor, device=device, dtype=torch.float32)
+    # invalid reduction value.
+    yield ErrorInput(SampleInput(make_input(5, 4), args=(make_input(5, 4), make_input(5, 4),), kwargs={'reduction': 'abc'}),
+                     error_type=ValueError, error_regex='is not a valid value')
+    # invalid input shapes
+    yield ErrorInput(SampleInput(make_input(5, 4), args=(make_input(5, 4), make_input(5,),)),
+                     error_regex='margin_ranking_loss : All input tensors should')
+
+def sample_inputs_new_fns(self, device, dtype, requires_grad, *, is_strided=False, **kwargs):
+    # input_shape, output_shape, strides, kwargs
+    # lengths of output_shape and strides must be equal
+    inputs = [
+        ((), (), (), {}),
+        ((S, S), (2, 0), (3, 4), {}),
+        ((0, S, 0), (3, 2, 2), (1, 2, 3), {}),
+        ((S,), (2, 3), (7, 8), {'dtype': dtype, 'device': device}),
+        # Hard-code some dtypes/devices. 
We want to test cases where the + # (dtype, device) is different from the input's (dtype, device) + ((S,), (10,), (S,), {'dtype': torch.double}), + ((S,), (1, 1, 12), (S, L, M), {'device': 'cpu'}), + ((S,), (2, 2, 2), (L, M, S), {'dtype': torch.double, 'device': 'cpu'}), + ] + if torch.cuda.is_available(): + inputs.append(((S,), (7, 2), (3, 4), {'device': 'cuda'})) + + for input_shape, output_shape, strides, kwargs in inputs: + t = make_tensor(input_shape, dtype=dtype, device=device, + low=None, high=None, + requires_grad=requires_grad) + if is_strided: + yield SampleInput(t, output_shape, strides, **kwargs) + else: + yield SampleInput(t, output_shape, **kwargs) + +def sample_inputs_empty_strided(op, device, dtype, requires_grad=False, **kwargs): + + inputs = [ + ((), (), {'dtype': dtype, 'device': device}), + ((S,), (4,), {'dtype': dtype, 'device': device}), + ((S, S), (2, 1), {'dtype': dtype, 'device': device}), + ((S, S, S), (2, 0, 1), {'dtype': dtype, 'device': device}), + ] + + for shape, strides, kwargs in inputs: + yield SampleInput(shape, strides, requires_grad=requires_grad, **kwargs) + +def sample_inputs_empty(op, device, dtype, requires_grad, **kwargs): + # shape + cases = ( + (), (0,), (1,), (1, 3, 5), (5, 3, 1), (1, 0, 5, 1), + ) + + for case in cases: + yield SampleInput(case, device=device, dtype=dtype, requires_grad=requires_grad) + +def sample_inputs_empty_permuted(op, device, dtype, requires_grad, **kwargs): + # shape + cases = ( + (), (0,), (1,), (1, 3, 5), (5, 3, 1), (1, 0, 5, 1), + ) + + for case in cases: + for layout in itertools.permutations(range(len(case))): + yield SampleInput(case, layout, device=device, dtype=dtype, requires_grad=requires_grad) + +def error_inputs_empty_permuted(op_info, device, **kwargs): + yield ErrorInput( + SampleInput((2,), args=((0, 1),)), + error_type=RuntimeError, + error_regex="Number of dimensions in size does not match the length of the physical_layout" + ) + yield ErrorInput( + SampleInput((2,), args=((3,),)), + error_type=RuntimeError, + error_regex="Dimension out of range" + ) + yield ErrorInput( + SampleInput((2, 3), args=((0, 0),)), + error_type=RuntimeError, + error_regex="Duplicate dim not allowed" + ) + +def sample_inputs_scalar_tensor(op, device, dtype, requires_grad, **kwargs): + # Not including a scalar tensor in vals because meta tests start failing due to + # lack of meta support for _local_scalar_dense + # torch.tensor(2, device=device) + vals = (-5, 0, 1) + + for item in vals: + yield SampleInput(item, device=device, dtype=dtype, requires_grad=requires_grad) + +def sample_inputs_eye(op, device, dtype, requires_grad, **kwargs): + # only ints >= 0 are allowed for both arguments, unless m is omitted + sizes = (None, 0, 1, 2, 3, 4, 7, L, M, S) + + for n, m in product(sizes, sizes): + if n is None: + continue + + # TODO: no layout + _kwargs = {'device': device, 'dtype': dtype, 'requires_grad': requires_grad} + if m is None: + yield SampleInput(n, args=(), kwargs=_kwargs) + else: + yield SampleInput(n, args=(m,), kwargs=_kwargs) + +def error_inputs_eye(op_info, device, **kwargs): + # TODO: no layout + _kwargs = {'device': device, 'dtype': torch.float32} + + yield ErrorInput( + SampleInput(-1, args=(), kwargs=_kwargs), + error_regex="n must be greater or equal to 0, got -1" + ) + + yield ErrorInput( + SampleInput(-7, args=(42,), kwargs=_kwargs), + error_regex="n must be greater or equal to 0, got -7" + ) + + yield ErrorInput( + SampleInput(0, args=(-3,), kwargs=_kwargs), + error_regex="m must be greater or equal to 0, got -3" 
+ ) + + +def sample_inputs_new_full(self, device, dtype, requires_grad, **kwargs): + def get_val(dtype): + return make_tensor([], dtype=dtype, device="cpu").item() + + for sample in sample_inputs_new_fns(self, device, dtype, requires_grad, **kwargs): + # The scalar we are passing to new_full must be the same dtype + # as the one of the resulting tensor + use_dtype = sample.kwargs['dtype'] if 'dtype' in sample.kwargs else dtype + yield SampleInput( + sample.input, *sample.args, get_val(use_dtype), **sample.kwargs) + +def sample_inputs_full_like(self, device, dtype, requires_grad, **kwargs): + def get_val(dtype): + return make_tensor([], dtype=dtype, device="cpu").item() + + inputs = [ + ((), get_val(dtype), {}), + ((S, S), get_val(dtype), {}), + ((0, S, 0), get_val(dtype), {}), + ((S,), get_val(dtype), {'dtype': dtype, 'device': device}), + # Hard-code some dtypes/devices. We want to test cases where the + # (dtype, device) is different from the input's (dtype, device) + ((S,), get_val(torch.double), {'dtype': torch.double}), + ((S,), get_val(dtype), {'device': 'cpu'}), + ((S,), get_val(torch.double), {'dtype': torch.double, 'device': 'cpu'}), + ] + if torch.cuda.is_available(): + inputs.append(((S,), get_val(dtype), {'device': 'cuda'})) + + for shape, fill_value, kwargs in inputs: + t = make_tensor(shape, dtype=dtype, device=device, + low=None, high=None, + requires_grad=requires_grad) + yield SampleInput(t, fill_value, **kwargs) + +def sample_inputs_multinomial(self, device, dtype, requires_grad, **kwargs): + cases = [ + ([3], 3, {}), + ([10], 3, {}), + ([3, 10], 3, {}), + ([3], 3, dict(replacement=False)), + ([3], 3, dict(replacement=True)), + ([3, 4], 4, dict(replacement=True)), + ([3, 4], 4, dict(replacement=False)), + ] + + for shape, num_samples, kwargs in cases: + t = make_tensor(shape, dtype=dtype, device=device, + low=0, high=None, + requires_grad=requires_grad) + yield SampleInput(t, num_samples, **kwargs) + +def sample_inputs_normal_common(self, device, dtype, requires_grad, cases, **kwargs): + def get_value_or_make_tensor(value_or_shape): + if isinstance(value_or_shape, list): + return make_tensor(value_or_shape, dtype=dtype, device=device, + low=0, high=None, + requires_grad=requires_grad) + return value_or_shape + + for value_or_mean_shape, value_or_std_shape, kwargs in cases: + mean = get_value_or_make_tensor(value_or_mean_shape) + std = get_value_or_make_tensor(value_or_std_shape) + yield SampleInput(mean, std, **kwargs) + +def sample_inputs_normal_tensor_first(self, device, dtype, requires_grad, **kwargs): + # value_or_size, value_or_size, kwargs + cases = [ + ([], [], {}), + ([3], [3], {}), + ([3, 4, 2], [3, 4, 2], {}), + ([2, 3], 1.1, {}), + ([1, 2, 3], [5, 2, 3], {}), # broadcasting + ] + + return sample_inputs_normal_common(self, device, dtype, requires_grad, cases, **kwargs) + +def sample_inputs_normal_tensor_second(self, device, dtype, requires_grad, **kwargs): + yield SampleInput(1.6, 0.3, [2, 3], dtype=dtype, device=device) + yield SampleInput(1.6, 0.3, [2, 2, 2], dtype=dtype, layout=torch.strided, device=device) + yield SampleInput(2.7, make_tensor([4, 3], dtype=dtype, device=device, low=0, high=None, requires_grad=requires_grad)) + +def sample_inputs_bernoulli(self, device, dtype, requires_grad, **kwargs): + shapes = [ + [3], + [], + [0, 3], + [2, 3, 4], + ] + + for shape in shapes: + t = make_tensor(shape, dtype=dtype, device=device, + low=0, high=1, + requires_grad=requires_grad) + yield SampleInput(t) + +def error_inputs_bernoulli(op_info, device, **kwargs): + 
# more than one element of the written-to tensor refers to a single memory location + x = torch.rand((1,), device=device).expand((6,)) + err_msg = 'unsupported operation' + yield ErrorInput(SampleInput(torch.rand_like(x), kwargs={'out': x}), + error_regex=err_msg) + +def sample_inputs_logcumsumexp(self, device, dtype, requires_grad, **kwargs): + inputs = ( + ((S, S, S), 0), + ((S, S, S), 1), + ((), 0), + ) + + for large_number in (True, False): + for shape, dim in inputs: + t = make_tensor(shape, dtype=dtype, device=device, + low=None, high=None, + requires_grad=requires_grad) + + if large_number and t.dim() > 0: + t[0] = 10000 + yield SampleInput(t, dim) + +def sample_inputs_trace(self, device, dtype, requires_grad, **kwargs): + yield SampleInput( + make_tensor((S, S), dtype=dtype, device=device, + low=None, high=None, + requires_grad=requires_grad)) + + +def error_inputs_trace(op, device): + yield ErrorInput(SampleInput(make_tensor((3, 4, 5), dtype=torch.float32, device=device)), error_regex="expected a matrix") + + +def sample_inputs_renorm(self, device, dtype, requires_grad, **kwargs): + make_arg = partial(make_tensor, dtype=dtype, device=device, requires_grad=requires_grad) + cases = (((S, S, S), (2, 1, 0.5)), + ((S, S, S), (2, -1, 0.5)), + ((S, S, S), (1, 2, 3)), + ((S, S, S), (float('inf'), 2, 0.5)), + ) + + for shape, args in cases: + yield SampleInput(make_arg(shape), args=args) + + +def sample_inputs_transpose_swapdims(self, device, dtype, requires_grad, **kwargs): + make_arg = partial(make_tensor, dtype=dtype, device=device, requires_grad=requires_grad) + + cases = (((1, 2, 3), (-1, -2)), + ((1, 2, 3), (-1, 2)), + ((1, 2, 3), (1, -2)), + ((1, 2, 3), (1, 2)), + ((), (0, 0)), + ((1, ), (0, 0)), + ((M, M), (0, 1)), + ((S, S, S), (2, 0)), ) + + for shape, args in cases: + yield SampleInput(make_arg(shape), args=args) + +def _numpy_ref_transpose(a, dim0, dim1): + if a.ndim <= 1: + return a + + return np.swapaxes(a, dim0, dim1) + +def sample_inputs_adjoint(self, device, dtype, requires_grad, **kwargs): + make_arg = partial(make_tensor, dtype=dtype, device=device, requires_grad=requires_grad) + + shapes = ((1, 2, 3), (M, M), (S, S, S), (S, M, S), (M, S, M, S)) + return (SampleInput(make_arg(shape)) for shape in shapes) + +def sample_inputs_T(self, device, dtype, requires_grad, **kwargs): + make_arg = partial(make_tensor, dtype=dtype, device=device, requires_grad=requires_grad) + + shapes = ((M, M), (M, L)) + return (SampleInput(make_arg(shape)) for shape in shapes) + +def error_inputs_T(self, device, has_ndims_error=False): + make_arg = partial(make_tensor, device=device, dtype=torch.float32) + + # Deprecated behavior in regular PyTorch, but throws an error in primTorch: + # https://github.com/pytorch/pytorch/issues/86968 + if has_ndims_error: + # ndims == 1 + yield ErrorInput(SampleInput(make_arg(M)), + error_regex=(r'The use of `x\.T` on tensors of dimension other than 0 or 2 ' + r'to reverse their shape is not supported\.')) + + # ndims > 2 + yield ErrorInput(SampleInput(make_arg(M, S, L)), + error_regex=(r'The use of `x\.T` on tensors of dimension other than 0 or 2 ' + r'to reverse their shape is not supported\.')) + + +def sample_inputs_singular_matrix_factors(op_info, device, dtype, requires_grad=False, **kwargs): + """ + This function produces two tensors of shape (*, m, k) and (*, n, k) with k <= min(m, n). + Their matrix product could be used to generate tensor of shape (*, m, n) of rank k. 
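+    For example, two factors of shape (5, 2) and (4, 2) multiply (against the second
+    factor's transpose) into a (5, 4) matrix of rank at most 2.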
+ """ + + make_arg = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad) + batches = [(), (0, ), (2, ), (1, 1)] + size = [1, 5, 10] + + for batch, m, n in product(batches, size, size): + for k in range(min(3, m, n)): + a = make_arg((*batch, m, k)) + b = make_arg((*batch, n, k)) + yield SampleInput(a, b, **kwargs) + + +def sample_inputs_svd_lowrank(op_info, device, dtype, requires_grad=False, **kwargs): + for sample in sample_inputs_singular_matrix_factors(op_info, device, dtype, requires_grad, **kwargs): + *batch, m, k = sample.input.shape + *_, n, _ = sample.args[0].shape + + # NOTE: since svd_lowrank relies on non rank-revealing SVD, + # it inherits the problem of unstable behavior with repeated + # singular values including zeros. + # Since we want to avoid (repeated) zeros as singular values, + # we can only use k for q. + # This issues could be resolved with using a rank-revealing SVD + # which does not include "zero" singular values. + op_kwargs = { + 'q': k, + 'M': None + } + + # without M specified + yield clone_sample(sample, **op_kwargs) + + # now with M + # TODO: fix bug in the documentation for svd_lowrank: + # M has to be (*, m, n), and not (*, 1, n) as written + # in the documentation + op_kwargs['M'] = make_tensor((*batch, m, n), dtype=dtype, device=device, requires_grad=requires_grad) + yield clone_sample(sample, **op_kwargs) + +def chunk_iter(iterable, size): + it = iter(iterable) + while True: + chunk = tuple(islice(it, size)) + if not chunk: + break + yield chunk + +def sample_inputs_pca_lowrank(op_info, device, dtype, requires_grad=False, **kwargs): + # we reuse samples from svd_lowrank which come in group of two with + # kwarg['M'] = None and with kwarg['M'] = + samples = sample_inputs_svd_lowrank(op_info, device, dtype, requires_grad, **kwargs) + for s1, s2 in chunk_iter(samples, 2): + del s1.kwargs['M'] + del s2.kwargs['M'] + s1.kwargs['center'] = False + s2.kwargs['center'] = True + yield s1 + yield s2 + +def np_sinc_with_fp16_as_fp32(x): + # Wraps numpy's sinc function so that fp16 values are promoted to fp32 + # before sinc is invoked. Context: numpy's sinc returns NaN when evaluated + # at 0 for fp16. 
+ if x.dtype == np.float16: + return np.sinc(x.astype(np.float32)) + else: + return np.sinc(x) + +def sample_inputs_broadcast_to(op_info, device, dtype, requires_grad, **kwargs): + test_cases = ( + ((S, 1, 1), (S, S, S)), + ((S, 1, S), (S, S, S)), + ((S, 1), (S, S, S)), + ((1,), (S, S, S)), + ((1, S), (1, 1, S)), + ((), ()), + ((), (1, 3, 2)), + ) + + return ( + SampleInput( + make_tensor(size, dtype=dtype, device=device, low=None, high=None, requires_grad=requires_grad), + shape, + ) for size, shape in test_cases) + +def sample_inputs_broadcast_tensors(op_info, device, dtype, requires_grad, **kwargs): + make_arg = partial(make_tensor, dtype=dtype, device=device, requires_grad=requires_grad) + test_cases: Tuple[tuple] = (((3,), (1, 2, 1), (1, 1), (5, 1, 1),),) + + for shape, *other_shapes in test_cases: + yield SampleInput(make_arg(shape), args=tuple(make_arg(s) for s in other_shapes)) + +def reference_inputs_broadcast_tensors(op, device, dtype, requires_grad, **kwargs): + yield from sample_inputs_broadcast_tensors(op, device, dtype, requires_grad, **kwargs) + + m = partial(make_tensor, dtype=dtype, device=device, requires_grad=requires_grad) + n = partial(make_tensor, dtype=dtype, device=device, requires_grad=requires_grad, noncontiguous=True) + + cases = ( + ((), (1, 1), (1, 1, 7, 1), (3, 1, 1)), + ((3, 5, 6), (1, 3, 5, 6), (1, 1, 1, 1, 6), (8, 3, 5, 6)) + ) + + for a, b, c, d in cases: + yield SampleInput(m(a), args=(m(b), m(c), m(d))) + yield SampleInput(n(a), args=(n(b), n(c), n(d))) + +def sample_inputs_block_diag(op_info, device, dtype, requires_grad, **kwargs): + make_arg = partial(make_tensor, dtype=dtype, device=device, requires_grad=requires_grad) + test_cases: Tuple[tuple] = ( + ((1, S), (2, S), (3, S),), + ((S, 1), (S, 2), (S, 3),), + ((1,), (2,), (3,),), + ((2, S), (S,)) + ) + + for shape, *other_shapes in test_cases: + yield SampleInput(make_arg(shape), args=tuple(make_arg(s) for s in other_shapes)) + # We also want to test mixed complex-non-complex inputs to block_diag + if dtype == torch.complex32 or dtype == torch.complex64: + non_complex_dtype = torch.float32 if dtype == torch.complex32 else torch.float64 + make_arg_non_complex = partial(make_tensor, dtype=non_complex_dtype, device=device, requires_grad=requires_grad) + yield SampleInput(make_arg_non_complex(shape), args=tuple(make_arg(s) for s in other_shapes)) + +def sample_inputs_cdist(op_info, device, dtype, requires_grad, **kwargs): + small_S = 2 + test_cases = ( + ((S, S, 2), (S, S + 1, 2)), + ((S, S), (S, S)), + ((S, S, S), (S, S, S)), + ((3, 5), (3, 5)), + ((2, 3, 5), (2, 3, 5)), + ((1, 2, 3), (1, 2, 3)), + ((1, 1), (S, 1)), + ((0, 5), (4, 5)), + ((4, 5), (0, 5)), + ((0, 4, 5), (3, 5)), + ((4, 5), (0, 3, 5)), + ((0, 4, 5), (1, 3, 5)), + ((1, 4, 5), (0, 3, 5)), + # Using S here would make this one test take 9s + ((small_S, small_S, small_S + 1, 2), (small_S, small_S, small_S + 2, 2)), + ((small_S, 1, 1, small_S), (1, small_S, small_S)), + ((1, 1, small_S), (small_S, 1, small_S, small_S)), + ) + + make_arg = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad) + for cm in ['use_mm_for_euclid_dist', 'donot_use_mm_for_euclid_dist']: + # FIXME add an override for JIT and revert 0. 
back to 0 + # since it's accepted by eager + for p in [0., 1., 2., 3., 0.5, 1.5, 2.5, float("inf")]: + for t1_size, t2_size in test_cases: + # The args should never be non-contiguous as this is not supported in the backward + yield SampleInput(make_arg(t1_size), make_arg(t2_size), p, cm) + +def _fill_np(a, value): + a = a.copy() + a.fill(value) + return a + +def _fill_sample_kwargs(device, dtype, input): + if dtype is torch.bool: + value = True + else: + value = 3 + + return ({'value': value}, {'value': value}) + +def sample_inputs_comparison_ops(op, device, dtype, requires_grad, **kwargs): + yield from sample_inputs_elementwise_binary(op, device, dtype, requires_grad, **kwargs) + + # Adds a sample input where both tensors have the same values + make_arg = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad) + + lhs = make_arg((S, S)) + yield SampleInput(lhs, args=(lhs.clone(),)) + +def sample_inputs_stack(op_info, device, dtype, requires_grad, **kwargs): + make_arg = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad) + + # shape x number of tensors + cases = ( + ((3, 4), 1), + ((1, 2, 1, 4), 3), + ((0, 1, 0), 2),) + + for shape, num_tensors in cases: + tensors = [] + for _ in range(num_tensors): + tensors.append(make_arg(shape)) + for dim in range(-1, len(shape) - 1): + yield SampleInput(tensors, args=(dim,)) + +def sample_inputs_cat_concat(op_info, device, dtype, requires_grad, **kwargs): + make_arg = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad) + + cases: Tuple[tuple, tuple, dict] = ( # type: ignore[assignment] + ((S, S), (S, S), {'dim': -1}), + ((S, S), (S, S), {'dim': 1}), + ((M, S), (S, S), {'dim': 0}), # different shapes + ((1, 2, 3), (1, 2, 3), {'dim': -2}), + ((0,), (0,), {'dim': 0}), # empty tensor + ((0,), (S, S), {'dim': 1}), # empty tensor with unempty and dim=1 (special case for legacy_cat_wrap_dim) + ((0, S), (S, S), {'dim': 0}), + ((1,), (1,), {}) # dim not passed, fallback to default + ) + + for input_shape1, input_shape2, kwargs in cases: + yield SampleInput([make_arg(input_shape1), make_arg(input_shape2)], kwargs=kwargs) + + # from coat_lite_mini + yield SampleInput([make_arg((2, 2, 2, 2), memory_format=torch.channels_last)], args=(1,),) + +def error_inputs_cat(op_info, device, **kwargs): + + make_arg = partial(make_tensor, device=device, dtype=torch.float32) + + # error inputs for more than one element of the written-to tensor refer to a single memory location + yield ErrorInput(SampleInput([make_arg((S, S)), make_arg((S, S))], + kwargs={'out': make_arg((1, S)).expand((2 * S, S))}), + error_regex='unsupported operation') + + # error inputs for empty tensors + yield ErrorInput(SampleInput([], kwargs={'dim': 1}), + error_regex='non-empty list of Tensors') + + # error inputs for different sizes + yield ErrorInput(SampleInput([make_arg((S, S, L, L)), make_arg((S, 0, L - 1, L))], kwargs={'dim': 1}), + error_regex='Sizes of tensors must match except in dimension') + yield ErrorInput(SampleInput([make_arg((S, 0, L - 1, L)), make_arg((S, S, L, L))], kwargs={'dim': 1}), + error_regex='Sizes of tensors must match except in dimension') + + # error inputs for different dimensions + yield ErrorInput(SampleInput([make_arg((S - 1, 0)), make_arg((S, 0, L - 1, L))], kwargs={'dim': 1}), + error_regex='Tensors must have same number of dimensions') + yield ErrorInput(SampleInput([make_arg((S, 0, L - 1, L)), make_arg((S - 1, 0))], kwargs={'dim': 1}), + error_regex='Tensors must have same number of 
dimensions') + + # error inputs for same memory locations + x = torch.zeros((0), device=device) + y = torch.randn((4, 6), device=device) + + err_msg = "the written-to tensor refer to a single memory location" + + yield ErrorInput(SampleInput((x, y), kwargs={'dim': 0, 'out': x}), + error_regex=err_msg) + yield ErrorInput(SampleInput((x, y), kwargs={'dim': 0, 'out': y}), + error_regex=err_msg) + + z = torch.zeros((4, 6), device=device) + yield ErrorInput(SampleInput((y, z), kwargs={'out': z[:2, :]}), + error_regex=err_msg) + + # error inputs for different devices + if torch.device(device).type == 'cuda': + x_cuda = make_tensor((3, 3), device=device, dtype=torch.float32) + y_cpu = make_tensor((3, 3), device='cpu', dtype=torch.float32) + yield ErrorInput(SampleInput((x_cuda, y_cpu)), + error_regex='Expected all tensors to be on the same device') + + # error inputs for different input sizes for more than 2 tensors + yield ErrorInput(SampleInput([make_arg((L, 1)), make_arg((L, 1, 1)), make_arg((L, 1, 1))]), + error_regex='Tensors must have same number of dimensions') + + yield ErrorInput(SampleInput([make_arg((S, 1, M)), make_arg((S, 1, 1)), make_arg((S, M, 1))], + kwargs={'dim': 1}), + error_regex='Sizes of tensors must match') + + # error inputs for None input + yield ErrorInput(SampleInput((make_arg((S, 1, 1)), None)), error_type=TypeError, + error_regex='got None') + + # error inputs for zero-dimensional tensors + yield ErrorInput(SampleInput([make_arg(()), make_arg(())]), + error_regex='zero-dimensional.*cannot be concatenated') + + # error inputs for different dtype of out tensors + d = make_tensor((2, 3), device=device, dtype=torch.double) + x = make_tensor((2, 3), device=device, dtype=torch.float32) + yield ErrorInput(SampleInput(x, kwargs={'out': d}), error_type=TypeError, + error_regex='invalid combination of arguments') + +def reference_inputs_cat(op, device, dtype, requires_grad, **kwargs): + yield from sample_inputs_cat_concat(op, device, dtype, requires_grad, **kwargs) + + make_arg = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad) + + # Noncontiguous type promoting tensors + a = make_arg((3, 4, 2)) + b = make_arg((3, 2, 2), noncontiguous=True, dtype=torch.double) + c = make_arg((3, 3, 2), dtype=torch.float16).permute(1, 0, 2) + + yield SampleInput((a, b, c), kwargs={'dim': 1}) + + # Special 1D tensor with dim length of 0 case + a = make_arg((0,)) + b = make_arg((3, 2, 2)) + + yield SampleInput((a, b, a)) + yield SampleInput((a, a, a)) + +def _elementwise_type_promo_np(*args, type_promotion_kind): + def _maybe_torch(x): + if isinstance(x, np.ndarray): + return torch.from_numpy(x) + return x + + flattened = pytree.arg_tree_leaves(*args) + transformed = tuple(_maybe_torch(a) for a in flattened) + result_dtype, _ = prims.utils.elementwise_dtypes( + *transformed, + type_promotion_kind=type_promotion_kind) + return torch_to_numpy_dtype_dict[result_dtype] + +def _cat_np(input_seq, dim=0): + inputs = tuple(a for a in input_seq if not (a.ndim == 1 and a.size == 0)) + + if len(inputs) == 0: + np_dtype = _elementwise_type_promo_np( + input_seq, + type_promotion_kind=prims.utils.ELEMENTWISE_TYPE_PROMOTION_KIND.NO_OPMATH) + return np.empty(0, dtype=np_dtype) + + return np.concatenate(inputs, axis=dim) + +def _floor_divide_np(a, b): + dtype = _elementwise_type_promo_np( + a, + b, + type_promotion_kind=prims.utils.ELEMENTWISE_TYPE_PROMOTION_KIND.DEFAULT) + if isinstance(a, np.ndarray): + a = a.astype(dtype) + if isinstance(b, np.ndarray): + b = b.astype(dtype) + 
return np.floor_divide(a, b) + +def sample_inputs_hstack_dstack_vstack(op_info, device, dtype, requires_grad, **kwargs): + make_arg = partial(make_tensor, dtype=dtype, device=device, requires_grad=requires_grad) + tensor_shapes = ( + # First Tensor being 1-D is special + # case for hstack + ((S,), (S,), (S,)), + ((S, S), (S, S), (S, S)), + ) + for s1, s2, s3 in tensor_shapes: + tensors = (make_arg(s1,), make_arg(s2,), make_arg(s3)) + yield SampleInput(tensors) + +def error_inputs_hstack_dstack_vstack(op, device): + make_arg = partial(make_tensor, dtype=torch.int32, device=device, requires_grad=False) + tensor_shapes = ( + ((S,), (S, S, S, S), (S,)), + ) + for s1, s2, s3 in tensor_shapes: + tensors = (make_arg(s1,), make_arg(s2,), make_arg(s3)) + # Different dimension tensor + yield ErrorInput(SampleInput(tensors), error_regex="Tensors must have same number of dimensions") + + # empty tensor list + yield ErrorInput(SampleInput(()), error_regex="expects a non-empty TensorList") + +def sample_inputs_unbind(op_info, device, dtype, requires_grad, **kwargs): + # Note: we don't do any tests where we unbind along 0-length dims + # because in that case unbind returns and empty tuple, and that breaks + # some assumptions in some backward tests in test_ops.py + shape_dims = (((S,), 0), + ((S, S), 0), + ((S, S), 1), + ((S, S), -1), + ((S, 0, S), 0), + ((S, S, S), 1), + ) + for shape, dim in shape_dims: + yield SampleInput(make_tensor(shape, dtype=dtype, device=device, + requires_grad=requires_grad), + args=(dim,)) + +def error_inputs_unbind(op_info, device): + make_arg = partial(make_tensor, dtype=torch.int32, device=device, requires_grad=False) + yield ErrorInput(SampleInput(make_arg(()), args=(0,)), error_type=IndexError, + error_regex="Dimension specified as 0 but tensor has no dimensions") + yield ErrorInput(SampleInput(make_arg((2,)), args=(2,)), error_type=IndexError, + error_regex="Dimension out of range") + +def reference_unbind(t, dim): + """A numpy implementation of torch.unbind""" + return tuple(s.squeeze(dim) for s in np.split(t, t.shape[dim], dim)) + +def sample_inputs_gather(op_info, device, dtype, requires_grad, **kwargs): + make_arg = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad, low=None, high=None) + yield SampleInput( + make_arg((M, S)), + 0, + gather_variable((S, S), 1, M, True, device=device)) + yield SampleInput( + make_arg((M, S)), + 1, + gather_variable((M, S // 2), 0, S, True, device=device)) + # Empty index tensor case, see: https://github.com/pytorch/pytorch/pull/65006 + yield SampleInput( + make_arg((S,)), + 0, + torch.tensor([], dtype=torch.uint8, device=device)) + # 0D tensor case + yield SampleInput( + make_arg(()), + 0, + torch.tensor([0], dtype=torch.int64, device=device)) + yield SampleInput( + make_arg(()), + 0, + torch.tensor(0, dtype=torch.int64, device=device)) + +def _fill_indices(idx, dim, dim_size, elems_per_row, m, n, o): + for i in range(1 if dim == 0 else m): + for j in range(1 if dim == 1 else n): + for k in range(1 if dim == 2 else o): + ii = [i, j, k] + ii[dim] = slice(0, idx.size(dim) + 1) + idx[tuple(ii)] = torch.randperm(dim_size)[0:elems_per_row] + +def error_inputs_gather(op_info, device, **kwargs): + # src is [1, 2] + # [3, 4] + src = torch.tensor(((1, 2), (3, 4)), device=device, dtype=torch.float32) + + # idx is [0, 0] + # [1, 0] + idx = torch.tensor(((0, 0), (1, 0)), device=device, dtype=torch.long) + + # Index should be smaller than self except on dimension 1 + bad_src = make_tensor((1, 1), device=device, 
dtype=torch.float32) + yield ErrorInput(SampleInput(bad_src, args=(1, idx,)), + error_regex="Size does not match at dimension 0") + + # Index must have long dtype + bad_idx = idx.to(torch.int32) + yield ErrorInput(SampleInput(src, args=(1, bad_idx)), + error_regex="Expected dtype int64 for index") + + # TODO: FIXME + # out.dtype must match src.dtype + # Creates new src & idx since SampleInputs can't share tensors + src = torch.tensor(((1, 2), (3, 4)), device=device, dtype=torch.float32) + idx = torch.tensor(((0, 0), (1, 0)), device=device, dtype=torch.long) + out = torch.empty((2, 2), device=device, dtype=torch.float64) + yield ErrorInput(SampleInput(src, args=(1, idx), kwargs={'out': out}), + error_regex="Expected out tensor to have dtype") + + # src and index tensors must have the same # of dimensions + # idx too few dimensions + src = torch.tensor(((1, 2), (3, 4)), device=device, dtype=torch.float32) + idx = torch.tensor((0, 0), device=device, dtype=torch.long) + yield ErrorInput(SampleInput(src, args=(1, idx)), + error_regex="Index tensor must have the same number of dimensions") + + # src too few dimensions + src = torch.tensor((1, 2), device=device, dtype=torch.float32) + idx = torch.tensor(((0, 0), (1, 0)), device=device, dtype=torch.long) + yield ErrorInput(SampleInput(src, args=(0, idx)), + error_regex="Index tensor must have the same number of dimensions") + + # index out of bounds + # NOTE: this ErrorInput is guarded because bounds checking does not occur on CUDA devices + if torch.device(device).type == 'cpu': + src = torch.tensor(((1, 2), (3, 4)), device=device, dtype=torch.float32) + idx = torch.tensor(((0, 23), (1, 0)), device=device, dtype=torch.long) + yield ErrorInput(SampleInput(src, args=(1, idx,)), + error_regex="index 23 is out of bounds for dimension") + + x = torch.rand((1,), device=device).expand((3,)) + src = torch.rand((6,), device=device) + ind = torch.tensor([2, 1, 0], device=device, dtype=torch.int64) + + yield ErrorInput(SampleInput(src, args=(0, ind,), kwargs=dict(out=x)), + error_type=RuntimeError, + error_regex='unsupported operation') + + yield ErrorInput(SampleInput(src, args=(0, ind,), kwargs=dict(out=src)), + error_type=RuntimeError, + error_regex='unsupported operation') + + yield ErrorInput(SampleInput(ind.clone(), args=(0, ind[1:],), kwargs=dict(out=ind[:1])), + error_type=RuntimeError, + error_regex='unsupported operation') + +def error_inputs_take(op_info, device, **kwargs): + x = torch.rand((1,), device=device).expand((3,)) + src = torch.rand((6,), device=device) + ind = torch.tensor([2, 1, 0], device=device, dtype=torch.int64) + + yield ErrorInput(SampleInput(src, args=(ind,), kwargs=dict(out=x)), + error_type=RuntimeError, + error_regex='unsupported operation') + + yield ErrorInput(SampleInput(src, args=(ind,), kwargs=dict(out=src)), + error_type=RuntimeError, + error_regex='unsupported operation') + + yield ErrorInput(SampleInput(ind.clone(), args=(ind[1:],), kwargs=dict(out=ind[:-1])), + error_type=RuntimeError, + error_regex='unsupported operation') + +# Error inputs for scatter +def error_inputs_scatter_and_scatter_add(op_info, device, **kwargs): + # Error when self.dtype != src.dtype (and src is not a scalar) + src = make_tensor((2, 5), device=device, dtype=torch.float32) + idx = torch.tensor(((0, 1), (1, 2)), device=device, dtype=torch.long) + dst = torch.zeros((3, 5), device=device, dtype=torch.double) + yield ErrorInput(SampleInput(dst, args=(0, idx, src)), + error_regex="Expected self.dtype to be equal to src.dtype") + + # Index dtype 
must be long + src = make_tensor((2, 5), device=device, dtype=torch.float32) + idx = torch.tensor(((0, 1), (1, 2)), device=device, dtype=torch.int32) + dst = torch.zeros((3, 5), device=device, dtype=torch.float32) + yield ErrorInput(SampleInput(dst, args=(0, idx, src)), + error_regex="Expected dtype int64 for index") + + # Index and destination must have the same number of dimensions + src = make_tensor((2, 5), device=device, dtype=torch.float32) + idx = torch.tensor(((0, 1), (1, 2)), device=device, dtype=torch.long) + dst = torch.zeros((3, 5, 3), device=device, dtype=torch.float32) + yield ErrorInput(SampleInput(dst, args=(0, idx, src)), + error_regex="Index tensor must have the same number of dimensions as self tensor") + + # Index and src must have the same number of dimensions when src is not a scalar + src = make_tensor((2, 5, 2), device=device, dtype=torch.float32) + idx = torch.tensor(((34, 1), (1, 2)), device=device, dtype=torch.long) + dst = torch.zeros((3, 5), device=device, dtype=torch.float32) + yield ErrorInput(SampleInput(dst, args=(0, idx, src)), + error_regex="Index tensor must have the same number of dimensions as src tensor") + + # Index out of bounds + # NOTE: this ErrorInput is guarded because bounds checking does not occur on CUDA devices + if torch.device(device).type == 'cpu': + src = make_tensor((2, 5), device=device, dtype=torch.float32) + idx = torch.tensor(((34, 1), (1, 2)), device=device, dtype=torch.long) + dst = torch.zeros((3, 5), device=device, dtype=torch.float32) + yield ErrorInput(SampleInput(dst, args=(0, idx, src)), + error_regex="index 34 is out of bounds for dimension 0 with size 3") + +def error_inputs_renorm(op_info, device, **kwargs): + zero_d = torch.randn((), device=device) + yield ErrorInput(SampleInput(zero_d, args=(0.5, 0, 1.0)), error_type=RuntimeError, + error_regex="needs at least 2 dimensions, got 0 dimensions") + + +def error_inputs_ormqr(op_info, device, **kwargs): + zero_d = torch.randn((), device=device) + yield ErrorInput(SampleInput(zero_d, args=(zero_d, zero_d)), error_type=RuntimeError, + error_regex="input must have at least 2 dimensions") + + # https://github.com/pytorch/pytorch/issues/85218 + tensor_0 = torch.full((5, 0,), 1, device=device) + tensor_1 = torch.full((5,), 1, device=device) + tensor_2 = torch.full((5, 5,), 1, device=device) + bool_3 = True + bool_4 = True + yield ErrorInput(SampleInput(tensor_0, args=(tensor_1, tensor_2, bool_3, bool_4)), error_type=RuntimeError, + error_regex=r"tau.shape\[-1\] must be less than or equal to input.shape\[-1\]") + + +def error_inputs_diag(op_info, device, **kwargs): + zero_d = torch.randn((), device=device) + yield ErrorInput(SampleInput(zero_d, args=(0,)), error_type=RuntimeError, + error_regex="1D or 2D") + zero_d = torch.randn(1, 1, 1, device=device) + yield ErrorInput(SampleInput(zero_d, args=(0,)), error_type=RuntimeError, + error_regex="1D or 2D") + +def error_inputs_embedding(op_info, device, **kwargs): + indices = torch.rand(2, 2, device=device).long() + weights = [ + torch.tensor(1.0, device=device), + torch.tensor(1.0, device=device).reshape(1, 1, 1), + ] + + for weight in weights: + yield ErrorInput(SampleInput(weight, args=(indices,)), error_type=RuntimeError, + error_regex="'weight' must be 2-D") + + +def error_inputs_t(op_info, device, **kwargs): + yield ErrorInput( + SampleInput(torch.randn(2, 3, 4, 5, device=device)), + error_regex="expects a tensor with <= 2", + ) + + +def error_inputs_multinomial(op_info, device, **kwargs): + x = torch.empty(1, 2, 3, 
dtype=torch.double, device=device) + yield ErrorInput(SampleInput(x, args=(2,)), + error_regex="prob_dist must be 1 or 2 dim") + + x = torch.empty(1, 2, dtype=torch.long, device=device) + yield ErrorInput(SampleInput(x, args=(2,)), + error_regex="multinomial only supports floating-point dtypes for input") + + x = torch.empty(1, 2, dtype=torch.double, device=device) + y = torch.empty(1, 2, dtype=torch.double, device=device) + yield ErrorInput(SampleInput(x, args=(2,), kwargs=dict(out=y)), + error_regex="multinomial expects Long tensor out") + + x = torch.empty(2, dtype=torch.double, device=device) + yield ErrorInput(SampleInput(x, args=(0,)), + error_regex="cannot sample n_sample <= 0 samples") + + x = torch.empty(2, dtype=torch.double, device=device) + yield ErrorInput(SampleInput(x, args=(-1,)), + error_regex="cannot sample n_sample <= 0 samples") + + x = torch.empty(2, dtype=torch.double, device=device) + yield ErrorInput(SampleInput(x, args=(3, False,)), + error_regex="cannot sample n_sample > prob_dist") + + x = torch.empty(16777217, dtype=torch.double, device=device) + yield ErrorInput(SampleInput(x, args=(3,)), + error_regex="number of categories cannot exceed") + + inputs = ((1., -1., 1.), (1., inf, 1.), (1., -inf, 1.), (1., 1., nan)) + + err_msg1 = "probability tensor contains either `inf`, `nan` or element < 0" + err_msg2 = "invalid multinomial distribution" + + rep_arg = (False, True) if torch.device(device).type == 'cpu' else (False,) + + for rep in rep_arg: + kwargs = {'num_samples': 2, 'replacement': rep} + + for shape in inputs: + # error case when input tensor contains `inf`, `nan` or negative element + yield ErrorInput(SampleInput(torch.tensor(shape), kwargs=kwargs), + error_regex=err_msg1 if rep is False else err_msg2) + + # error case for the invalid multinomial distribution (sum of probabilities <= 0), 1-D input + x = torch.zeros(3, device=device) + yield ErrorInput(SampleInput(x, kwargs=kwargs), + error_regex=err_msg2) + + # error case for the invalid multinomial distribution (sum of probabilities <= 0), 2-D input + x = torch.zeros(3, 3, device=device) + yield ErrorInput(SampleInput(x, kwargs=kwargs), + error_regex=err_msg2) + + # error case for the invalid multinomial distribution + x[1, :] = 1 + yield ErrorInput(SampleInput(x, kwargs=kwargs), + error_regex=err_msg2) + +def error_inputs_gradient(op_info, device, **kwargs): + for dtype in [torch.long, torch.float32, torch.complex64]: + t = torch.tensor([[1, 2, 3], [4, 5, 6], [7, 8, 9]], device=device, dtype=dtype) + + dim = (1, 0) + spacing = [0.1] + yield ErrorInput(SampleInput(t, kwargs=dict(spacing=spacing, dim=dim, edge_order=1)), + error_type=RuntimeError, + error_regex='torch.gradient expected spacing to be unspecified, a scalar ') + + yield ErrorInput(SampleInput(t, kwargs=dict(edge_order=3)), + error_type=RuntimeError, + error_regex='torch.gradient only supports edge_order=1 and edge_order=2.') + + dim = (1, 1) + spacing = 0.1 + yield ErrorInput(SampleInput(t, kwargs=dict(spacing=spacing, dim=dim, edge_order=1)), + error_type=RuntimeError, + error_regex='dim 1 appears multiple times in the list of dims') + + dim = (0, 1) + coordinates = [torch.tensor([1, 2, 4], device='cpu'), torch.tensor([1, 2, 4], device='meta')] + yield ErrorInput(SampleInput(t, kwargs=dict(spacing=coordinates, dim=dim, edge_order=1)), + error_type=RuntimeError, + error_regex='torch.gradient expected each tensor to be on the same device,') + + yield ErrorInput(SampleInput(t, kwargs=dict(dim=3)), + error_type=IndexError, error_regex='') + + t 
= torch.tensor([[1], [2], [3]]) + yield ErrorInput(SampleInput(t, kwargs=dict(edge_order=1)), + error_type=RuntimeError, + error_regex='torch.gradient expected each dimension size to be at least') + + t = torch.tensor([[1, 2], [3, 4]]) + yield ErrorInput(SampleInput(t, kwargs=dict(edge_order=2)), + error_type=RuntimeError, + error_regex='torch.gradient expected each dimension size to be at least') + +def error_inputs_rrelu(op_info, device, **kwargs): + input = make_tensor((S, S), device=device, dtype=torch.float32) + yield ErrorInput(SampleInput(input, kwargs={'lower': 0.3, 'upper': 0.1}), + error_regex='Lower bound should be less than or equal to the upper bound') + +def error_inputs_masked_select(op_info, device, **kwargs): + x = torch.rand((1,), device=device).expand((3,)) + y = torch.rand((6,), device=device) + mask = torch.tensor([True, False, True, True, False, False], device=device) + + yield ErrorInput(SampleInput(y, args=(mask,), kwargs=dict(out=x)), + error_type=RuntimeError, + error_regex='unsupported operation') + + yield ErrorInput(SampleInput(y, args=(mask,), kwargs=dict(out=y)), + error_type=RuntimeError, + error_regex='unsupported operation') + + yield ErrorInput(SampleInput(mask.clone(), args=(mask,), kwargs=dict(out=mask)), + error_type=RuntimeError, + error_regex='unsupported operation') + +def error_inputs_median(op_info, device, **kwargs): + x = torch.tensor([[[[[[[[[[[[[[[[[[[[[[[[[nan], + [nan]]]]]]]]]]]]]]]]]]]]]]]]], device=device) + if device == 'cuda': + yield ErrorInput(SampleInput(x, kwargs=dict(dim=(-1))), + error_type=RuntimeError, + error_regex='CUDA Tensors cannot have more than 25 dimensions') + else: + return + + +def error_inputs_index_select(op_info, device, **kwargs): + x = torch.rand((1, 6), device=device).expand((2, 6)) + y = torch.rand((3, 6), device=device) + ind = torch.tensor([0, 1], dtype=torch.int64, device=device) + + yield ErrorInput(SampleInput(y, args=(1, ind,), kwargs=dict(out=x)), + error_type=RuntimeError, + error_regex='unsupported operation') + +def error_inputs_index_add(op_info, device, **kwargs): + result = torch.tensor([[1., 2.], [4., 5.], [7., 8.]]) + source = torch.tensor([2., 4.]) + + yield ErrorInput(SampleInput(result, args=(0, torch.tensor([0, 2]), source)), + error_type=RuntimeError, + error_regex=r'source tensor shape must match self tensor shape, ' + r'excluding the specified dimension. Got self.shape = \[3, 2\] source.shape = \[2\]') + +def error_inputs_logcumsumexp(op_info, device, **kwargs): + dim = 3 + srcs = [torch.randn(5, 2, device=device), torch.randn(0, 2, device=device)] + for src in srcs: + yield ErrorInput(SampleInput(src, args=(dim,)), + error_type=IndexError, + error_regex='Dimension out of range') + +def sample_inputs_take_along_dim(op_info, device, dtype, requires_grad, **kwargs): + make_arg = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad, low=None, high=None) + yield SampleInput( + make_arg((S, S)), gather_variable((S, S), 1, S, True, device=device), 0) + + # `indices` broadcast + yield SampleInput( + make_arg((S, S)), gather_variable((1, S // 2), 0, S, True, device=device), 1) + + # `self` broadcast + yield SampleInput( + make_arg((1, S)), gather_variable((S, S // 2), 0, S, True, device=device), 1) + + # without `dim` arg + yield SampleInput( + make_arg((S, S)), gather_variable((S, S // 2), 0, S, True, device=device)) + + +def error_inputs_aminmax_amax_amin(op_info, device, is_ref=False, **kwargs): + + # Error Inputs for zero-dim tensors, when 'dim' arg is not provided. 
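+    # `shape` below has a zero-sized middle dimension; reducing over an empty dimension
+    # (or over all dimensions when 'dim' is omitted) has no identity value, e.g.
+    #   torch.amax(torch.rand(S, 0, S))     # RuntimeError mentioning the reduction dim
+    #   torch.aminmax(torch.rand(S, 0, S))  # RuntimeError: ... has no identity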
+ shape = (S, 0, S) + err_msg_amax_amin = "reduction" + err_msg_aminmax = "cannot compute aminmax over an empty dimension as the operation has no identity" + if op_info.name in ['amax', 'amin', '_refs.amax', '_refs.amin']: + yield ErrorInput(SampleInput(torch.rand(shape, device=device)), error_regex=err_msg_amax_amin) + elif op_info.name in ['aminmax']: + yield ErrorInput(SampleInput(torch.rand(shape, device=device)), error_regex=err_msg_aminmax) + + # Error Inputs for tensors with more than 64 dimension + sizes = [1] * 65 + err_msg1 = "only tensors with up to 64 dims are supported" + yield ErrorInput(SampleInput(torch.randn(sizes, device=device), kwargs={'dim': -1}), + error_regex=err_msg1) + yield ErrorInput(SampleInput(torch.randn(sizes, device=device), kwargs={'dim': 64}), + error_regex=err_msg1) + + # Error Inputs for repeated 'dim' + if op_info.name in ['amax', 'amin', '_refs.amax', '_refs.amin']: + dims = [(0, 0), (0, -4)] + err_msg2 = "in the list of dims" + x = torch.randn(S, S, S, S, device=device) + for dim in dims: + yield ErrorInput(SampleInput(x, kwargs={'dim': dim}), error_regex=err_msg2) + + # Error Input for illegal dtype + input5 = torch.randn(L, L, dtype=torch.float32, device=device) + max_values = torch.empty(L, dtype=torch.float32, device=device) + min_values = torch.empty(L, dtype=torch.double, device=device) + illegal_values = torch.empty(L, dtype=torch.int, device=device) + + # Unlike regular PyTorch, amax and amin refs don't require input and out + # dtypes to match exactly: + # https://github.com/pytorch/pytorch/pull/87765#pullrequestreview-1162023824 + if is_ref: + err_msg_amax_amin2 = ("Attempting to cast from torch.float32 to out tensor with dtype " + "torch.int32, but this can't be cast because it is not safe!") + else: + err_msg_amax_amin2 = ("Expected the dtype for input and out to match, but got Float " + "for input's dtype and Int for out's dtype.") + err_msg_aminmax2 = "Expected out tensor to have dtype float, but got double instead" + + if op_info.name in ['amax', 'amin', '_refs.amax', '_refs.amin']: + yield ErrorInput(SampleInput(input5, kwargs={'dim': 0, 'out': illegal_values}), + error_regex=err_msg_amax_amin2) + elif op_info.name in ['aminmax']: + yield ErrorInput(SampleInput(input5, kwargs={'dim': 0, 'out': (max_values, min_values)}), + error_regex=err_msg_aminmax2) + + # Error Inputs for functions to raise an error on specified zero'd dimension as reduction dim + err_msg3 = "reduction" + # FIXME: eager and ref impl throw different types of errors + error_type = IndexError if 'refs' not in op_info.name else RuntimeError + yield ErrorInput(SampleInput(torch.rand(shape, device=device), kwargs={'dim': 1}), + error_type=error_type, error_regex=err_msg3) + +def sample_inputs_aminmax(op_info, device, dtype, requires_grad, **kwargs): + test_cases: Tuple[tuple, dict] = ( # type: ignore[assignment] + ((S, S, S), {}), + ((S, S, S), {'dim': 1}), + ((S, S, S), {'dim': 1, 'keepdim': True}), + ((), {'dim': 0}), + ((), {}), + ((), {'dim': 0, 'keepdim': True}), + ((S, 0, S), {'dim': 0}), + ) + + for shape, kwargs in test_cases: + yield SampleInput( + make_tensor(shape, dtype=dtype, device=device, requires_grad=requires_grad), + **kwargs) + +def error_inputs_diff(op_info, device, **kwargs): + t = torch.rand((1, 3), device=device) + n = -1 + yield ErrorInput(SampleInput(t, args=(n, ), kwargs=kwargs), + error_type=RuntimeError, + error_regex=f'order must be non-negative but got {n}') + +def sample_inputs_diff(op_info, device, dtype, requires_grad, **kwargs): + 
make_arg = partial(make_tensor, dtype=dtype, device=device, requires_grad=requires_grad) + + test_cases = ( + ((1,), 0, None, None), + ((S,), 0, None, None), + ((S, 1), 0, None, None), + ((S, 1), 1, None, None), + ((S, S), 0, None, None), + ((S, S), 1, None, None), + ((S, S), 0, (1, S), (2, S)), + ((S, S), 0, None, (2, S)), + ((XS, XS, XS), 1, None, None), + ((XS, XS, XS), 2, None, None), + ((XS, XS, XS), 1, (XS, 1, XS), (XS, 1, XS)), + ((XS, XS, XS), 2, (XS, XS, 1), (XS, XS, 1)), + ((XS, XS, XS), 2, (XS, XS, XS), (XS, XS, XS)),) + + sample_inputs = [] + for size, dim, size_prepend, size_append in test_cases: + prepend_size = 0 if (size_prepend is None) else size_prepend[dim] + append_size = 0 if (size_append is None) else size_append[dim] + dim_size = size[dim] + prepend_size + append_size + for n in range(dim_size): + input_tensor = make_arg(size) + prepend = make_arg(size_prepend) if size_prepend else None + append = make_arg(size_append) if size_append else None + yield SampleInput(input_tensor, n, dim, prepend, append) + + # add some samples with n > dim_size + yield SampleInput(make_arg((XS, XS, XS)), S + 1, 1) + yield SampleInput(make_arg((XS, XS, XS)), S * 3 + 2, 2, make_arg((XS, XS, XS)), make_arg((XS, XS, XS))) + +def sample_inputs_histogram(op_info, device, dtype, requires_grad, **kwargs): + make_arg = partial(make_tensor, dtype=dtype, device=device, requires_grad=requires_grad) + + sizes = ((), (S,), (S, S), (S, S, S), (S, 1, S), (S, 0, S)) + + for size, bin_ct, weighted, density in product(sizes, range(1, 5), [False, True], [False, True]): + input_tensor = make_arg(size) + weight_tensor = make_arg(size) if weighted else None + + yield SampleInput(input_tensor, bin_ct, + weight=weight_tensor, density=density) + + bins_tensor = make_arg((bin_ct + 1,)) + yield SampleInput(input_tensor, bins_tensor, + weight=weight_tensor, density=density) + +def sample_inputs_histogramdd(op_info, device, dtype, requires_grad, **kwargs): + make_arg = partial(make_tensor, dtype=dtype, device=device, requires_grad=requires_grad) + + sizes = ((S, S), (S, S, S), (S, 1, S), (S, 0, S)) + bin_ct_patterns = ((1, 1, 1, 1, 1), (2, 3, 2, 3, 2), (3, 2, 3, 2, 3)) + + for size, bin_ct_pattern, weighted, density in product(sizes, bin_ct_patterns, [False, True], [False, True]): + input_tensor = make_arg(size) + bin_ct = bin_ct_pattern[:size[-1]] + weight_tensor = make_arg(size[:-1]) if weighted else None + + yield SampleInput(input_tensor, bin_ct, + weight=weight_tensor, density=density) + + bins_tensor = [make_arg(ct + 1) for ct in bin_ct] + yield SampleInput(input_tensor, bins_tensor, + weight=weight_tensor, density=density) + +def error_inputs_histogramdd(opinfo, device, **kwargs): + invalid_bins = [1, 1, 1, 1, 1] + make_arg = partial(make_tensor, dtype=torch.float, device=device, requires_grad=False) + msg = "histogramdd: The size of bins must be equal to the innermost dimension of the input." 
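+    # e.g. make_arg(5, 6) is a (5, 6) input whose innermost dimension is 6, while
+    # `invalid_bins` supplies 5 bin counts, so torch.histogramdd rejects the pair:
+    #   torch.histogramdd(torch.rand(5, 6), bins=[1, 1, 1, 1, 1])  # raises RuntimeError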
+ yield ErrorInput(SampleInput(make_arg(5, 6), invalid_bins), error_regex=msg) + +def sample_inputs_histc(op_info, device, dtype, requires_grad, **kwargs): + make_arg = partial(make_tensor, dtype=dtype, device=device, requires_grad=requires_grad) + + sizes = ((), (S,), (S, S), (S, S, S), (S, 1, S), (S, 0, S)) + + for size, min, max in product(sizes, [0, -10], [0, 10]): + # construct sample input omitting bins arg + yield SampleInput(make_arg(size), min=min, max=max) + + # construct sample inputs with a few different bins values + for bins in [1, 3, 10]: + yield SampleInput(make_arg(size), bins=bins, min=min, max=max) + +def sample_inputs_bincount(op_info, device, dtype, requires_grad, **kwargs): + make_arg = partial(make_tensor, dtype=dtype, device=device, requires_grad=requires_grad) + + for size, weighted in product((S, M), [False, True]): + input_tensor = torch.randint(0, size, (size,), dtype=dtype, device=device) + weight_tensor = make_arg((size,)) if weighted else None + + max_val = int(input_tensor.max().item()) + + for minlength in [0, max_val // 2, max_val, 2 * max_val]: + yield SampleInput( + input_tensor, weights=weight_tensor, minlength=minlength) + +def sample_inputs_bucketize(op_info, device, dtype, requires_grad, reference_inputs_mode=False, **kwargs): + make_arg = partial(make_tensor, dtype=dtype, device=device, requires_grad=requires_grad) + + sizes = (((), S), ((S,), S), ((S, S), S), ((S, S, S), S), ((S, 1, S), S), ((S, 0, S), S)) + + if reference_inputs_mode: + sizes += (((256,), 128), ((128,), 256), ((32, 32), 11), ((32, 4, 32), 33)) + + for (input_shape, nb), out_int32, right in product(sizes, [False, True], [False, True]): + input_tensor = make_arg(input_shape) + boundaries = make_arg(nb).msort() + + yield SampleInput(input_tensor, boundaries, + out_int32=out_int32, right=right) + +reference_inputs_bucketize = partial(sample_inputs_bucketize, reference_inputs_mode=True) + +def error_inputs_bucketize(opinfo, device, **kwargs): + make_arg = partial(make_tensor, dtype=torch.float, device=device, requires_grad=False) + yield ErrorInput(SampleInput(make_arg((S, S, S)), make_arg((S, S))), + error_regex="boundaries tensor must be 1 dimension") + +def sample_inputs_searchsorted(op_info, device, dtype, requires_grad, **kwargs): + make_arg = partial(make_tensor, dtype=dtype, device=device, requires_grad=requires_grad) + + # (unsorted tensor size, (input sizes,), is_scalar) + sizes = ( + ((0,), ((0,),), False), + ((M,), ((), (M,), (M, M)), False), + ((0, 0), ((0, 0),), False), + ((M, M), ((M, M),), False), + ((0, 0, 0), ((0, 0, 0),), False), + ((M, M, M), ((M, M, M),), False), + ((L,), ((),), True), + ) + + for (size, input_sizes, is_scalar), noncontiguous, out_int32, right in product( + sizes, [False, True], [False, True], [False, True] + ): + unsorted_tensor = make_arg(size, noncontiguous=noncontiguous) + for input_size in input_sizes: + input = make_arg(input_size, noncontiguous=noncontiguous) + if is_scalar: + input = input.item() + if np.prod(size) == 0: + boundary_tensor = unsorted_tensor + sorter = make_tensor(size, dtype=torch.int64, device=device, noncontiguous=noncontiguous) + else: + boundary_tensor, sorter = torch.sort(unsorted_tensor) + side = "right" if right else "left" + + yield SampleInput(boundary_tensor, input, out_int32=out_int32, right=right) + yield SampleInput(boundary_tensor, input, out_int32=out_int32, side=side) + + yield SampleInput(unsorted_tensor, input, out_int32=out_int32, right=right, sorter=sorter) + yield SampleInput(unsorted_tensor, input, 
out_int32=out_int32, side=side, sorter=sorter) + +def sample_inputs_gradient(op_info, device, dtype, requires_grad, **kwargs): + make_arg = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad, low=None, high=None) + test_cases_float = ( + ((S,), None, None, 1), + ((S,), 2., None, 1), + ((S, S), None, None, 2), + ((S, S), [2.0, 2.1], None, 1), + ((S, S), [2.0, 2.1], (0, 1), 1), + ((4, 4, 4), [2., 1.], (0, 1), 2), + ) + for size, spacing, dim, edge_order in test_cases_float: + t = make_arg(size) + yield SampleInput(t, dim=dim, spacing=spacing, edge_order=edge_order) + + test_cases_tensor = ( + ((3, 3, 3), ((1.1, 2.0, 3.5), (4.0, 2, 6.0)), (0, -1), 1), + ((3, 3, 3), ((1.0, 3.0, 2.0), (8.0, 6.0, 1.0)), (0, 1), 2), + ) + for size, coordinates, dim, edge_order in test_cases_tensor: + t = make_arg(size) + coordinates_tensor_list = [] + for coords in coordinates: + # `coords` will always contain floating point values and Python 3.10 does not support this + # implicit conversion to an integer using `__int__` + # TODO: this can be simplified after https://github.com/pytorch/pytorch/issues/69316 is fixed + a = torch.tensor(coords, device=device) + coordinates_tensor_list.append(a.to(dtype)) + yield SampleInput(t, dim=dim, spacing=coordinates_tensor_list, edge_order=edge_order) + +def sample_inputs_getitem(op_info, device, dtype, requires_grad, **kwargs): + make_arg = partial(make_tensor, dtype=dtype, device=device, requires_grad=requires_grad) + test_args = [ + ([1, 2],), + (slice(0, 3),), + ([slice(0, 3), 1],), + ([[0, 2, 3], [1, 3, 3], [0, 0, 2]],), + ([[0, 0, 3], [1, 1, 3], [0, 0, 2]],), + ([slice(None), slice(None), [0, 3]],), + ([slice(None), [0, 3], slice(None)],), + ([[0, 3], slice(None), slice(None)],), + ([[0, 3], [1, 2], slice(None)],), + ([[0, 3], ],), + ([[0, 3], slice(None)],), + ([[0, 3], Ellipsis],), + ([[0, 2, 3], [1, 3, 3], torch.LongTensor([0, 0, 2])],), + (index_variable(2, S, device=device),), + (mask_not_all_zeros((S,)),), + ] + + for args in test_args: + yield SampleInput(make_arg((S, S, S)), args=args) + + yield SampleInput(make_arg((S, S, S, S)), args=([slice(None), [0, 1], slice(None), [0, 1]],)) + +def sample_inputs_index_put(op_info, device, dtype, requires_grad, **kwargs): + make_arg = partial(make_tensor, dtype=dtype, device=device, requires_grad=requires_grad) + + for accumulate in [False, True]: + # Test with indices arg + yield SampleInput( + make_arg((S, S,)), + (index_variable(2, S, device=device),), + make_arg((2, S)), + accumulate=accumulate) + + # Test with mask arg + mask = torch.zeros(S, dtype=torch.bool) if accumulate else mask_not_all_zeros((S,)) + yield SampleInput( + make_arg((S, S)), (mask, ), make_arg((S,)), accumulate=accumulate) + +def sample_inputs_sort(op_info, device, dtype, requires_grad, **kwargs): + def small_3d_unique(): + res = torch.randperm(S * S * S, dtype=torch.int64, device=device).view(S, S, S) + res = res.to(dtype).requires_grad_(requires_grad) + return res + + def large_1d_unique(): + res = torch.randperm(L * L * L, dtype=torch.int64, device=device) + res = res.to(dtype).requires_grad_(requires_grad) + return res + + # Test case for large tensor. + yield SampleInput(large_1d_unique()) + + # Test cases for small 3d tensors. 
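+    # small_3d_unique() draws from randperm, so all values are distinct and the sort
+    # order (and therefore the gradient) is deterministic.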
+ # Imitates legacy tests from test/test_torch.py + dims = range(-3, 3) + flag = [True, False] + for dim, descending, stable in product(dims, flag, flag): + # default schema without stable sort + yield SampleInput(small_3d_unique(), dim, descending) + # schema with stable sort, no CUDA support yet + if torch.device(device).type == 'cpu': + yield SampleInput( + small_3d_unique(), dim=dim, descending=descending, stable=stable) + + # Test cases for scalar tensor + tensor_opt = dict(dtype=dtype, device=device, requires_grad=requires_grad) + yield SampleInput(torch.tensor(1, **tensor_opt)) + yield SampleInput(torch.tensor(1, **tensor_opt), 0) + yield SampleInput(torch.tensor(1, **tensor_opt), 0, True) + + # Test cases for empty tensor + yield SampleInput(torch.tensor((), **tensor_opt)) + yield SampleInput(torch.tensor((), **tensor_opt), 0) + yield SampleInput(torch.tensor((), **tensor_opt), 0, True) + + # Test cases for stable sort + yield SampleInput(small_3d_unique(), stable=True) + yield SampleInput(small_3d_unique(), dim=0, stable=True) + yield SampleInput(small_3d_unique(), dim=0, descending=True, stable=True) + +def sample_inputs_threshold(op_info, device, dtype, requires_grad, **kwargs): + make_arg = partial(make_tensor, dtype=dtype, device=device, requires_grad=requires_grad) + sizes = ((), (S,), (S, S), (S, S, S)) + for x_size in sizes: + # threshold and values args must be numbers + yield SampleInput(make_arg(x_size), make_arg(()).item(), make_arg(()).item()) + +def sample_inputs_unique(op_info, device, dtype, requires_grad, **kwargs): + make_arg = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad) + sizes = ((), (S,), (S, S), (S, S, S), (S, 1, S), (S, 0, S)) + + for shape, sorted, return_inverse, return_counts, dim in \ + product(sizes, [False, True], [False, True], [False, True], [None, -2, -1, 0, 1, 2]): + # torch.unique cannot be called if the input tensor has a zero dimension which isn't the selected dim + if 0 in shape and shape.index(0) is not dim: + continue + + # skip invalid dim args + if dim is not None and (dim < -len(shape) or dim >= len(shape)): + continue + + kwargs = dict(sorted=sorted, return_inverse=return_inverse, return_counts=return_counts, dim=dim) + + # construct a test case with only one distinct value + input_t = torch.zeros(shape, dtype=dtype, device=device, requires_grad=requires_grad) + yield SampleInput(input_t, **kwargs) + + # construct a test case with mixed 0s and 1s + input_t = make_arg(shape, dtype=torch.bool, requires_grad=False)\ + .to(dtype).requires_grad_(requires_grad) + yield SampleInput(input_t, **kwargs) + + # construct a test case with many different values + yield SampleInput(make_arg(shape), **kwargs) + +def sample_inputs_unique_consecutive(*args, **kwargs): + for sample_input in sample_inputs_unique(*args, **kwargs): + if not sample_input.kwargs["sorted"]: + sample_input.kwargs.pop("sorted") + yield sample_input + +def sample_inputs_adaptive_avg_pool1d(op_info, device, dtype, requires_grad, **kwargs): + make_arg = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad) + + # Ordered as (input shape, output size) + cases = ( + ((0, 8, 8), (5,)), + ((3, 8, 8), 5), + ((3, 8, 8), 1) + ) + + for input_shape, output_size in cases: + # Batched + yield SampleInput(make_arg(input_shape), args=(output_size,)) + # Unbatched + yield SampleInput(make_arg(input_shape[1:]), args=(output_size,)) + + +def error_inputs_adaptive_avg_pool1d(opinfo, device, **kwargs): + make_arg = partial(make_tensor, 
device=device, dtype=torch.float32) + + # error inputs for empty output + yield ErrorInput(SampleInput(make_arg((1, 2, 3)), output_size=()), + error_regex="'output_size' should contain one int") + + # error inputs for output_size lesser than 0 + yield ErrorInput(SampleInput(make_arg((1, 1, 1)), output_size=(-1,)), + error_regex="elements of output_size must be greater than or equal to 0") + + +def sample_inputs_adaptive_avg_pool2d(op_info, device, dtype, requires_grad, **kwargs): + make_arg = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad) + + # Ordered as (input shape, output size) + cases = ( + ((1, 8, 8, 8), (5, 7)), + ((2, 8, 8, 8), (None, 7)), + ((1, 8, 4, 3), (5, None)), + ((1, 8, 4, 3), (None, None)), + ((1, 8, 4, 3), (5)), + ) + + for input_shape, output_size in cases: + # Batched + yield SampleInput(make_arg(input_shape), args=(output_size,)) + # Unbatched + yield SampleInput(make_arg(input_shape[1:]), args=(output_size,)) + + +def error_inputs_adaptive_avg_pool2d(opinfo, device, **kwargs): + make_arg = partial(make_tensor, device=device, dtype=torch.float32) + + # error inputs for incorrect input dimension + yield ErrorInput(SampleInput(make_arg((2, 2)), output_size=(2, 2)), + error_type=ValueError, error_regex="Input dimension should be at least 3") + + # error inputs for empty output + yield ErrorInput(SampleInput(make_arg((1, 2, 3, 4)), output_size=()), + error_regex="output_size must be 2") + + # error inputs for output_size lesser than 0 + yield ErrorInput(SampleInput(make_arg((1, 1, 1, 1)), output_size=(-1, 0)), + error_regex="elements of output_size must be greater than or equal to 0") + + +def sample_inputs_adaptive_avg_pool3d(op_info, device, dtype, requires_grad, **kwargs): + make_arg = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad) + + # Ordered as (input shape, output size) + cases = ( + ((0, 8, 8, 8, 8), (5, 7, 4)), + ((1, 8, 4, 3, 7), (None, None, None)), + ((1, 8, 4, 3, 7), (1, 1, 1)), + ((3, 3, 8, 8, 6), (5, 7, None)), + ((1, 3, 8, 8, 6), (5, None, 2)), + ((3, 3, 8, 8, 6), (None, 3, 2)), + ) + + for input_shape, output_size in cases: + # Batched + yield SampleInput(make_arg(input_shape), args=(output_size,)) + # Unbatched + yield SampleInput(make_arg(input_shape[1:]), args=(output_size,)) + + +def error_inputs_adaptive_avg_pool3d(opinfo, device, **kwargs): + make_arg = partial(make_tensor, device=device, dtype=torch.float32) + + # error inputs for incorrect input dimension + yield ErrorInput(SampleInput(make_arg((2, 2, 2)), output_size=(2, 2, 2)), + error_type=ValueError, error_regex="Input dimension should be at least 4") + + # error inputs for empty output + yield ErrorInput(SampleInput(make_arg((1, 2, 3, 4)), output_size=()), + error_regex="output_size must be 3") + + # error inputs for output_size lesser than 0 + yield ErrorInput(SampleInput(make_arg((1, 1, 1, 1, 1)), output_size=(-1, 0, 2)), + error_regex="elements of output_size must be greater than or equal to 0") + + +def sample_inputs_adaptive_max_pool1d(op_info, device, dtype, requires_grad, **kwargs): + make_arg = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad) + + # Ordered as (input shape, output size) + cases = ( + # ((0, 8, 8), (5,)), + # 0 batch size doesn't work, cannot reshape tensor of 0 elements into shape [0, 8, -1] + ((3, 4, 4), 3), + ((3, 4, 4), 1) + ) + + for shapes, return_idx in product(cases, (True, False)): + # Batched + yield SampleInput(make_arg(shapes[0]), args=(shapes[1], return_idx)) + # 
Unbatched + yield SampleInput(make_arg(shapes[0][1:]), args=(shapes[1], return_idx)) + + +def error_inputs_adaptive_max_pool1d(opinfo, device, **kwargs): + make_arg = partial(make_tensor, device=device, dtype=torch.float32) + + # error inputs for empty output + yield ErrorInput(SampleInput(make_arg((1, 2, 3)), output_size=()), + error_regex="'output_size' should contain one int") + + # error inputs for output_size lesser than 0 + yield ErrorInput(SampleInput(make_arg((1, 1, 1)), output_size=(-1,)), + error_regex="Trying to create tensor with negative dimension") + +def sample_inputs_adaptive_max_pool2d(op_info, device, dtype, requires_grad, **kwargs): + make_arg = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad) + + # Ordered as (input shape, output size) + cases = ( + # ((0, 8, 8, 8), (5, 7)), + # 0 batch size doesn't work, cannot reshape tensor of 0 elements into shape [0, 8, -1] + ((1, 4, 4, 4), (2, 3)), + ((2, 4, 4, 4), (None, 3)), + ((2, 4, 4, 4), (1, 1)), + ((1, 4, 4, 3), (3, None)), + ((1, 4, 4, 3), (None, None)), + ((1, 4, 4, 3), (3)), + ) + + for shapes, return_idx in product(cases, (True, False)): + # Batched + yield SampleInput(make_arg(shapes[0]), args=(shapes[1], return_idx)) + # Unbatched + yield SampleInput(make_arg(shapes[0][1:]), args=(shapes[1], return_idx)) + +def error_inputs_adaptive_max_pool2d(opinfo, device, **kwargs): + make_arg = partial(make_tensor, device=device, dtype=torch.float32) + + # error inputs for incorrect input dimension + yield ErrorInput(SampleInput(make_arg((2, 2)), output_size=(2, 2)), + error_type=ValueError, error_regex="Input dimension should be at least 3") + + # error inputs for empty output + yield ErrorInput(SampleInput(make_arg((1, 2, 3, 4)), output_size=()), + error_regex="internal error") + + # error inputs for output_size lesser than 0 + yield ErrorInput(SampleInput(make_arg((1, 1, 1, 1)), output_size=(-1, 0)), + error_regex="Trying to create tensor with negative dimension") + + +def sample_inputs_adaptive_max_pool3d(op_info, device, dtype, requires_grad, **kwargs): + make_arg = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad) + + # Ordered as (input shape, output size) + cases = ( + # ((0, 8, 8, 8, 8), (5, 7, 4)), + # 0 batch size doesn't work, cannot reshape tensor of 0 elements into shape [0, 8, -1] + ((1, 4, 4, 3, 5), (None, None, None)), + ((1, 4, 4, 3, 5), (1, 1, 1)), + ((3, 3, 4, 4, 6), (2, 3, None)), + ((1, 3, 4, 4, 6), (3, None, 2)), + ((3, 3, 4, 4, 6), (None, 3, 2)), + ) + + for shapes, return_idx in product(cases, (True, False)): + # Batched + yield SampleInput(make_arg(shapes[0]), args=(shapes[1], return_idx)) + # Unbatched + yield SampleInput(make_arg(shapes[0][1:]), args=(shapes[1], return_idx)) + +def error_inputs_adaptive_max_pool3d(opinfo, device, **kwargs): + make_arg = partial(make_tensor, device=device, dtype=torch.float32) + + # error inputs for incorrect input dimension + yield ErrorInput(SampleInput(make_arg((2, 2, 2)), output_size=(2, 2, 2)), + error_type=ValueError, error_regex="Input dimension should be at least 4") + + # error inputs for empty output + yield ErrorInput(SampleInput(make_arg((1, 2, 3, 4)), output_size=()), + error_regex="internal error") + + # error inputs for output_size lesser than 0 + yield ErrorInput(SampleInput(make_arg((1, 1, 1, 1, 1)), output_size=(-1, 0, 2)), + error_regex="Trying to create tensor with negative dimension") + + +class _TestParamsMaxPoolBase: + + def __init__(self): + self.kwargs = { + 'kernel_size': [3], + 'stride': 
[2, None], + 'ceil_mode': [True, False], + 'padding': [0, 1], + 'dilation': [1], + 'return_indices': [True, False] + } + + self.shapes = [ + [1, 2, None], # batch + [2], # channels + [3, 6] # signal + ] + + def _gen_shape(self): + for shape in product(*self.shapes): + # shape[0] is None indicates missing batch dimension + if shape[0] is None: + shape = shape[1:] + + yield shape, torch.contiguous_format + # only 2d (N, C, H, W) rank 4 tensors support channels_last memory format + if len(self.shapes) == 4 and len(shape) == 4: + yield shape, torch.channels_last + + def _gen_kwargs(self): + keys = self.kwargs.keys() + for values in product(*self.kwargs.values()): + yield dict(zip(keys, values)) + + def gen_input_params(self): + yield from product(self._gen_shape(), self._gen_kwargs()) + +class _TestParamsMaxPool1d(_TestParamsMaxPoolBase): + + def __init__(self): + super().__init__() + self.kwargs['kernel_size'] += [(3,)] + self.kwargs['stride'] += [(2,)] + self.kwargs['padding'] += [(1,)] + self.kwargs['dilation'] += [(1,)] + +class _TestParamsMaxPool2d(_TestParamsMaxPoolBase): + + def __init__(self): + super().__init__() + self.kwargs['kernel_size'] += [(3, 2)] + self.kwargs['stride'] += [(2, 1)] + self.kwargs['padding'] += [(1, 1)] + self.kwargs['dilation'] += [(1, 2)] + + self.shapes.append([6]) + +class _TestParamsMaxPool3d(_TestParamsMaxPoolBase): + + def __init__(self): + super().__init__() + self.kwargs['kernel_size'] += [(3, 2, 3)] + self.kwargs['stride'] += [(2, 1, 2)] + self.kwargs['dilation'] += [(1, 2, 1)] + + self.shapes.append([6]) + self.shapes.append([5]) + +def sample_inputs_max_pool(op_info, device, dtype, requires_grad, **kwargs): + make_arg = partial(make_tensor, device=device, dtype=dtype, requires_grad=False) + + params_generator_type_dict = { + 'nn.functional.max_pool1d': _TestParamsMaxPool1d, + 'nn.functional.max_pool2d': _TestParamsMaxPool2d, + 'nn.functional.max_pool3d': _TestParamsMaxPool3d, + 'max_pool2d_with_indices_backward': _TestParamsMaxPool2d, + } + + params_generator = params_generator_type_dict[op_info.name]() + for (shape, memory_format), kwargs in params_generator.gen_input_params(): + arg = make_arg(shape).to(memory_format=memory_format).requires_grad_(requires_grad) + yield SampleInput(arg, kwargs=kwargs) + +def max_pool2d_backward(*args, kernel_size=(), stride=(), padding=(0,), dilation=(1,), ceil_mode=False, **kwargs): + out, indices = torch.nn.functional.max_pool2d_with_indices( + *args, kernel_size=kernel_size, stride=stride, padding=padding, dilation=dilation, ceil_mode=ceil_mode, return_indices=True) + grad_out = torch.ones_like(out) + if stride is None: + stride = kernel_size + out_b = torch.ops.aten.max_pool2d_with_indices_backward.default( + grad_out, *args, kernel_size, stride, padding, dilation, ceil_mode, indices) + return out_b + +def error_inputs_max_pool1d(op_info, device, **kwargs): + # Toggle requires_grad because `max_pool1d` has different path + # based on whether `requires_grad` is set or not. 
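+    # (when requires_grad is set, max_pool1d dispatches to max_pool1d_with_indices,
+    # so each error below is exercised on both code paths)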
+ for requires_grad in (True, False): + make_arg = partial(make_tensor, device=device, dtype=torch.float, requires_grad=requires_grad) + # error inputs when pad is negative + x = make_arg((0, 1, 49)) + yield ErrorInput(SampleInput(x, kwargs={'kernel_size': 2, 'stride': 50, 'padding': -1, 'return_indices': True}), + error_regex='pad must be non-negative') + + # error inputs when pad > kernel_size / 2 + yield ErrorInput(SampleInput(x, kwargs={'kernel_size': 2, 'stride': 50, 'padding': 4, 'return_indices': True}), + error_regex='pad should be at most half of kernel size') + + # error inputs for input tensor + error_msg = r'Expected 2D or 3D \(batch mode\) tensor with optional 0 dim batch size for input' + yield ErrorInput(SampleInput(make_arg((), requires_grad=requires_grad), kwargs={'kernel_size': 1}), + error_regex=error_msg) + + # error inputs for empty input + yield ErrorInput(SampleInput(torch.tensor([], device=device, requires_grad=requires_grad), + kwargs={'kernel_size': 1}), + error_regex=error_msg) + + # error: unbatched input with 0 sized non-batch dims. + yield ErrorInput(SampleInput(make_arg((0, 10), requires_grad=requires_grad), + kwargs={'kernel_size': 1}), + error_regex=error_msg) + + # error: batched input with 0 sized non-batch dims. + yield ErrorInput(SampleInput(make_arg((1, 10, 0), requires_grad=requires_grad), + kwargs={'kernel_size': 1}), + error_regex=error_msg) + + # error inputs for empty input with stride=0 + error_msg = 'stride must be greater than zero, but got 0' + yield ErrorInput(SampleInput(make_arg((3, 3, 3)), kwargs={'kernel_size': 1, 'stride': 0}), + error_regex=error_msg) + + # error inputs for empty input with dilation=0 + error_msg = 'dilation must be greater than zero, but got 0' + yield ErrorInput(SampleInput(make_arg((3, 3, 3)), + kwargs={'kernel_size': 1, 'stride': 1, 'padding': 0, 'dilation': 0}), + error_regex=error_msg) + + # error inputs for invalid output size + error_msg = 'Invalid computed output size: -2' + yield ErrorInput(SampleInput(make_arg((2, 2, 2)), + kwargs={'kernel_size': 5, 'stride': 1, 'padding': 0, 'dilation': 1}), + error_regex=error_msg) + + # error inputs when kernel_size=0 + error_msg = 'kernel_size must be greater than zero' + yield ErrorInput(SampleInput(x, kwargs={'kernel_size': 0}), + error_regex=error_msg) + + # error inputs for strides > 0 + error_msg = 'stride must be greater than zero' + yield ErrorInput(SampleInput(x, kwargs={'kernel_size': 2, 'stride': 0}), + error_regex=error_msg) + + +def error_inputs_max_pool2d(op_info, device, **kwargs): + make_arg = partial(make_tensor, device=device, dtype=torch.float, requires_grad=False) + # error inputs when pad is negative + x = make_arg((0, 1, 49)) + yield ErrorInput(SampleInput(x, kwargs={'kernel_size': 2, 'stride': 50, 'padding': -1, 'return_indices': True}), + error_regex='pad must be non-negative') + # 2-dimensional kernel + yield ErrorInput(SampleInput(x, kwargs={'kernel_size': (3, 2), 'stride': 50, 'padding': -1, 'return_indices': True}), + error_regex='pad must be non-negative') + + # error inputs when pad > kernel_size / 2 (kernel_size : int) + yield ErrorInput(SampleInput(x, kwargs={'kernel_size': 2, 'stride': 50, 'padding': 4, 'return_indices': True}), + error_regex='pad should be at most half of kernel size') + + # error inputs when pad > kernel_size / 2 (kernel_size : tuple) + yield ErrorInput(SampleInput(x, kwargs={'kernel_size': (3, 2), 'stride': 50, 'padding': 4, 'return_indices': True}), + error_regex='pad should be at most half of kernel size') + + # error: 
unbatched input with 0 sized non-batch dims. + err_msg = r'Expected 3D or 4D \(batch mode\) tensor with optional 0 dim batch size for input' + yield ErrorInput(SampleInput(make_arg((1, 0, 10)), + kwargs={'kernel_size': 1}), + error_regex=err_msg) + + # error: batched input with 0 sized non-batch dims. + yield ErrorInput(SampleInput(make_arg((2, 1, 10, 0)), + kwargs={'kernel_size': 1}), + error_regex=err_msg) + + +def error_inputs_max_pool3d(op_info, device, **kwargs): + make_arg = partial(make_tensor, device=device, dtype=torch.float, requires_grad=False) + # error inputs when pad is negative + x = make_arg((0, 1, 49, 50)) + yield ErrorInput(SampleInput(x, kwargs={'kernel_size': 2, 'stride': 50, 'padding': -1, 'return_indices': True}), + error_regex='pad must be non-negative') + # 3-dimensional kernel + yield ErrorInput(SampleInput(x, kwargs={'kernel_size': (3, 2, 2), 'stride': 50, + 'padding': -1, 'return_indices': True}), + error_regex='pad must be non-negative') + + # error inputs when pad > kernel_size / 2 (kernel_size: int) + yield ErrorInput(SampleInput(x, kwargs={'kernel_size': 2, 'stride': 50, 'padding': 4, 'return_indices': True}), + error_regex='pad should be at most half of kernel size') + + # error inputs when pad > kernel_size / 2 (kernel_size: tuple) + yield ErrorInput(SampleInput(x, kwargs={'kernel_size': (3, 2, 2), 'stride': 50, + 'padding': 4, 'return_indices': True}), + error_regex='pad should be at most half of kernel size') + + # error: unbatched input with 0 sized non-batch dims. + err_msg = r'Expected input\'s non-batch dimensions to have positive length' + yield ErrorInput(SampleInput(make_arg((0, 1, 2, 10)), + kwargs={'kernel_size': 1}), + error_regex=err_msg) + + # error: batched inputs with 0 sized non-batch dims. + yield ErrorInput(SampleInput(make_arg((2, 1, 0, 1, 2)), + kwargs={'kernel_size': 1}), + error_regex=err_msg) + + +def sample_inputs_normalize(self, device, dtype, requires_grad, **kwargs): + make_arg = partial(make_tensor, low=-1, high=1, device=device, dtype=dtype, requires_grad=requires_grad) + + cases: Tuple[Tuple[int], dict] = ( # type: ignore[assignment] + ((2, 1, 4, 5), {'p': 1., 'dim': 2}), + ((2, 3, 4, 5), {'p': 2., 'dim': 1}), + ((1, 2, 4, 5), {'p': 0.5, 'dim': 0}), + ((1, 3, 4, 5), {'p': -1., 'dim': 1}), + ((1, 3, 4, 5), {'p': 0., 'dim': -1}), + ((), {'p': 1.2, 'dim': 0}), + ((2, 3, 4, 5), {}), + ((2, 3, 4, 5), {'eps': 1e-4})) + + for input_shape, kwargs in cases: + yield SampleInput(make_arg(input_shape), kwargs=kwargs) + + +def complex_conv(fn, input_size, weight, grad_output, stride, padding, dilation, groups): + # conv(W, x, b) = conv(Wr, xr, br) - conv(Wi, xi, 0) + i(conv(Wi, xr, bi) + conv(Wr, xi, 0)) + # a = conv(Wr, xr, br), + # b = conv(Wi, xi, 0), + # c = conv(Wr + Wi, xr + xi, br + bi) + # conv(W, x, b) = a - b + i(c - a - b) + + grad_output_ = torch.view_as_real(grad_output) + grad_output_r = grad_output_[..., 0] + grad_output_i = grad_output_[..., 1] + + weight_ = torch.view_as_real(weight) + weight_r = weight_[..., 0] + weight_i = weight_[..., 1] + + a = fn(input_size, weight_r, grad_output_r, stride, padding, dilation, groups) + b = fn(input_size, weight_i, grad_output_i, stride, padding, dilation, groups) + c = fn(input_size, weight_r + weight_i, grad_output_r + grad_output_i, stride, padding, dilation, groups) + + return (a - b) + 1j * (c - a - b) + + +def conv_transpose_ref(input, weight, bias, stride=1, padding=0, + output_padding=0, dilation=1, groups=1, + fn=None): + # Derivative of `conv` is `conv_transpose`. 
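+    # (equivalently, conv_transpose(grad_output, weight) is the gradient of conv with
+    # respect to its input, which is what torch.nn.grad.convNd_input computes)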
+ # To verify the correctness of `conv_transpose`, + # we rely `torch.nn.grad` implementation (which is tested in test_nn.py) + # for floating dtypes. + + assert fn is not None + + grad_fn_map = {torch.nn.functional.conv_transpose1d: torch.nn.grad.conv1d_input, + torch.nn.functional.conv_transpose2d: torch.nn.grad.conv2d_input, + torch.nn.functional.conv_transpose3d: torch.nn.grad.conv3d_input} + batched_dim_map = {torch.nn.functional.conv_transpose1d: 3, + torch.nn.functional.conv_transpose2d: 4, + torch.nn.functional.conv_transpose3d: 5} + + # Input for `ref` is ndarray. + input, weight = torch.from_numpy(input), torch.from_numpy(weight) + + is_batched = len(input.shape) == batched_dim_map[fn] + if not is_batched: + input = input.unsqueeze(0) + + if bias is not None: + bias = torch.from_numpy(bias) + unsqueeze_dims = input.ndim - 2 + for _ in range(unsqueeze_dims): + bias = bias.unsqueeze(1) + + grad_output = input + # Get the input shape for grad_fn. + conv_transpose_output = fn(grad_output.to('meta'), weight.to('meta'), None, + stride=stride, padding=padding, output_padding=output_padding, + groups=groups, dilation=dilation) + input_size = conv_transpose_output.shape + + grad_fn = grad_fn_map[fn] + if weight.dtype.is_complex: + out = complex_conv(grad_fn, input_size, weight, grad_output, stride, padding, dilation, groups) + else: # Floating + out = grad_fn(input_size, weight, grad_output, stride, padding, dilation, groups) + + if bias is not None: + out = out + bias + + return out.squeeze(0) if not is_batched else out + + +def sample_inputs_conv_transpose1d(op_info, device, dtype, requires_grad, **kwargs): + make_arg = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad) + + # Ordered as shapes for input, weight, bias + # and a dict of values of (stride, padding, output_padding, groups, dilation) + cases: Tuple[Tuple[int], Tuple[int], Tuple[int], dict] = ( # type: ignore[assignment] + ((1, 3, 4), (3, 3, 3), (3,), + {'stride': (2,), 'padding': 2, 'output_padding': (1,), 'groups': 1}), + ((2, 2, 4), (2, 2, 4), (4,), + {'stride': (3,), 'padding': (1,), 'output_padding': (2,), 'groups': 2, 'dilation': (4,)}), + ((1, 1, 4), (1, 1, 4), (1,), + {'stride': 2, 'padding': 1, 'output_padding': 1, 'groups': 1, 'dilation': (2,)}), + ((1, 1, 4), (1, 2, 3), None, + {'stride': 2, 'padding': 1, 'output_padding': 1, 'groups': 1}), + ((1, 4, 5), (4, 8, 3), None, + {}) + ) + + for input_shape, weight, bias, kwargs in cases: + # Batched + yield SampleInput(make_arg(input_shape), args=( + make_arg(weight), + make_arg(bias) if bias is not None else bias + ), kwargs=kwargs) + # Unbatched + yield SampleInput(make_arg(input_shape[1:]), args=( + make_arg(weight), + make_arg(bias) if bias is not None else bias + ), kwargs=kwargs) + + +def sample_inputs_conv_transpose2d(op_info, device, dtype, requires_grad, **kwargs): + make_arg = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad) + + # Ordered as shapes for input, weight, bias + # and a dict of values of (stride, padding, output_padding, groups, dilation) + cases: Tuple[Tuple[int], Tuple[int], Tuple[int], dict] = ( # type: ignore[assignment] + ((1, 3, 4, 4), (3, 3, 3, 3), (3,), + {'stride': (2, 2), 'padding': 2, 'output_padding': (1, 1), 'groups': 1}), + ((2, 2, 4, 4), (2, 2, 4, 5), (4,), + {'stride': (3, 2), 'padding': (1, 2), 'output_padding': (2, 3), 'groups': 2, 'dilation': (4, 4)}), + ((1, 1, 4, 5), (1, 1, 4, 3), (1,), + {'stride': 2, 'padding': 1, 'output_padding': 1, 'groups': 1, 'dilation': (2, 3)}), + 
((1, 1, 4, 3), (1, 2, 3, 4), None, + {'stride': 2, 'padding': 1, 'output_padding': 1, 'groups': 1}), + ((2, 4, 4, 4), (4, 1, 3, 3), None, {'groups': 4}), + ((1, 2, 5, 5), (2, 4, 3, 3), None, {}) + ) + + for input_shape, weight, bias, kwargs in cases: + # Batched + yield SampleInput(make_arg(input_shape), args=( + make_arg(weight), + make_arg(bias) if bias is not None else bias + ), kwargs=kwargs) + # Unbatched + yield SampleInput(make_arg(input_shape[1:]), args=( + make_arg(weight), + make_arg(bias) if bias is not None else bias + ), kwargs=kwargs) + +def sample_inputs_conv_transpose3d(op_info, device, dtype, requires_grad, **kwargs): + make_arg = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad) + + # Ordered as shapes for input, weight, bias + # and a dict of values of (stride, padding, output_padding, groups, dilation) + cases: Tuple[Tuple[int], Tuple[int], Tuple[int], dict] = ( # type: ignore[assignment] + ((1, 3, 4, 4, 4), (3, 3, 3, 3, 3), (3,), + {'stride': (2, 2, 2), 'padding': 2, 'output_padding': (1, 1, 1), 'groups': 1}), + ((2, 2, 4, 4, 4), (2, 2, 4, 5, 6), (4,), + {'stride': (3, 2, 1), 'padding': (1, 2, 3), 'output_padding': (2, 3, 1), 'groups': 2, 'dilation': (4, 4, 4)}), + ((1, 1, 4, 5, 2), (1, 1, 4, 3, 1), (1,), + {'stride': 2, 'padding': 1, 'output_padding': 1, 'groups': 1, 'dilation': (2, 3, 2)}), + ((1, 1, 4, 3, 4), (1, 2, 3, 4, 5), None, + {'stride': 2, 'padding': 1, 'output_padding': 1, 'groups': 1}), + ((1, 4, 5, 5, 5), (4, 8, 3, 3, 3), None, + {}) + ) + + for input_shape, weight, bias, kwargs in cases: + # Batched + yield SampleInput(make_arg(input_shape), args=( + make_arg(weight), + make_arg(bias) if bias is not None else bias + ), kwargs=kwargs) + # Unbatched + yield SampleInput(make_arg(input_shape[1:]), args=( + make_arg(weight), + make_arg(bias) if bias is not None else bias + ), kwargs=kwargs) + + +def sample_inputs_conv1d(op_info, device, dtype, requires_grad, **kwargs): + make_arg = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad) + + # Ordered as shapes for input, weight, bias, + # and a dict of values of (stride, padding, dilation, groups) + cases: Tuple = ( + ((1, 3, 4), (3, 3, 3), (3,), {'stride': (2,), 'padding': 2, 'groups': 1}), + ((2, 4, 8), (2, 2, 3), (2,), {'stride': 3, 'padding': 1, 'groups': 2, 'dilation': 2}), + ((1, 4, 5), (1, 4, 3), None, {'stride': (2,), 'padding': 'valid'}), + ((2, 2, 4), (2, 1, 4), (2,), {'stride': (1,), 'padding': 'same', 'groups': 2, 'dilation': (2,)}), + # With defaults + ((1, 4, 5), (3, 4, 3), None, {}), + ) + + for input_shape, weight, bias, kwargs in cases: + # Batched + yield SampleInput(make_arg(input_shape), args=( + make_arg(weight), + make_arg(bias) if bias is not None else bias + ), kwargs=kwargs) + # Unbatched + yield SampleInput(make_arg(input_shape[1:]), args=( + make_arg(weight), + make_arg(bias) if bias is not None else bias + ), kwargs=kwargs) + + +def error_inputs_conv1d(opinfo, device, **kwargs): + make_arg = partial(make_tensor, device=device, dtype=torch.float64) + make_int_arg = partial(make_tensor, device=device, dtype=torch.int64) + make_complex_arg = partial(make_tensor, device=device, dtype=torch.complex128) + + # error inputs for different dtypes of input tensor and bias + yield ErrorInput( + SampleInput(make_int_arg((1, 1, 4)), args=(make_int_arg((1, 1, 2)), make_arg((1,)))), + error_regex="should be the same") + + # error inputs for different dtypes of input tensor and bias + yield ErrorInput( + SampleInput(make_arg((1, 1, 4)), 
args=(make_arg((1, 1, 2)), make_complex_arg((1,)))), + error_regex="should be the same") + + # error inputs for negative strides + yield ErrorInput( + SampleInput(make_arg((1, 1, 4)), args=(make_arg((1, 2, 2)), make_arg((1,))), + kwargs={'stride': (-1,)}), error_regex="non-positive stride is not supported") + + # error inputs for negative padding + yield ErrorInput( + SampleInput(make_arg((1, 1, 4)), args=(make_arg((1, 2, 2)), make_arg((1,))), + kwargs={'padding': (-1,)}), error_regex="negative padding is not supported") + + # error inputs for negative dilation + yield ErrorInput( + SampleInput(make_arg((1, 1, 4)), args=(make_arg((1, 1, 2)), make_arg((1,))), + kwargs={'dilation': (-1,)}), error_regex="dilation should be greater than zero") + + # FIXME: https://github.com/pytorch/pytorch/issues/85656 + # error inputs for bias shape not equal to the output channels + # yield ErrorInput(SampleInput(make_arg((1, 1, 4)), args=(make_arg((1, 1, 3)), make_arg((2,)))), + # error_regex="expected bias to be 1-dimensional with 1 elements") + + # error inputs for input.ndim != weight.ndim + yield ErrorInput(SampleInput(make_arg((1, 1, 4)), args=(make_arg((1, 2)), make_arg((1,)))), + error_regex="weight should have at least three dimensions") + + # error inputs for the weight[0] are less than the number of groups + yield ErrorInput( + SampleInput(make_arg((2, 2, 4)), args=(make_arg((2, 2, 2)), make_arg((2,))), + kwargs={'padding': 'same', 'groups': 3}), error_regex="expected weight to be at least 3 at dimension 0") + + # error inputs for the weight[0] are less than the number of groups + yield ErrorInput( + SampleInput(make_arg((2, 2, 4)), args=(make_arg((2, 2, 2)), make_arg((2,))), + kwargs={'groups': 3}), error_regex="expected weight to be at least 3 at dimension 0") + + # error inputs for invalid groups + yield ErrorInput( + SampleInput(make_arg((2, 2, 4)), args=(make_arg((2, 2, 2)), make_arg((2,))), + kwargs={'padding': 'same', 'groups': -1}), error_regex="non-positive groups is not supported") + + # error inputs for invalid groups + yield ErrorInput( + SampleInput(make_arg((2, 2, 4)), args=(make_arg((2, 2, 2)), make_arg((2,))), + kwargs={'padding': 'same', 'groups': 0}), error_regex="non-positive groups is not supported") + + +def error_inputs_conv2d(opinfo, device, **kwargs): + make_arg = partial(make_tensor, device=device, dtype=torch.float64) + make_int_arg = partial(make_tensor, device=device, dtype=torch.int64) + make_complex_arg = partial(make_tensor, device=device, dtype=torch.complex128) + + # error inputs for different dtypes of input tensor and bias + yield ErrorInput( + SampleInput(make_int_arg((2, 4, 4)), args=(make_int_arg((3, 2, 3, 3)), make_arg((3,)))), + error_regex="should be the same") + + # error inputs for different dtypes of input tensor and bias + yield ErrorInput( + SampleInput(make_arg((2, 4, 4)), args=(make_arg((3, 2, 3, 3)), make_complex_arg((3,)))), + error_regex="should be the same") + + # error inputs for negative strides + yield ErrorInput( + SampleInput(make_arg((1, 1, 4, 4)), args=(make_arg((1, 2, 2, 3)), make_arg((1,))), + kwargs={'stride': (-1,)}), error_regex="non-positive stride is not supported") + + # error inputs for negative padding + yield ErrorInput( + SampleInput(make_arg((1, 1, 4, 3)), args=(make_arg((1, 2, 2, 4)), make_arg((1,))), + kwargs={'padding': (-1,)}), error_regex="negative padding is not supported") + + # error inputs for negative dilation + yield ErrorInput( + SampleInput(make_arg((1, 1, 4, 2)), args=(make_arg((1, 1, 2, 5)), make_arg((1,))), + 
kwargs={'dilation': (-1,)}), error_regex="dilation should be greater than zero") + + # FIXME: https://github.com/pytorch/pytorch/issues/85656 + # error inputs for bias shape not equal to the output channels + # yield ErrorInput(SampleInput(make_arg((1, 1, 4, 4)), args=(make_arg((1, 1, 3, 2)), make_arg((2,)))), + # error_regex="expected bias to be 1-dimensional with 1 elements") + + # error inputs for input.ndim != weight.ndim + yield ErrorInput( + SampleInput(make_arg((1, 1, 4, 3)), args=(make_arg((1, 2, 2)), make_arg((1,))), + kwargs={'padding': 'same'}), error_regex="Expected 3-dimensional input for 3-dimensional weight") + + # error inputs for the weight[0] are less than the number of groups + yield ErrorInput( + SampleInput(make_arg((2, 2, 4, 3)), args=(make_arg((2, 2, 1, 3)), make_arg((2,))), + kwargs={'groups': 3}), error_regex="expected weight to be at least 3 at dimension 0") + + # error inputs for groups the weight[0] are less than the number of groups + yield ErrorInput( + SampleInput(make_arg((2, 2, 4, 3)), args=(make_arg((2, 2, 1, 3)), make_arg((2,))), + kwargs={'padding': 'same', 'groups': 3}), error_regex="expected weight to be at least 3 at dimension 0") + + # error inputs for invalid groups + yield ErrorInput( + SampleInput(make_arg((2, 2, 4, 5)), args=(make_arg((2, 2, 1, 4)), make_arg((2,))), + kwargs={'padding': 'same', 'groups': -1}), error_regex="non-positive groups is not supported") + + # error inputs for invalid groups + yield ErrorInput( + SampleInput(make_arg((2, 2, 4, 3)), args=(make_arg((2, 2, 4, 3)), make_arg((2,))), + kwargs={'padding': 'same', 'groups': 0}), error_regex="non-positive groups is not supported") + + +def sample_inputs_conv2d(op_info, device, dtype, requires_grad, jit_fail_sample=False, **kwargs): + make_arg = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad) + + # Ordered as shapes for input, weight, bias + # and a dict of values of (stride, padding, groups, dilation) + cases: Tuple = ( + ((1, 3, 4, 4), (3, 3, 3, 3), (3,), + {'stride': (2, 2), 'padding': 2, 'groups': 1}), + ((2, 4, 8, 8), (2, 2, 3, 3), (2,), + {'stride': (3, 2), 'padding': (2, 1), 'groups': 2, 'dilation': (4, 4)}), + ((1, 4, 5, 5), (1, 4, 2, 3), (1,), + {'stride': 2, 'padding': 1, 'groups': 1, 'dilation': (2, 3)}), + ((1, 4, 5, 5), (1, 4, 2, 3), (1,), + {'stride': 2, 'padding': 1, 'groups': 1, 'dilation': (2, 3)}), + ((1, 2, 4, 3), (4, 2, 3, 4), None, + {'stride': 2, 'padding': 1, 'groups': 1}), + ((1, 4, 5, 5), (1, 4, 2, 3), (1,), + {'stride': 2, 'padding': "valid"}), + ((1, 4, 5, 5), (1, 4, 2, 3), (1,), + {'stride': 1, 'padding': "same", 'dilation': 3}), + # Below are the group related samples from common_nn.py + ((2, 4, 6, 6), (4, 1, 3, 3), (4,), {'groups': 4}), + ((2, 4, 6, 6), (8, 1, 3, 3), (8,), {'groups': 4}), + ((2, 4, 6, 6), (8, 1, 3, 3), None, {'groups': 4}), + ((2, 4, 6, 6), (4, 1, 3, 3), (4,), {'groups': 4, 'stride': (3, 2)}), + ((2, 4, 6, 6), (4, 1, 3, 3), (4,), {'groups': 4, 'padding': (1, 1)}), + ((2, 4, 5, 5), (4, 1, 2, 2), (4,), {'groups': 4, 'dilation': (2, 2)}), + ((2, 4, 6, 5), (6, 2, 3, 2), (6,), {'groups': 2}), + # With defaults + ((1, 4, 5, 5), (3, 4, 3, 3), None, {}), + ) + + for input_shape, weight, bias, kwargs in cases: + # Batched + yield SampleInput(make_arg(input_shape), args=( + make_arg(weight), + make_arg(bias) if bias is not None else bias + ), kwargs=kwargs) + # Unbatched + yield SampleInput(make_arg(input_shape[1:]), args=( + make_arg(weight), + make_arg(bias) if bias is not None else bias + ), kwargs=kwargs) + + 
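For context, a minimal sketch (not part of the patch) of how a test harness typically consumes a generator such as sample_inputs_conv2d above: each sample bundles an input tensor plus positional args (weight, bias) and keyword args, and the harness simply splats them into the op under test. It assumes only the `SampleInput(input, args=..., kwargs=...)` layout used in this file (i.e. `.input`, `.args`, `.kwargs`); the helper name `run_conv2d_sample` and the `SimpleNamespace` stand-in are hypothetical, for illustration only.

# Illustrative sketch only -- mirrors how OpInfo-style tests consume these generators.
import types
import torch
import torch.nn.functional as F

def run_conv2d_sample(sample):
    # `sample` mirrors the SampleInput layout used above: .input, .args (weight, bias), .kwargs.
    return F.conv2d(sample.input, *sample.args, **sample.kwargs)

# Stand-in for the first case in sample_inputs_conv2d above:
# input (1, 3, 4, 4), weight (3, 3, 3, 3), bias (3,), stride (2, 2), padding 2, groups 1.
sample = types.SimpleNamespace(
    input=torch.randn(1, 3, 4, 4),
    args=(torch.randn(3, 3, 3, 3), torch.randn(3)),
    kwargs={'stride': (2, 2), 'padding': 2, 'groups': 1},
)
out = run_conv2d_sample(sample)
print(out.shape)  # torch.Size([1, 3, 3, 3]); spatial size (4 + 2*2 - 3) // 2 + 1 = 3

Note that every generator in this file yields both a batched and an unbatched variant of each case, so a harness written this way exercises both code paths of the convolution op.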
+def sample_inputs_conv3d(opinfo, device, dtype, requires_grad, **kwargs): + make_arg = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad) + + # Ordered as shapes for input, weight, bias + # and dict of values of (stride, padding, dilation, groups) + cases: Tuple = ( + ((1, 1, 4, 4, 4), (1, 1, 1, 1, 1), (1,), {'padding': 'same'}), + ((1, 1, 4, 4, 4), (1, 1, 4, 4, 4), (1,), {'stride': (2, 2, 2)}), + ((1, 1, 5, 5, 5), (1, 1, 3, 3, 3), (1,), {'dilation': 2}), + ((1, 1, 1, 1, 10), (1, 1, 1, 1, 4), None, {'padding': 'valid'}), + ((1, 1, 10, 11, 12), (1, 1, 1, 2, 5), None, {'padding': 'same'}), + ((1, 1, 10, 11, 12), (1, 1, 1, 2, 5), None, {'padding': 'same', 'dilation': 2}), + ((1, 1, 10, 11, 12), (1, 1, 4, 4, 4), None, {'padding': 'same', 'dilation': 3}), + ((1, 1, 1, 1, 10), (1, 1, 1, 1, 4), None, {'padding': 'valid'}), + ((3, 9, 3, 1, 9), (3, 3, 3, 1, 9), (3,), {'groups': 3}), + ((3, 9, 3, 1, 9), (3, 3, 3, 1, 9), (3,), {'stride': (2, 2, 2), 'dilation': 1, 'groups': 3}), + ) + + for input_shape, weight, bias, kwargs in cases: + # Batched + yield SampleInput(make_arg(input_shape), args=( + make_arg(weight), + make_arg(bias) if bias is not None else bias + ), kwargs=kwargs) + # Unbatched + yield SampleInput(make_arg(input_shape[1:]), args=( + make_arg(weight), + make_arg(bias) if bias is not None else bias + ), kwargs=kwargs) + + +def error_inputs_conv3d(opinfo, device, **kwargs): + make_arg = partial(make_tensor, device=device, dtype=torch.float64) + make_int_arg = partial(make_tensor, device=device, dtype=torch.int64) + make_complex_arg = partial(make_tensor, device=device, dtype=torch.complex128) + + # error inputs for different dtypes of input tensor and bias + yield ErrorInput( + SampleInput(make_int_arg((1, 1, 4, 4, 4)), args=(make_int_arg((1, 1, 2, 2, 2)), make_arg((1,)))), + error_regex="should be the same") + + # error inputs for different dtypes of input tensor and bias + yield ErrorInput( + SampleInput(make_arg((1, 1, 4, 4, 4)), args=(make_arg((1, 1, 2, 2, 2)), make_complex_arg((1,)))), + error_regex="should be the same") + + # error inputs for negative strides + yield ErrorInput( + SampleInput(make_arg((1, 1, 4, 4, 4)), args=(make_arg((1, 1, 2, 2, 2)), make_arg((1,))), + kwargs={'stride': (-1,)}), error_regex="non-positive stride is not supported") + + # error inputs for negative padding + yield ErrorInput( + SampleInput(make_arg((1, 1, 4, 4, 4)), args=(make_arg((1, 1, 2, 2, 2)), make_arg((1,))), + kwargs={'padding': (-1,)}), error_regex="negative padding is not supported") + + # error inputs for negative dilation + yield ErrorInput( + SampleInput(make_arg((1, 1, 4, 4, 4)), args=(make_arg((1, 1, 2, 2, 2)), make_arg((1,))), + kwargs={'dilation': (-1,)}), error_regex="dilation should be greater than zero") + + # FIXME: https://github.com/pytorch/pytorch/issues/85656 + # error inputs for bias shape not equal to the output channels + # yield ErrorInput(SampleInput(make_arg((1, 1, 4, 4, 4)), args=(make_arg((1, 1, 3, 3, 3)), make_arg((2,)))), + # error_regex="expected bias to be 1-dimensional with 1 elements") + + # error inputs for input.ndim != weight.ndim + yield ErrorInput( + SampleInput(make_arg((1, 1, 3, 4, 5)), args=(make_arg((1, 1, 4, 3)), make_arg((1,))), + kwargs={'padding': 'same'}), error_regex="Expected 4-dimensional input for 4-dimensional weight") + + # error inputs for the weight[0] are less than the number of groups + yield ErrorInput( + SampleInput(make_arg((2, 2, 3, 4, 5)), args=(make_arg((2, 2, 4, 3, 3)), + make_arg((2,))), kwargs={'groups': 
3}), + error_regex="expected weight to be at least 3 at dimension 0") + + # error inputs for the weight[0] are less than the number of groups + yield ErrorInput( + SampleInput(make_arg((2, 2, 3, 4, 5)), args=(make_arg((2, 2, 4, 3, 3)), + make_arg((2,))), kwargs={'padding': 'same', 'groups': 3}), + error_regex="expected weight to be at least 3 at dimension 0") + + # error inputs for invalid groups + yield ErrorInput( + SampleInput(make_arg((2, 2, 3, 4, 5)), args=(make_arg((2, 2, 4, 3, 3)), + make_arg((2,))), kwargs={'padding': 'same', 'groups': 0}), + error_regex="non-positive groups is not supported") + + # error inputs for padding='same' not supported by strided convolutions + yield ErrorInput( + SampleInput(make_arg((18, 27, 9, 1, 9)), args=(make_arg((9, 9, 9, 1, 9)), + make_arg((9,))), kwargs={'stride': 2, 'padding': 'same', 'groups': 3}), + error_regex="padding='same' is not supported for strided convolutions") + + +def sample_inputs_group_norm(opinfo, device, dtype, requires_grad, **kwargs): + make_arg = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad) + + # Ordered as input shape, num groups, and kwargs for eps + cases: Tuple[Tuple[int], int, float] = ( # type: ignore[assignment] + ((1, 6, 3), 2, {'eps' : 0.5}), + ((2, 6, 3), 2, {'eps' : -0.5}), + ((1, 3), 1, {'eps' : 1e-5}), + ((0, 2), 1, {'eps' : 1e-5}), + ((S, S, S), 1, {'eps' : 0.5}), + ) + + # num_channels is inferred to be input.shape[1] dimension + for input_shape, num_groups, kwargs in cases: + # Shape of weight and bias should be the same as num_channels + channels = input_shape[1] if len(input_shape) > 1 else 0 + weight_tensor = make_arg(channels) + bias_tensor = make_arg(channels) + + # Checking for permutations of weights and biases as `None` + weights = [weight_tensor, None] + biases = [bias_tensor, None] + for weight, bias in itertools.product(weights, biases): + kwargs = { + 'weight': weight, + 'bias': bias, + **kwargs + } + yield SampleInput(make_arg(input_shape), num_groups, **kwargs) + + # Without any optional args + yield SampleInput(make_arg((1, 2)), args=(1,)) + +def reference_inputs_group_norm(op_info, device, dtype, requires_grad, **kwargs): + yield from sample_inputs_group_norm( + op_info, device, dtype, requires_grad, **kwargs) + + make_arg = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad) + + # Ordered as input shape, num groups, and kwargs for eps + cases: Tuple[Tuple[int], int, float] = ( # type: ignore[assignment] + ((20, 6, 10, 10), 3, {'eps' : 1e-5}), + # equivalent with InstanceNorm + # GroupNorm(C, num_groups=C) == InstanceNorm(num_features=C) + ((20, 6, 10, 10), 6, {'eps' : 1e-5}), + # equivalent with LayerNorm + # GroupNorm(C, num_groups=1, affine=False) == LayerNorm(normalized_shape=[C, H, W], elementwise_affine=False) + ((20, 6, 10, 10), 1, {'eps' : 1e-5}), + ) + + # num_channels is inferred to be input.shape[1] dimension + for input_shape, num_groups, kwargs in cases: + # Shape of weight and bias should be the same as num_channels + channels = input_shape[1] if len(input_shape) > 1 else 0 + input_tensor = make_arg(input_shape) + weight_tensor = make_arg(channels) + bias_tensor = make_arg(channels) + + # Checking for permutations of weights and biases as `None` + weights = [weight_tensor, None] + biases = [bias_tensor, None] + for weight, bias in itertools.product(weights, biases): + kwargs = { + 'weight': weight, + 'bias': bias, + **kwargs + } + yield SampleInput(input_tensor, num_groups, **kwargs) + + +def 
sample_inputs_instance_norm(opinfo, device, dtype, requires_grad, **kwargs): + make_arg = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad) + make_arg_without_requires_grad = partial(make_tensor, device=device, dtype=dtype, requires_grad=False) + + # Ordered as: input shape, kwargs for momentum, eps + cases: Tuple[Tuple[int], dict] = ( # type: ignore[assignment] + ((S, S, S), {'momentum': 0.5, 'eps': 0.6}), + ((S, S, S), {'momentum': 0.5, 'eps': 0.6, 'use_input_stats': True}), + ((3, 2, 4), {'momentum': -1.2}), + ((3, 2, 4), {'momentum': 0.0}), + ((3, 2, 3, 4), {'momentum': -1.0, 'eps': 0.5}), + ((3, 2, 3, 4), {'momentum': -1.0, 'eps': 0.5}), + ) + + for input_shape, kwargs in cases: + # args: running mean, running var, weight and bias should necessarily be of shape: (channels,) + channels = input_shape[1] + weight = make_arg(channels) + bias = make_arg(channels) + running_mean = make_arg_without_requires_grad(channels, low=0) + running_var = make_arg_without_requires_grad(channels, low=0) + new_kwargs = { + 'running_mean': running_mean, + 'running_var': running_var, + 'weight': weight, + 'bias': bias, + **kwargs + } + + yield SampleInput( + make_arg(input_shape), + args=(), + kwargs=new_kwargs + ) + + # Checking for permutations of weights and biases as `None` + # instance_norm assumes that if there's a bias, there's a weight + weights = [channels, None] + biases = [None, None] + + for weight_channels, bias_channels in zip(weights, biases): + running_mean = make_arg_without_requires_grad(channels, low=0) + running_var = make_arg_without_requires_grad(channels, low=0) + yield SampleInput( + make_arg(input_shape), + args=(), + kwargs={ + 'running_mean': running_mean, + 'running_var': running_var, + 'weight': make_arg(weight_channels) if weight_channels is not None else None, + 'bias': make_arg(bias_channels) if bias_channels is not None else None + } + ) + + # Test case for no optional kwargs + yield SampleInput(make_arg((1, 2, 3)), kwargs={}) + + +def sample_inputs_layer_norm(opinfo, device, dtype, requires_grad, **kwargs): + make_arg = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad) + + # Ordered as input shape, normalized_shape and a kwarg dict for eps + cases: Tuple[Tuple[int], Tuple[int], dict] = ( # type: ignore[assignment] + ((1, 2, 3), (1, 2, 3), {'eps': 0.5}), + ((2, 2, 3), (2, 3), {'eps': -0.5}), + ((1,), (1,), {}), + ((1, 2), (2,), {}), + ((0, 1), (1,), {}), + ) + + for input_shape, normalized_shape, kwargs in cases: + # Shape of weight and bias should be the same as normalized_shape + weight = make_arg(normalized_shape) + bias = make_arg(normalized_shape) + yield SampleInput( + make_arg(input_shape), + args=(normalized_shape, weight, bias), + kwargs=kwargs + ) + # Without any optional args + yield SampleInput(make_arg((1, 2)), args=((2,),)) + + # TODO: @krshrimali, once to_numpy method in SampleInput class is modified to take None inputs, + # enable these inputs; see https://github.com/pytorch/pytorch/pull/63276#discussion_r691950400 + + # With weight and a `None` bias + # yield SampleInput(make_arg((1, 2)), args=((2,), make_arg((2,)), None)) + + # With `None` weight and bias (tests failing for this, see the link above) + # yield SampleInput(make_arg((1, 2)), args=((2,), None, make_arg((2,)))) + + +def sample_inputs_native_layer_norm(opinfo, device, dtype, requires_grad, **kwargs): + make_arg = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad) + + # Ordered as input shape, normalized_shape, eps 
+ cases: Tuple[Tuple[int], Tuple[int], float] = ( # type: ignore[assignment] + ((1, 2, 3), (1, 2, 3), 0.5), + ((2, 2, 3), (2, 3), -0.5), + ((1,), (1,), 1e-5), + ((1, 2), (2,), 1e-5), + ((0, 1), (1,), 1e-5), + ) + + for input_shape, normalized_shape, eps in cases: + # Shape of weight and bias should be the same as normalized_shape + weight = make_arg(normalized_shape) + bias = make_arg(normalized_shape) + yield SampleInput( + make_arg(input_shape), + args=(normalized_shape, weight, bias, eps), + ) + yield SampleInput( + make_arg(input_shape), + args=(normalized_shape, None, bias, eps), + ) + yield SampleInput( + make_arg(input_shape), + args=(normalized_shape, weight, None, eps), + ) + yield SampleInput( + make_arg(input_shape), + args=(normalized_shape, None, None, eps), + ) + +def error_inputs_group_norm(opinfo, device, **kwargs): + make_arg = partial(make_tensor, device=device, dtype=torch.float32, requires_grad=False) + + # check that input has minimum number of dimensions + err_msg1 = "Expected at least 2 dimensions for input tensor but received" + s1 = SampleInput(make_arg(1), args=(1,)) + yield ErrorInput(s1, error_regex=err_msg1) + + # check that the channels dimension is compatible with number of groups + err_msg2 = "Expected number of channels in input to be divisible by num_groups, but got input of shape" + s2 = SampleInput(make_arg((2, 7, 4)), args=(2,)) + yield ErrorInput(s2, error_regex=err_msg2) + +def error_inputs_native_layer_norm(opinfo, device, **kwargs): + make_arg = partial(make_tensor, device=device, dtype=torch.float32, requires_grad=False) + input_shape = (1, 2, 3) + + err_msg1 = "Expected normalized_shape to be at least 1-dimensional" + s1 = SampleInput( + make_arg(input_shape), args=(tuple(), None, None, 1e-5) + ) + yield ErrorInput(s1, error_regex=err_msg1) + + normalized_shape = (1, 2, 3) + weight = make_arg((1, 2)) + err_msg2 = "Expected weight to be of same shape as normalized_shape" + s2 = SampleInput( + make_arg(input_shape), args=(normalized_shape, weight, None, 1e-5) + ) + yield ErrorInput(s2, error_regex=err_msg2) + + bias = make_arg((1, 2)) + err_msg3 = "Expected bias to be of same shape as normalized_shape" + s3 = SampleInput( + make_arg(input_shape), args=(normalized_shape, None, bias, 1e-5) + ) + yield ErrorInput(s3, error_regex=err_msg3) + + err_msg4 = "Given normalized_shape=" + s4 = SampleInput( + make_arg((2, 2, 3)), args=((2, 2), None, None, 1e-5) + ) + yield ErrorInput(s4, error_regex=err_msg4) + + +def sample_inputs_local_response_norm(opinfo, device, dtype, requires_grad, **kwargs): + make_arg = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad) + + # Ordered as input shape, size and a kwarg dict for alpha, beta, and k + cases: Tuple[Tuple[int], Tuple[int], dict] = ( # type: ignore[assignment] + ((1, 6, 3), 2, {'alpha': 3e-05, 'beta': 0.5, 'k': 1.25}), + ((1, 6, 3), 2, {'beta': 0.5, 'k': 1.25}), + ((1, 6, 3), 2, {'alpha': 3e-05, 'k': 1.25}), + ((1, 6, 3), 2, {'alpha': 3e-05, 'beta': 0.5}), + ((1, 6, 3), 2, {'alpha': 3e-05}), + ((1, 6, 3), 2, {'beta': 0.5}), + ((1, 6, 3), 2, {'k': 1.25}), + ((1, 6, 3), 2, {}), + ((2, 6, 3), 2, {'alpha': 3e-05, 'beta': 0.5, 'k': 1.25}), + ((1, 1, 2), 1, {'alpha': 3e-05, 'beta': 0.5, 'k': 1.25}), + ((0, 1, 2), 1, {'alpha': 3e-05, 'beta': 0.5, 'k': 1.25}), + ) + + for input_shape, size, kwargs in cases: + yield SampleInput(make_arg(input_shape), args=(size,), kwargs=kwargs) + +def sample_inputs_hardswish(self, device, dtype, requires_grad, **kwargs): + N = 5 + # make sure we are testing 
-3 -> 3 range. default is -10 -> 10 so maybe unnecessary ? + make_arg = partial(make_tensor, device=device, dtype=dtype, + requires_grad=requires_grad, low=-5, high=5) + return (SampleInput(make_arg((N * 2, N * 2))) for _ in range(1, N)) + +def sample_inputs_linear(self, device, dtype, requires_grad, **kwargs): + features_options = [[3, 4], [8, 8]] + batch_options: List[List[int]] = [ + [], # no batch + [0], + [8], + [2, 3], + ] + create_tensor = partial(make_tensor, device=device, dtype=dtype, + requires_grad=requires_grad, low=-2, high=2) + + for has_bias, (in_feat, out_feat), batch_shape in \ + itertools.product([True, False], features_options, batch_options): + input_tensor = create_tensor(batch_shape + [in_feat]) + weight = create_tensor([out_feat, in_feat]) + if not has_bias: + yield SampleInput(input_tensor, weight) + continue + + bias = create_tensor([out_feat]) + yield SampleInput(input_tensor, weight, bias) + +def sample_inputs_bilinear(self, device, dtype, requires_grad, **kwargs): + features_options = [[3, 4, 5], [8, 8, 8]] + batch_options: List[List[int]] = [ + [], # no batch + [0], + [8], + [2, 3], + ] + create_tensor = partial(make_tensor, device=device, dtype=dtype, + requires_grad=requires_grad, low=-2, high=2) + + for has_bias, (in_feat1, in_feat2, out_feat), batch_shape in \ + itertools.product([True, False], features_options, batch_options): + input_tensor1 = create_tensor(batch_shape + [in_feat1]) + input_tensor2 = create_tensor(batch_shape + [in_feat2]) + weight = create_tensor([out_feat, in_feat1, in_feat2]) + if not has_bias: + yield SampleInput(input_tensor1, input_tensor2, weight) + continue + bias = create_tensor([out_feat]) + yield SampleInput(input_tensor1, input_tensor2, weight, bias) + +def sample_inputs_glu(self, device, dtype, requires_grad, **kwargs): + features_options = [[2], [2, 4], [8, 8], [3, 6, 8], [1, 4, 6, 7]] + batch_options: List[List[int]] = [ + [], # no batch + [0], + [8], + [2, 3], + ] + create_tensor = partial(make_tensor, device=device, dtype=dtype, + requires_grad=requires_grad, low=-2, high=2) + + for features, batch_shape in itertools.product(features_options, batch_options): + ndim = len(features) + len(batch_shape) + for dim in range(ndim): + input_tensor = create_tensor(batch_shape + features) + dim_size = input_tensor.size(dim) + if dim_size > 0 and dim_size % 2 == 0: + yield SampleInput(input_tensor, dim) + +def sample_inputs_interpolate(mode, self, device, dtype, requires_grad, **kwargs): + N, C = 2, 3 + D = 4 + S = 3 + L = 5 + + align_corners_options: Tuple[Any, ...] 
= (None,) + if mode in ('linear', 'bilinear', 'bicubic', 'trilinear'): + align_corners_options = (True, False, None) + ranks_for_mode = { + 'nearest': [1, 2, 3], + 'nearest-exact': [1, 2, 3], + 'linear': [1], + 'bilinear': [2], + 'bicubic': [2], + 'trilinear': [3], + 'area': [1, 2, 3] + } + + def shape(size, rank, with_batch_channel=True): + if with_batch_channel: + return tuple([N, C] + ([size] * rank)) + return tuple([size] * rank) + + if mode in ('bilinear', 'bicubic') and dtype == torch.uint8: + make_arg = partial( + make_tensor, + device=device, + dtype=dtype, + requires_grad=requires_grad, + # we pick more realistic upper bound 256 instead of default 10 for uint8 dtype + high=256 if dtype == torch.uint8 else None, + ) + # provide few samples for a more close to typical image processing usage + rank = 2 + for memory_format in [torch.contiguous_format, torch.channels_last]: + yield SampleInput( + make_arg(shape(270, rank), memory_format=memory_format), + shape(130, rank, False), + scale_factor=None, + mode=mode, + align_corners=False, + ) + + make_arg = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad) + + for align_corners in align_corners_options: + for rank in ranks_for_mode[mode]: + yield SampleInput( + make_arg(shape(D, rank)), + shape(S, rank, False), + scale_factor=None, + mode=mode, + align_corners=align_corners, + ) + yield SampleInput( + make_arg(shape(D, rank)), + shape(L, rank, False), + scale_factor=None, + mode=mode, + align_corners=align_corners, + ) + for recompute_scale_factor in [False, True]: + for scale_factor in [1.7, 0.6]: + yield SampleInput( + make_arg(shape(D, rank)), + size=None, + scale_factor=scale_factor, + mode=mode, + align_corners=align_corners, + recompute_scale_factor=recompute_scale_factor, + ) + +def reference_inputs_interpolate(mode, self, device, dtype, requires_grad, **kwargs): + yield from sample_inputs_interpolate(mode, self, device, dtype, requires_grad, **kwargs) + + if mode in ('bilinear', 'bicubic'): + make_arg = partial( + make_tensor, + device=device, + dtype=dtype, + requires_grad=requires_grad, + # we pick more realistic upper bound 256 instead of default 10 for uint8 dtype + high=256 if dtype == torch.uint8 else None, + ) + # provide few samples for more typical image processing usage + for memory_format in [torch.contiguous_format, torch.channels_last]: + for aa in [True, False]: + yield SampleInput( + make_arg((2, 3, 345, 456), memory_format=memory_format), + (270, 270), + scale_factor=None, + mode=mode, + align_corners=False, + antialias=aa, + ) + +def sample_inputs_upsample(mode, self, device, dtype, requires_grad, **kwargs): + N, C = 2, 3 + D = 4 + S = 3 + L = 5 + + ranks_for_mode = { + 'nearest': [1, 2, 3], + 'bilinear': [2], + } + + def shape(size, rank, with_batch_channel=True): + if with_batch_channel: + return torch.Size([N, C] + ([size] * rank)) + return torch.Size([size] * rank) + + make_arg = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad) + + for rank in ranks_for_mode[mode]: + yield SampleInput(make_arg(shape(D, rank)), size=shape(S, rank, False)) + yield SampleInput(make_arg(shape(D, rank)), size=shape(L, rank, False)) + yield SampleInput(make_arg(shape(D, rank)), scale_factor=1.7) + yield SampleInput(make_arg(shape(D, rank)), scale_factor=0.6) + +def reference_inputs_upsample(mode, self, device, dtype, requires_grad, **kwargs): + yield from sample_inputs_upsample(mode, self, device, dtype, requires_grad, **kwargs) + + if mode in ('bilinear', ): + make_arg = 
partial( + make_tensor, + device=device, + dtype=dtype, + requires_grad=requires_grad, + # we pick more realistic upper bound 256 instead of default 10 for uint8 dtype + high=256 if dtype == torch.uint8 else None, + ) + # provide a single sample for more typical image processing usage + for memory_format in [torch.contiguous_format, torch.channels_last]: + yield SampleInput( + make_arg((2, 3, 345, 456), memory_format=memory_format), + (270, 270), + ) + +def sample_inputs_upsample_aa(mode, self, device, dtype, requires_grad, **kwargs): + N = 6 + C = 3 + H = 10 + W = 20 + S = 3 + L = 5 + + input_tensor = make_tensor(torch.Size([N, C, H, W]), device=device, dtype=dtype, requires_grad=requires_grad) + + yield SampleInput(input_tensor, output_size=torch.Size([S, S]), align_corners=False, scale_factors=None) + yield SampleInput(input_tensor, output_size=torch.Size([L, L]), align_corners=False, scale_factors=None) + yield SampleInput(input_tensor, output_size=None, align_corners=False, scale_factors=[1.7, 0.9]) + yield SampleInput(input_tensor, output_size=None, align_corners=True, scale_factors=[0.8, 1.0]) + + yield SampleInput(input_tensor, output_size=torch.Size([S, S]), align_corners=False, scales_h=None, scales_w=None) + yield SampleInput(input_tensor, output_size=torch.Size([S, S]), align_corners=False, scales_h=1.7, scales_w=0.9) + yield SampleInput(input_tensor, output_size=torch.Size([S, S]), align_corners=True, scales_h=1.7, scales_w=0.9) + +def sample_inputs_gelu(self, device, dtype, requires_grad, **kwargs): + N = 5 + for _ in range(1, N): + for approximate in ['none', 'tanh']: + yield SampleInput( + make_tensor((N * 2, N * 2), device=device, dtype=dtype, + requires_grad=requires_grad, low=-3, high=3), + approximate=approximate) + + +def error_inputs_gelu(op, device, **kwargs): + # Tests that gelu errors out when passed an approximation we don't know. + yield ErrorInput(SampleInput(make_tensor((), dtype=torch.float, device=device), kwargs={"approximate": "asdf"}), + error_regex="approximate argument must be either") + + +def sample_inputs_max_min_reduction_with_dim(op_info, device, dtype, requires_grad, **kwargs): + inputs = [] + args_for_reduction_with_dim = ( + ((S, S, S), (1,),), + ((S, S, S), (1, True, ),), + ((), (0,),), + ((), (0, True,),), + ) + return ((SampleInput(make_tensor(input_tensor, dtype=dtype, device=device, + low=None, high=None, + requires_grad=requires_grad), + *args)) + for input_tensor, args in args_for_reduction_with_dim) + +def sample_inputs_max_min_reduction_no_dim(op_info, device, dtype, requires_grad, **kwargs): + make_arg = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad, low=None, high=None) + yield SampleInput(make_arg((S, S, S))) + yield SampleInput(make_arg(())) + +def _generate_nan_reduction_inputs(device, dtype, requires_grad, **kwargs): + yield from _generate_reduction_inputs(device, dtype, requires_grad) + # NaN only exists for floating point numbers + if dtype.is_complex or dtype.is_floating_point: + yield torch.tensor([2, torch.nan, -1], device=device, dtype=dtype, requires_grad=requires_grad) + yield torch.tensor([[torch.nan, 2], [0, 1]], device=device, dtype=dtype, requires_grad=requires_grad) + +def sample_inputs_nan_reduction(supports_multiple_dims): + # Generates sample inputs for reduction ops that contain the input tensor + # and dim and keepdim kwargs. 
If a reduction op needs to test additional + # args/kwargs then create a separate sample_inputs function + def fn(op_info, device, dtype, requires_grad, **kwargs): + for t in _generate_nan_reduction_inputs(device, dtype, requires_grad): + # Add case without dim and keepdim kwargs + yield SampleInput(t.clone().requires_grad_(requires_grad)) + for kwargs in _generate_reduction_kwargs(t.ndim, supports_multiple_dims): + yield SampleInput(t.clone().requires_grad_(requires_grad), **kwargs) + + return fn + +def sample_inputs_reduction_quantile(op_info, device, dtype, requires_grad, **kwargs): + test_quantiles = (0.5, make_tensor((2,), dtype=dtype, device=device, low=0, high=1, requires_grad=requires_grad)) + test_interpolations = ['linear', 'midpoint'] + + for quantiles in test_quantiles: + for t in _generate_reduction_inputs(device, dtype, requires_grad): + # Add case without dim and keepdim kwargs + input = t.clone().requires_grad_(requires_grad) + yield SampleInput(input, quantiles) + for kwargs in _generate_reduction_kwargs(t.ndim, supports_multiple_dims=False): + # Interpolation kwarg for now is only supported when providing both dim and keepdim + kwargs.setdefault('dim', 0) + kwargs.setdefault('keepdim', False) + for interpolation in test_interpolations: + kwargs['interpolation'] = interpolation + input = t.clone().requires_grad_(requires_grad) + yield SampleInput(input, quantiles, **kwargs) + +def sample_inputs_reduction_count_nonzero(*args, **kwargs): + """Sample inputs for count_nonzero""" + # count_nonzero does not support keepdim yet + for sample in sample_inputs_reduction(*args, **kwargs): + sample.kwargs.pop('keepdim', None) + yield sample + +def sample_inputs_leaky_relu(op_info, device, dtype, requires_grad, **kwargs): + N = 10 + make_arg = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad) + return (SampleInput(make_arg((N, N))) for _ in range(1, N)) + +def sample_inputs_fractional_max_pool2d(op_info, device, dtype, requires_grad, **kwargs): + make_arg = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad) + + # Order: input_shape, kernel_size + cases = (((1, 3, 9, 9), 3), + ((1, 3, 9, 9), (4, 4)), + ((1, 3, 9, 9), (6, 6)), + ((2, 3, 9, 9), (3, 3)), + ((1, 1, 4, 4), (2, 2)), + ((1, 2, 6, 6), (4, 4))) + + for input_shape, kernel_size in cases: + for return_indices in [False, True]: + # test case passing a single output size + yield SampleInput( + make_arg(input_shape), + kernel_size, + output_size=2, + return_indices=return_indices, + ) + + # test case passing a tuple output size + yield SampleInput( + make_arg(input_shape), + kernel_size, + output_size=(2, 3), + return_indices=return_indices, + ) + + # test case passing an output ratio + yield SampleInput( + make_arg(input_shape), + kernel_size, + output_ratio=(0.5, 0.5), + return_indices=return_indices, + ) + +def sample_inputs_fractional_max_pool3d(op_info, device, dtype, requires_grad, **kwargs): + make_arg = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad) + + # Order: input_shape, kernel_size + cases = (((2, 3, 5, 5, 5), (2, 2, 2)), + ((1, 2, 6, 5, 4), 2), + ((1, 2, 5, 6, 5), (2, 3, 2)), + ((1, 2, 6, 6, 6), (2, 3, 2)), + ((1, 1, 7, 6, 7), (2, 3, 4)), + ((1, 1, 4, 5, 4), (2, 2, 1)), + ((1, 1, 8, 7, 6), (4, 3, 2)), + ((0, 1, 4, 5, 4), (2, 2, 1))) + + for input_shape, kernel_size in cases: + for return_indices in [False, True]: + # test case passing a single output size + yield SampleInput( + make_arg(input_shape), + kernel_size, + output_size=2, + 
return_indices=return_indices, + ) + + # test case passing a tuple output size + yield SampleInput( + make_arg(input_shape), + kernel_size, + output_size=(2, 3, 2), + return_indices=return_indices, + ) + + # test case passing an output ratio + yield SampleInput( + make_arg(input_shape), + kernel_size, + output_ratio=(0.5, 0.5, 0.5), + return_indices=return_indices, + ) + +def sample_inputs_avgpool2d(op_info, device, dtype, requires_grad, **kwargs): + make_arg = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad) + + # Order: input_shape, kernel_size, stride, padding, ceil_mode, count_include_pad, divisor_override + cases = (((1, 3, 9, 9), 3, 1, 1, True, False, 2), + ((1, 3, 9, 9), (4, 4), (2, 3), 1, True, False, 2), + ((1, 3, 9, 9), (6, 6), (3, 3), (2, 3), True, True, 2), + ((2, 3, 9, 9), (3, 3), (1, 1), (1, ), True, False, 2), + ((1, 1, 4, 4), (2, 2), (), (0, ), False, True, -2), + ((1, 2, 6, 6), (4, 4), (2, 2), (2, ), True, True, None)) + + for input_shape, kernel_size, stride, padding, ceil_mode, count_include_pad, divisor_override in cases: + yield SampleInput(make_arg(input_shape), + args=(kernel_size, stride, padding, ceil_mode, count_include_pad, divisor_override)) + # Case with just input_shape and kernel_size + yield SampleInput(make_arg((1, 3, 9, 9)), args=((3, 3))) + +def sample_inputs_avgpool1d(op_info, device, dtype, requires_grad, **kwargs): + make_arg = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad) + + # Order: input_shape, kernel_size, kwargs + cases: List[Tuple[Tuple[int, ...], Union[int, Tuple[int, ...]], Dict]] = [ + ((2, 3, 9), (3,), {}), + ((1, 3, 9), 3, dict(stride=1, padding=1, ceil_mode=True, count_include_pad=False)), + ((1, 3, 9), (6,), dict(stride=(3,), padding=(2,), ceil_mode=True, count_include_pad=True)), + ((2, 3, 9), (3,), dict(stride=(1,), padding=(1,), ceil_mode=False, count_include_pad=True)), + ((0, 3, 9), (6,), dict(stride=(3,), padding=(2,), ceil_mode=False, count_include_pad=True)), + ((1, 2, 9), (7,), dict(stride=(3,), padding=(2,), ceil_mode=False)), + ((1, 2, 9), (7,), dict(stride=(3,), padding=(3,), ceil_mode=True)), + ((1, 2, 9), (7,), dict(stride=(3,), ceil_mode=False)), + ((1, 2, 9), (7,), dict(stride=(3,), ceil_mode=True)), + ] + + for input_shape, kernel_size, kwargs in cases: + yield SampleInput(make_arg(input_shape), args=(kernel_size,), kwargs=kwargs) + +def sample_inputs_avgpool3d(op_info, device, dtype, requires_grad, **kwargs): + make_arg = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad) + + # Order: input_shape, kernel_size, stride, padding, ceil_mode, count_include_pad, divisor_override + cases: List[Tuple[Tuple[int, ...], Union[int, Tuple[int, ...]], Dict]] = [ + ((2, 3, 3, 4, 4), (2, 2, 2), {}), + ((1, 2, 4, 4, 4), 2, dict(stride=1, padding=1, ceil_mode=True, + count_include_pad=False, divisor_override=2)), + ((1, 2, 5, 5, 5), (2, 3, 4), dict(stride=(1, 2, 2), padding=(0, 1, 2), ceil_mode=True, + count_include_pad=True, divisor_override=2)), + ((1, 2, 5, 5, 5), (2, 3, 4), dict(stride=(1, 2, 2), padding=(0, 1, 2), ceil_mode=False)), + ((1, 1, 7, 5, 7), (6, 3, 4), dict(stride=(2, 3, 2), padding=(3, 1, 0), ceil_mode=False, + count_include_pad=False, divisor_override=2)), + ((1, 1, 4, 5, 4), (2, 2, 3), dict(stride=(2, 2, 1), padding=0, ceil_mode=False, + count_include_pad=True, divisor_override=-2)), + ((1, 1, 6, 5, 6), (4, 5, 6), dict(stride=(2, 3, 2), padding=2, ceil_mode=True, + count_include_pad=True, divisor_override=None)), + ((0, 1, 4, 5, 4), 
(2, 3, 1), dict(stride=(2, 1, 2), padding=0, ceil_mode=False, + count_include_pad=True, divisor_override=None)), + ] + + for input_shape, kernel_size, kwargs in cases: + yield SampleInput(make_arg(input_shape), args=(kernel_size,), kwargs=kwargs) + +def error_inputs_avg_pool1d(op_info, device, **kwargs): + # error inputs when pad is negative + x = torch.rand([0, 1, 49], dtype=torch.float32) + yield ErrorInput(SampleInput(x, kwargs={'kernel_size': 2, 'stride': 50, 'padding': -1}), + error_regex='pad must be non-negative') + + # error inputs when pad > kernel_size / 2 + yield ErrorInput(SampleInput(x, kwargs={'kernel_size': 2, 'stride': 50, 'padding': 4}), + error_regex='pad should be at most half of kernel size') + +def error_inputs_avg_pool2d(op_info, device, **kwargs): + # error inputs when pad is negative + x = torch.rand([0, 1, 49], dtype=torch.float32) + yield ErrorInput(SampleInput(x, kwargs={'kernel_size': 2, 'stride': 50, 'padding': -1}), + error_regex='pad must be non-negative') + # 2-dimensional kernel + yield ErrorInput(SampleInput(x, kwargs={'kernel_size': (3, 2), 'stride': 50, 'padding': -1}), + error_regex='pad must be non-negative') + + # error inputs when pad > kernel_size / 2 + yield ErrorInput(SampleInput(x, kwargs={'kernel_size': 2, 'stride': 50, 'padding': 4}), + error_regex='pad should be at most half of kernel size') + # 2-dimensional kernel + yield ErrorInput(SampleInput(x, kwargs={'kernel_size': (3, 2), 'stride': 50, 'padding': 4}), + error_regex='pad should be at most half of kernel size') + + # error inputs for zero divisor + x = torch.zeros(3, 3, 3) + yield ErrorInput(SampleInput(x, kwargs={'kernel_size': (2, 2), 'divisor_override': 0}), + error_regex='divisor must be not zero') + +def error_inputs_avg_pool3d(op_info, device, **kwargs): + # error inputs when pad is negative + x = torch.rand([0, 1, 49, 50], dtype=torch.float32) + yield ErrorInput(SampleInput(x, kwargs={'kernel_size': 2, 'stride': 50, 'padding': -1}), + error_regex='pad must be non-negative') + # 3-dimensional kernel + yield ErrorInput(SampleInput(x, kwargs={'kernel_size': (3, 2, 2), 'stride': 50, 'padding': -1}), + error_regex='pad must be non-negative') + + # error inputs when pad > kernel_size / 2 + yield ErrorInput(SampleInput(x, kwargs={'kernel_size': 2, 'stride': 50, 'padding': 4}), + error_regex='pad should be at most half of kernel size') + # 3-dimensional kernel + yield ErrorInput(SampleInput(x, kwargs={'kernel_size': (3, 2, 2), 'stride': 50, 'padding': 4}), + error_regex='pad should be at most half of kernel size') + + # error inputs for zero divisor + x = torch.zeros(3, 3, 3, 3) + yield ErrorInput(SampleInput(x, kwargs={'kernel_size': (2, 2, 2), 'divisor_override': 0}), + error_regex='divisor must be not zero') + + # error inputs for invalid input dimension + x = torch.rand([0, 1, 49], dtype=torch.float32) + yield ErrorInput(SampleInput(x, kwargs={'kernel_size': 2, 'stride': 50, 'padding': 0}), + error_regex='non-empty 4D or 5D') + + +def sample_inputs_to(op_info, device, dtype, requires_grad, **kwargs): + make_arg = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad) + # test_multiple_devices_to_cuda would fail if we use a different device than given + devices = [device] + if torch.device(device).type == 'cpu': + devices = [torch.device('cpu'), torch.device('cuda:0')] if torch.cuda.is_available() else devices + memory_formats = [torch.preserve_format, torch.channels_last] + + # TODO: can't switch `to.device` overload to use positional arguments + # 
https://github.com/pytorch/pytorch/issues/84265 + # to.device overload + for device, nb, cp, mem_f in product(devices, [True, False], [True, False], memory_formats): + kwargs = { + "memory_format": mem_f, + } + yield SampleInput(make_arg((S, S, S, S)), args=(device, torch.float64, nb, cp), kwargs=kwargs) + + # to.dtype overload + for nb, cp, mem_f in product([True, False], [True, False], memory_formats): + kwargs = { + "memory_format": mem_f, + } + yield SampleInput(make_arg((S, S, S, S)), args=(torch.float64, nb, cp), kwargs=kwargs) + + # to.other overload + for device, nb, cp, mem_f in product(devices, [True, False], [True, False], memory_formats): + kwargs = { + "memory_format": mem_f, + } + other = make_arg((S, S, S, S), dtype=torch.float64, device=device) + yield SampleInput(make_arg((S, S, S, S)), args=(other, nb, cp), kwargs=kwargs) + + +def sample_inputs_topk(op_info, device, dtype, requires_grad, **kwargs): + def get_tensor_input(size): + return make_tensor(size, dtype=dtype, device=device, requires_grad=requires_grad) + + yield SampleInput(get_tensor_input((S, M, S)), 3) + yield SampleInput(get_tensor_input((S, M, S)), 3, 1) + yield SampleInput(get_tensor_input((S, M, S)), 3, -2) + yield SampleInput(get_tensor_input((S, M, S)), 3, 1, True) + yield SampleInput(get_tensor_input((S, M, S)), 3, -2, True) + yield SampleInput(get_tensor_input((S, M, S)), 3, 1, True, True) + yield SampleInput(get_tensor_input((S, M, S)), 3, -2, True, True) + + yield SampleInput(get_tensor_input(()), 1) + yield SampleInput(get_tensor_input(()), 1, 0) + yield SampleInput(get_tensor_input(()), 1, -1) + yield SampleInput(get_tensor_input(()), 1, 0, True) + yield SampleInput(get_tensor_input(()), 1, -1, True) + yield SampleInput(get_tensor_input(()), 1, 0, True, True) + yield SampleInput(get_tensor_input(()), 1, -1, True, True) + +def sample_inputs_outer(op_info, device, dtype, requires_grad, **kwargs): + make_arg = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad) + yield SampleInput(make_arg(S), make_arg(M)) + +def sample_inputs_dist(op_info, device, dtype, requires_grad, **kwargs): + make_arg = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad) + sizes = ((S, S, S), (S,), (S, 1, S), (), (S, S)) + ps = (2, 4) + + for size_x, size_y, p in product(sizes, sizes, ps): + yield SampleInput(make_arg(size_x), args=(make_arg(size_y), p)) + +# Missing to test the nondeterminism of the operation +# https://github.com/pytorch/pytorch/issues/53352 +def sample_inputs_index(op_info, device, dtype, requires_grad, reference=False, **kwargs): + # target.index_select(dim, idx) + select = "index_select" in op_info.name + # target.index_add(dim, idx, source, *, alpha=1) + add = "index_add" in op_info.name + # target.index_copy(dim, idx, source) + copy = "index_copy" in op_info.name + # target.index_fill(dim, idx, value) + fill = "index_fill" in op_info.name + + # Extended reference inputs. 
We generate inputs that exercise atomic adds / writing
+    # several times to one location
+    if reference:
+        make_arg = partial(torch.ones, device=device, dtype=dtype, requires_grad=requires_grad)
+        make_idx = partial(torch.zeros, device=device, dtype=torch.int64)
+    else:
+        make_arg = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad)
+        # idx: the indices need to be different for copy and add to be deterministic
+        if copy or add:
+            make_idx = partial(torch.randperm, device=device, dtype=torch.int64)
+        else:
+            def make_idx(n):
+                return make_tensor((n,), device=device, dtype=torch.int64, low=0, high=n)
+
+    shapes = [(), (1,), (S, S)]
+    # extra parameter for add
+    if add:
+        if dtype == torch.bool:
+            alphas = (True, False)
+        else:
+            alphas = (-1, 0, 2)
+    else:
+        alphas = (None,)
+
+    if fill:
+        # A weird number to catch errors.
+        # The former one tests `index_fill.int_Scalar`, and the latter one tests `index_fill.int_Tensor`.
+        values = (make_arg((1,)).item(), make_arg(()))
+    else:
+        values = (None,)
+
+    for shape, alpha, value in product(shapes, alphas, values):
+        t = make_arg(shape)
+        args = []
+
+        # dim. We handle the scalar case
+        dim = -1 if t.ndim == 2 else 0
+        args.append(dim)
+
+        idx = make_idx(t.shape[dim] if t.ndim != 0 else 1)
+        args.append(idx)
+
+        # source
+        if copy or add:
+            args.append(make_arg(shape))
+        elif fill:
+            args.append(value)
+
+        args = tuple(args)
+        kwargs = {} if alpha is None else {"alpha": alpha}
+
+        yield SampleInput(t, args=args, kwargs=kwargs)
+
+def sample_inputs_index_reduce(op_info, device, dtype, requires_grad, **kwargs):
+    make_arg = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad)
+
+    def make_idx(n, m):
+        return make_tensor((n,), device=device, dtype=torch.int64, low=0, high=m)
+
+    shapes = [((), ()), ((1,), (1,)), ((S, S), (S, M)), ((S, S, S), (S, M, S))]
+    include_selfs = (True, False)
+    reduces = ('prod', 'mean', 'amin', 'amax')
+
+    for shape, include_self, reduce in product(shapes, include_selfs, reduces):
+        self_shape, src_shape = shape
+        # dim. 
We handle the scalar case + dim = 1 if len(self_shape) >= 2 else 0 + idx = make_idx(src_shape[dim] if len(src_shape) != 0 else 1, + self_shape[dim] if len(self_shape) != 0 else 1) + args = (dim, idx, make_arg(src_shape), reduce) + yield SampleInput(make_arg(self_shape), + args=args, + kwargs={'include_self' : include_self}) + + # Sample inputs to test edge cases for backward + if requires_grad: + # Check that gradients are propagated correctly for prod when zeros in self/src are reduced + # This sample tests gradients for the following cases + # (a) 1 zero reduced (from source (self[0, 1]), from self (self[0, 0])) + # (b) 2 zeros reduced (1 from src and 1 from self (self[1, 0], self[1, 1]) + # (c) no zeros reduced (self[2, 1], self[2, 2]) + # (d) 2 zeros reduced (both from src) is tested in test/test_autograd.py + # test_scatter_index_reduce_prod_gradgrad_error as this case is not supported for gradgrad + input = torch.tensor([[0, 13], [0, 0], [15, 19]], dtype=dtype, device=device, requires_grad=requires_grad) + src = torch.tensor([[2, 0], [0, 0], [2, 3], [2, 2]], dtype=dtype, device=device, requires_grad=requires_grad) + idx = torch.tensor([0, 1, 2, 0], dtype=torch.long, device=device) + + yield SampleInput(input, + args=(0, idx, src, 'prod'), + kwargs={'include_self': True}) + +def sample_inputs_mode(op_info, device, dtype, requires_grad, **kwargs): + args = ( + ((S, S, S), (),), + ((S, S, S), (1, ),), + ((S, S, S), (1, True, ),), + ((), (),), + ((), (0,),), + ((), (0, True,),), + # Non-fused mode kernel on CUDA + ((3000,), ()), + ) + make_arg = partial(make_tensor, dtype=dtype, device=device, + requires_grad=requires_grad, low=None, high=None) + return (SampleInput(make_arg(input_tensor), *args) + for input_tensor, args in args) + +# Missing to test the nondeterminism of the operation +# https://github.com/pytorch/pytorch/issues/53352 +def sample_inputs_put(op_info, device, dtype, requires_grad, **kwargs): + make_arg = partial(make_tensor, dtype=dtype, device=device, requires_grad=requires_grad) + make_idx = partial(make_tensor, low=0, dtype=torch.int64, device=device, requires_grad=False) + + S = 3 + + # Generic inputs + idx = torch.randperm(S * S, device=device, dtype=torch.int64)[:S] + idx_list = [idx, -idx - 1] + for idx, acc in product(idx_list, (True, False)): + yield SampleInput(input=make_arg((S, S)), + args=(idx.clone(), + make_arg((S,)), + acc)) + + # Scalar cases + scalar_sizes = [(), (1,)] + tgt_gen = (make_arg(size) for size in scalar_sizes) + idx_gen = (make_idx(size, high=1) for size in scalar_sizes) + src_gen = (make_arg(size) for size in scalar_sizes) + for tgt, idx, src, acc in product(tgt_gen, idx_gen, src_gen, (True, False)): + yield SampleInput(input=tgt.clone().requires_grad_(requires_grad), + args=(idx.clone(), + src.clone().requires_grad_(requires_grad), + acc)) + + # Empty cases + tgt_sizes = [(0,), (), (1,), (3, 2)] + tgt_gen = (make_arg(size) for size in tgt_sizes) + idx = make_idx((0,), high=1) + src = make_arg((0,)) + for tgt, acc in product(tgt_gen, (True, False)): + yield SampleInput(input=tgt.clone().requires_grad_(requires_grad), + args=(idx.clone(), + src.clone().requires_grad_(requires_grad), + acc)) + +def sample_inputs_take(op_info, device, dtype, requires_grad, **kwargs): + make_arg = partial(make_tensor, dtype=dtype, device=device, requires_grad=requires_grad) + make_idx = partial(make_tensor, low=0, dtype=torch.int64, device=device, requires_grad=False) + + S = 3 + + # Generic inputs: take S elements out of S * S + index = make_idx((S,), high=(S * 
S)) + for idx in (index, -index - 1): + yield SampleInput(input=make_arg((S, S)), args=(idx,)) + + # Scalar cases + scalar_sizes = [(), (1,)] + src_gen = (make_arg(size) for size in scalar_sizes) + idx_gen = (make_idx(size, high=1) for size in scalar_sizes) + for src, idx in product(src_gen, idx_gen): + yield SampleInput(input=src.clone().requires_grad_(requires_grad), + args=(idx.clone(),)) + + # Empty cases + src_sizes = [(0,), (), (1,), (3, 2)] + src_gen = (make_arg(size) for size in src_sizes) + + idx = make_idx((0,), high=1) + for src in src_gen: + yield SampleInput(input=src.clone().requires_grad_(requires_grad), + args=(idx.clone(),)) + +def sample_movedim_moveaxis(op_info, device, dtype, requires_grad, **kwargs): + make_arg = partial(make_tensor, dtype=dtype, device=device, low=None, high=None, requires_grad=requires_grad) + yield SampleInput(make_arg((4, 3, 2, 1)), [0, 1, 2, 3], [3, 2, 1, 0]) + yield SampleInput(make_arg((4, 3, 2, 1)), [0, -1, -2, -3], [-3, -2, -1, -0]) + +def reference_movedim_moveaxis(op_info, device, dtype, requires_grad, **kwargs): + yield from sample_movedim_moveaxis(op_info, device, dtype, requires_grad, **kwargs) + + make_arg = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad) + + # shape, source, destination + args = ( + # empty inputs + ((), (), ()), + # int inputs, negative + ((3, 5, 7, 2), -2, 1), + # swap bounds + ((3, 5, 7, 2), (-1, 0), (0, -1)), + # non-sequential, negative + ((2, 3, 4, 5, 6), (3, -3, 4), (1, 0, -1)), + # idempotence, negative + ((2, 3, 4, 5, 6), (-3, 4, 3, 1), (-3, 4, 3, 1)), + # reverse, sequential, positive + ((6, 2, 3, 5, 4), (4, 3, 2, 1, 0), (0, 1, 2, 3, 4)), + # reverse, non-sequential + ((6, 2, 3, 5, 4), (-3, -2, -4, -5, -1), (2, 1, 3, 4, 0)), + # reverse, sequential, negative + ((6, 2, 3, 5, 4), (4, -2, 2, -4, -5), (-5, 1, 2, -2, -1)), + ) + + for shape, source, destination in args: + yield SampleInput(make_arg(shape), args=(source, destination)) + +def error_movedim_moveaxis(op_info, device, **kwargs): + make_arg = partial(make_tensor, device=device, dtype=torch.float32) + + # source length < destination length + yield ErrorInput( + SampleInput(make_arg(2, 3, 4, 5, 6), args=((3, -3), (1, 0, -1))), + error_regex=(r"movedim: Invalid source or destination dims: source " + r"\(\[3, -3\] dims\) should contain the same number of " + r"dims as destination \(\[1, 0, -1\] dims\)"), + ) + + # source length > destination length + yield ErrorInput( + SampleInput(make_arg(2, 3, 4, 5, 6), args=((3, -3, 4), (1, 0))), + error_regex=(r"movedim: Invalid source or destination dims: source " + r"\(\[3, -3, 4\] dims\) should contain the same number of " + r"dims as destination \(\[1, 0\] dims\)"), + ) + + # repeated source dim, with negative indices + yield ErrorInput( + SampleInput(make_arg(2, 3, 4, 5, 6), args=((0, 4, -5), (1, 0, 2))), + error_regex=r"movedim: repeated dim in `source` \(\[0, 4, -5\]\)", + ) + + # repeated destination dim, with negative indices + yield ErrorInput( + SampleInput(make_arg(2, 3, 4, 5, 6), args=((1, 0, 2), (0, 4, -5))), + error_regex=r"movedim: repeated dim in `destination` \(\[0, 4, -5\]\)", + ) + + # repeated dim (both), with negative indices + yield ErrorInput( + SampleInput(make_arg(2, 3, 4, 5, 6), args=((1, 0, -4), (0, 4, -5))), + error_regex=r"movedim: repeated dim in `source` \(\[1, 0, -4\]\)", + ) + + # out of bounds source inputs, with negative indices + yield ErrorInput( + SampleInput(make_arg(2, 3, 4, 5, 6), args=((0, 1, -6), (1, 4, 2))), + error_regex=r"Dimension out of range 
\(expected to be in range of \[-5, 4\], but got -6\)", + error_type=IndexError, + ) + + # out of bounds destination inputs, with negative indices + yield ErrorInput( + SampleInput(make_arg(2, 3, 4, 5, 6), args=((1, 4, 2), (0, 1, -6))), + error_regex=r"Dimension out of range \(expected to be in range of \[-5, 4\], but got -6\)", + error_type=IndexError, + ) + + # out of bounds source input, int + yield ErrorInput( + SampleInput(make_arg(2, 3, 4, 5, 6), args=(-6, 1)), + error_regex=r"Dimension out of range \(expected to be in range of \[-5, 4\], but got -6\)", + error_type=IndexError, + ) + + # out of bounds destination input, int + yield ErrorInput( + SampleInput(make_arg(2, 3, 4, 5, 6), args=(3, -6)), + error_regex=r"Dimension out of range \(expected to be in range of \[-5, 4\], but got -6\)", + error_type=IndexError, + ) + +def sample_repeat_tile(op_info, device, dtype, requires_grad, **kwargs): + make_arg = partial(make_tensor, dtype=dtype, device=device, requires_grad=requires_grad) + rep_dims = ((), (0, ), (1, ), (0, 2), (1, 1), (2, 3), (2, 3, 2), (0, 2, 3), (2, 1, 1, 1),) + shapes = ((), (0,), (2,), (3, 0), (3, 2), (3, 0, 1)) + + if requires_grad: + # Tests for variant_consistency_jit, grad, gradgrad + # are slower. Use smaller bags of `rep_dims` and `shapes` + # in this case. + rep_dims = ((), (0, ), (0, 2), (1, 1), (2, 3), (1, 3, 2), (3, 1, 1)) # type: ignore[assignment] + shapes = ((), (0,), (2,), (3, 2)) # type: ignore[assignment] + + is_repeat_op = op_info.name in ['repeat', '_refs.repeat'] + for rep_dim, shape in product(rep_dims, shapes): + # `torch.repeat` errors for `len(rep_dims) < t.dim()`, + # so we filter such combinations. + if is_repeat_op and len(rep_dim) < len(shape): + continue + yield SampleInput(make_arg(shape), rep_dim) + + +def sample_inputs_narrow_narrow_copy(op_info, device, dtype, requires_grad, *, is_narrow, **kwargs): + shapes_and_args = ( + ((S, S, S), 1, 2, 2), + ((S, S, S), -1, 2, 2), + ((S, S, S), 1, 0, 0), + ((S, S, S), -1, 0, 0), + ((S, S, S), 2, 1, 2), + ) + + for shape, dim, start, length in shapes_and_args: + tensor = make_tensor(shape, dtype=dtype, device=device, low=None, high=None, + requires_grad=requires_grad) + yield SampleInput(tensor, dim, start, length) + # narrow also accepts the start argument being a Tensor + if is_narrow: + yield SampleInput(tensor, dim, torch.tensor(start), length) + +def reference_inputs_narrow_narrow_copy(op_info, device, dtype, requires_grad, *, is_narrow, **kwargs): + yield from sample_inputs_narrow_narrow_copy(op_info, device, dtype, requires_grad, is_narrow=is_narrow, **kwargs) + + shapes_and_args = ( + # 1-dim + ((M,), 0, 0, 0), # 0 elems from the left + ((M,), -1, -1, 0), # 0 elems from the right + ((M,), 0, 5, 3), # 3 elems from the left + ((M,), 0, -5, 2), # 2 elems from the right + ((M,), -1, 0, M), # M elems from the left + ((M,), 0, -M, M), # M elems from the right + + # 2-dim + ((M, S), 1, 0, 0), # dim 1, 0 elems from the left + ((S, M), -2, -1, 0), # dim 0, 0 elems from the right + ((L, S), 1, 2, 3), # dim 1, 3 elems from the left + ((L, S), -1, 3, 2), # dim 1, 2 elems from the left + ((M, L), 0, 0, M), # dim 0, M elems from the left + ((M, L), -1, -L, L), # dim 1, L elems from the right + + # 3-dim + ((L, M, S), 2, 0, 0), # dim 2, 0 elems from the left + ((M, S, L), -1, -1, 0), # dim 2, 0 elems from the right + ((S, L, M), 2, 0, M), # dim 2, M elems from the left + ((L, S, M), -1, -M, M), # dim 2, M elems from the right + ((S, L, M), 1, 0, 0), # dim 1, 0 elems from the left + ((S, L, M), 0, 2, 1), # dim 
0, 1 elem from the left + ((M, S, M), -1, -5, 4), # dim 2, 4 elems from the right + ) + + for shape, dim, start, length in shapes_and_args: + tensor = make_tensor(shape, dtype=dtype, device=device, low=None, high=None, + requires_grad=requires_grad) + yield SampleInput(tensor, dim, start, length) + # narrow also accepts the start argument being a Tensor + if is_narrow: + yield SampleInput(tensor, dim, torch.tensor(start), length) + +def error_inputs_narrow_narrow_copy(op_info, device, *, is_narrow, is_ref): + make_arg = partial(make_tensor, device=device, dtype=torch.float32) + + # 0-dim + yield ErrorInput(SampleInput(make_arg(()), 0, 0, 1), + error_type=RuntimeError, + error_regex=r"narrow\(\) cannot be applied to a 0-dim tensor\.") + + # out of bounds dim + if not is_narrow and not is_ref and torch.device(device).type == 'cpu': + # narrow_copy_dense_cpu_out + yield ErrorInput(SampleInput(make_arg((M, S, L)), 3, 0, 0), + error_type=RuntimeError, + error_regex=r"Expected dim < static_cast\(self_sizes.size\(\)\) to be true, but got false\.") + else: + yield ErrorInput(SampleInput(make_arg((M, S, L)), 3, 0, 0), + error_type=IndexError, + error_regex=r"Dimension out of range \(expected to be in range of \[-3, 2\], but got 3\)") + # out of bounds dim (negative) + yield ErrorInput(SampleInput(make_arg((L, S, M)), -4, 0, 0), + error_type=IndexError, + error_regex=r"Dimension out of range \(expected to be in range of \[-3, 2\], but got -4\)") + + # out of bounds start + yield ErrorInput(SampleInput(make_arg((L, M, S)), 1, M + 1, 0), + error_type=IndexError, + error_regex=r"start out of range \(expected to be in range of \[-10, 10\], but got 11\)") + # out of bounds start (negative) + yield ErrorInput(SampleInput(make_arg((L, M, S)), 1, -M - 1, 0), + error_type=IndexError, + error_regex=r"start out of range \(expected to be in range of \[-10, 10\], but got -11\)") + + # out of bounds length + yield ErrorInput(SampleInput(make_arg((S, L, M)), 2, 0, M + 1), + error_type=RuntimeError, + error_regex=r"start \(0\) \+ length \(11\) exceeds dimension size \(10\)\.") + # out of bounds length (negative) + if not is_narrow and not is_ref and torch.device(device).type == 'cpu': + # narrow_copy_dense_cpu_out + yield ErrorInput(SampleInput(make_arg((M,)), 0, 0, -1), + error_type=RuntimeError, + error_regex=r"start \(0\) \+ length \(-1\) exceeds dimension size \(10\)\.") + else: + yield ErrorInput(SampleInput(make_arg((M,)), 0, 0, -1), + error_type=RuntimeError, + error_regex=r"narrow\(\): length must be non-negative\.") + + # Test Tensor overload that was added for XLA. Start must be an 0-dim + # integral Tensor. narrow_copy doesn't have this overload. 
+ # https://github.com/pytorch/pytorch/issues/31558 + if is_narrow: + # *1-dim* integral Tensor + yield ErrorInput(SampleInput(make_arg((L, M, S)), 1, make_arg(S, dtype=torch.int), 2), + error_type=RuntimeError, + error_regex=r"start must be an 0-dim integral Tensor\.") + + # 0-dim *bool* Tensor (bools are not allowed) + yield ErrorInput(SampleInput(make_arg((L, M, S)), -3, make_arg((), dtype=torch.bool), 3), + error_type=RuntimeError, + error_regex=r"start must be an 0-dim integral Tensor\.") + + +def sample_trapezoid(op_info, device, dtype, requires_grad, **kwargs): + y_shape_x_shape_and_kwargs = [ + ((2, 3), (2, 3), {}), + ((2, 3), (2, 3), {'dim': 1}), + ((6,), (6,), {}), + ((6,), None, {}), + # When 'trapezoid' is called with an empty input, it does not produce an output with requires_grad + # See Issue #{61619} + # ((6,0), (6,0), {}), + ((2, 3), (1, 3), {}), + ((3, 3), (3, 3), {}), + ((3, 3), (3, 3), {'dim': -2}), + ((5,), None, {'dx': 2.0}), + ((2, 2), None, {'dx': 3.0}) + ] + make_arg = partial(make_tensor, dtype=dtype, device=device, low=None, high=None, + requires_grad=requires_grad) + for y_shape, x_shape, kwarg in y_shape_x_shape_and_kwargs: + y_tensor = make_arg(y_shape) + if x_shape is not None: + x_tensor = make_arg(x_shape) + yield SampleInput(y_tensor, x_tensor, **kwarg) + else: + yield SampleInput(y_tensor, **kwarg) + +def sample_cumulative_trapezoid(op_info, device, dtype, requires_grad, **kwargs): + + y_shape_x_shape_and_kwargs = [ + ((2, 3), (2, 3), {}), + ((2, 3), (2, 3), {'dim': 1}), + ((6,), (6,), {}), + ((6,), None, {}), + # When 'cumulative_trapezoid' is called with an empty input, it does not produce an output with requires_grad + # See Issue #{61619} + # ((6,0), (6,0), {}), + ((2, 3), (1, 3), {}), + ((3, 3), (3, 3), {}), + ((3, 3), (3, 3), {'dim': -2}), + ((5,), None, {'dx': 2.0}), + ((2, 2), None, {'dx': 3.0}) + ] + make_arg = partial(make_tensor, device=device, dtype=dtype, + requires_grad=requires_grad, low=None, high=None) + for y_shape, x_shape, kwarg in y_shape_x_shape_and_kwargs: + y_tensor = make_arg(y_shape) + if x_shape is not None: + x_tensor = make_arg(x_shape) + yield SampleInput(y_tensor, x_tensor, **kwarg) + else: + yield SampleInput(y_tensor, **kwarg) + +def sample_unsqueeze(op_info, device, dtype, requires_grad, **kwargs): + shapes_and_axes = [ + ((3, 4, 5), 0), + ((3, 4, 5), 1), + ((3, 4, 5), 3), + ((3, 4, 5), -1), + ((3, 4, 5), -3), + ((), 0), + ((), -1), + ((1,), 0), + ((1,), -1), + ] + + for shape, axis in shapes_and_axes: + tensor = make_tensor(shape, dtype=dtype, device=device, low=None, high=None, + requires_grad=requires_grad) + yield SampleInput(tensor, axis) + + +def sample_inputs_nn_unfold(op_info, device, dtype, requires_grad, **kwargs): + shapes = ((0, 1, 5, 5), (2, 3, 5, 5)) + kernel_sizes = (2, (2, 2), (2, 3)) + dilations = (1, 2, (1, 2)) + paddings = (0, 1, (1, 2)) + strides = (1, 2, (1, 2)) + + cases = product(shapes, kernel_sizes, dilations, paddings, strides) + make_arg = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad) + for shape, kernel_size, dilation, padding, stride in cases: + tensor = make_arg(shape) + yield SampleInput(tensor, kernel_size, dilation, padding, stride) + + # With default args + yield SampleInput(make_arg((1, 1, 5, 5)), (3, 3)) + + +def sample_inputs_squeeze(op_info, device, dtype, requires_grad, **kwargs): + shapes_and_args = ( + ((S, 1, S, 1), ()), + ((1, 1, 1, 1), ()), + ((1, 1, 1, 1), (0,)), + ((S, 1, S, 1), (1,)), + ((S, 1, S, 1), (-1,)), + ((S, 1, S, 1), (2,)), + ((S, 1, 
S, 1), (-2,)), + ((), (0, )), + ) + + for shape, args in shapes_and_args: + tensor = make_tensor(shape, dtype=dtype, device=device, low=None, high=None, + requires_grad=requires_grad) + + yield SampleInput(tensor, args=args) + + +def sample_inputs_squeeze_multiple(op_info, device, dtype, requires_grad, **kwargs): + shapes_and_args = ( + ((1, 1, 1, 1), ()), + ((S, 1, S, 1), (1,)), + ((S, 1, S, 1), (-1,)), + ((S, 1, S, 1), (1, 3)), + ((S, 1, S, 1), (1, 2,)), + ((), (0,)), + ) + + for shape, dims in shapes_and_args: + tensor = make_tensor(shape, dtype=dtype, device=device, low=None, high=None, + requires_grad=requires_grad) + + yield SampleInput(tensor, dims) + + +def _squeeze_ref(x, axis=None): + # NumPy doesn't allow squeezing scalars + if x.ndim == 0: + return x + + if isinstance(axis, Sequence): + # Numpy doesn't allow specifying non-singular dimensions + axis = tuple(a for a in axis if x.shape[a] == 1) + + if isinstance(axis, int) and x.shape[axis] != 1: + return x + + return np.squeeze(x, axis) + +def sample_inputs_nn_pad(op_info, device, dtype, requires_grad, mode, **kwargs): + assert mode in ('constant', 'reflect', 'replicate', 'circular') + if mode in ['reflect', 'replicate']: + cases: tuple = ( # ignore + ((1, 3), (1, 2)), + ((1, 3), (0, 1)), + ((0, 3, 3), (1, 2)), + ((0, 3, 3), (0, 1)), + ((1, 3, 3), (1, 2)), + ((1, 3, 3), (0, 1)), + ((1, 3, 3), (0, 2, 0, 1)), + ((0, 3, 3, 3), (0, 2, 0, 1)), + ((3, 3, 5, 5), (0, 2, 0, 1)), + ((3, 3, 5, 5), (1, 1, 1, 1, 1, 1)), + ((1, 3, 3, 3, 3), (1, 1, 1, 1, 1, 1)), + ((1, 3, 4, 4), (-1, 1, -2, 1)), + ) + elif mode == 'constant': + cases = ( + ((1, 3), (1, 2)), + ((1, 3), (0, 1)), + ((1, 3), (0, 2, 0, 1)), + ((0, 3, 3), (1, 2)), + ((0, 3, 3), (0, 1)), + ((0, 3, 3), (0, 2, 0, 1)), + ((0, 3, 3), (1, 1, 1, 1, 1, 1)), + ((1, 3, 3), (1, 2)), + ((1, 3, 3), (0, 1)), + ((1, 3, 3), (0, 2, 0, 1)), + ((1, 3, 3), (1, 1, 1, 1, 1, 1)), + ((0, 3, 3, 3), (1, 2)), + ((0, 3, 3, 3), (0, 1)), + ((0, 3, 3, 3), (0, 2, 0, 1)), + ((0, 3, 3, 3), (1, 1, 1, 1, 1, 1)), + ((3, 3, 5, 5), (1, 2)), + ((3, 3, 5, 5), (0, 1)), + ((3, 3, 5, 5), (0, 2, 0, 1)), + ((3, 3, 5, 5), (1, 1, 1, 1, 1, 1)), + ((1, 3, 3, 3, 3), (1, 2)), + ((1, 3, 3, 3, 3), (0, 1)), + ((1, 3, 3, 3, 3), (0, 2, 0, 1)), + ((1, 3, 3, 3, 3), (1, 1, 1, 1, 1, 1)), + ((1, 3, 4, 4), (-1, 1, -2, 1)), + ) + else: # mode == 'circular' + if dtype == torch.bool: + # test_dtypes fails on ASAN with for the case ab + # runtime error: load of value 190, which is not a valid value for type 'bool' + # Reference: https://github.com/pytorch/pytorch/pull/62814#issuecomment-894156562 + # Reference Issue: https://github.com/pytorch/pytorch/issues/63034 + cases = ( + ((2, 3, 3), (1, 2)), + ((1, 3, 3), (1, 2)), + ) + else: + cases = ( + ((0, 3, 3), (1, 2)), + ((0, 3, 3), (0, 1)), + ((1, 3, 3), (1, 2)), + ((1, 3, 3), (0, 1)), + ((0, 3, 3, 3), (0, 2, 0, 1)), + ((3, 3, 5, 5), (0, 2, 0, 1)), + ((1, 3, 3, 3, 3), (1, 1, 1, 1, 1, 1)), + ((1, 3, 4, 4), (-1, 1, -2, 1)), + ) + + make_inp = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad) + + if mode == 'constant': + # Default args + yield SampleInput(make_inp((1, 3, 3)), args=((2, 2),)) + + if mode in ['reflect', 'replicate', 'circular']: + for shape, pad in cases: + yield SampleInput(make_inp(shape), args=(pad, mode)) + else: # mode == 'constant' + for pad_value in (1., 2.): + for shape, pad in cases: + yield SampleInput(make_inp(shape), args=(pad, mode, pad_value)) + +def sample_inputs_nn_pad_replicate_negative(op_info, device, dtype, requires_grad, **kwargs): + 
cases: tuple = ( + ((5, 3, 4, 4), (-4, 5, 0, 0)), + ((6, 2, 4, 4), (0, 0, 2, -4)), + ((5, 6, 4, 4), (5, -4, -4, 3)), + ((4, 2, 5, 5), (-2, -1, 4, 6)), + ((2, 6, 5, 5), (8, -1, -1, -3)), + ((8, 1, 5, 5), (-2, -1, -1, -3)), + ) + make_inp = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad) + + for shape, pad in cases: + yield SampleInput(make_inp(shape), args=(pad, 'replicate')) + +def sample_inputs_constant_pad_nd(op_info, device, dtype, *args, **kwargs): + # Inherit sample inputs from nn.pad, but transform them to fit + # constant_pad_nd's interface + nn_samples = sample_inputs_nn_pad(op_info, device, dtype, *args, + mode='constant', **kwargs) + + # NOTE: primTorch is more strict about the type of the fill value argument + # So we must cast it to the correct dtype + from torch._prims_common import dtype_to_type + scalar_type = dtype_to_type(dtype) + + def drop_mode_argument(input, pad, mode=None, value=None): + if value is None: + return SampleInput(input, args=(pad,)) + else: + return SampleInput(input, args=(pad, scalar_type(value))) + + for sample in nn_samples: + yield drop_mode_argument(sample.input, *sample.args, **sample.kwargs) + +def sample_inputs_repeat_interleave(op_info, device, dtype, requires_grad, **kwargs): + make_input = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad) + + yield SampleInput(make_input(()), repeats=2) + yield SampleInput(make_input((2, 3, 4)), repeats=2) + yield SampleInput(make_input((2, 3, 4)), repeats=2, dim=1) + yield SampleInput(make_input((2, 3, 4)), repeats=torch.arange(3, device=device), dim=1) + + +def sample_inputs_stft(op_info, device, dtype, requires_grad, **kwargs): + def mt(shape, **kwargs): + return make_tensor(shape, device=device, dtype=dtype, + requires_grad=requires_grad, **kwargs) + + yield SampleInput(mt(100), n_fft=10, return_complex=True) + yield SampleInput(mt(100), n_fft=10, return_complex=False) + if dtype.is_complex: + yield SampleInput(mt(100), n_fft=10) + + for center in [False, True]: + yield SampleInput(mt(10), n_fft=7, center=center, return_complex=True) + yield SampleInput(mt((10, 100)), n_fft=16, hop_length=4, + center=center, return_complex=True) + + window = mt(16, low=.5, high=2.0) + yield SampleInput( + mt((2, 100)), kwargs=dict(n_fft=16, window=window, return_complex=True, center=center)) + yield SampleInput( + mt((3, 100)), kwargs=dict(n_fft=16, window=window, return_complex=True, center=center)) + if not dtype.is_complex: + yield SampleInput( + mt((10, 100)), n_fft=16, window=window, onesided=False, + return_complex=True) + + +def sample_inputs_istft(op_info, device, dtype, requires_grad, **kwargs): + make_arg = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad) + + def mt(shape, **kwargs): + real_shape = shape if dtype.is_complex else shape + (2,) + return make_arg(real_shape, **kwargs) + + yield SampleInput(mt((10, 2)), kwargs=dict(n_fft=10)) + yield SampleInput(mt((6, 3)), kwargs=dict(n_fft=6, onesided=False)) + yield SampleInput(mt((6, 4)), kwargs=dict(n_fft=10, onesided=True)) + + for center in [False, True]: + yield SampleInput(mt((10, 10, 6)), kwargs=dict(n_fft=10, center=center)) + yield SampleInput(mt((1, 9, 10)), kwargs=dict(n_fft=16, hop_length=4, center=center)) + + window = make_arg(10, low=.5, high=2.0) + yield SampleInput(mt((10, 10, 6)), kwargs=dict( + n_fft=10, window=window, center=center, return_complex=dtype.is_complex)) + yield SampleInput(mt((10, 10, 10)), kwargs=dict( + n_fft=10, window=window[:8], win_length=8, 
center=center, return_complex=True)) + + real_window = window if not dtype.is_complex else window.real + yield SampleInput(mt((10, 5, 6)), kwargs=dict(n_fft=8, window=real_window[:8], center=center)) + +def sample_inputs_ormqr(op_info, device, dtype, requires_grad, **kwargs): + # create a helper function wrapping `make_tensor` + make_input = partial(make_tensor, dtype=dtype, device=device, low=-1, high=1) + + batches = [(), (0, ), (2, ), (2, 1)] + ns = [5, 2, 0] + tf = [True, False] + for batch, (m, n), left, transpose in product(batches, product(ns, ns), tf, tf): + input = make_input((*batch, m, n)) + reflectors, tau = torch.geqrf(input) + reflectors.requires_grad_(requires_grad) + tau.requires_grad_(requires_grad) + other_matrix_shape = (m, n) if left else (n, m) + other = make_input((*batch, *other_matrix_shape), requires_grad=requires_grad) + yield SampleInput(reflectors, tau, other, left=left, transpose=transpose) + + +def sample_inputs_cholesky_solve(op_info, device, dtype, requires_grad=False, **kwargs): + cholesky_inverse_samples = sample_inputs_linalg_cholesky_inverse( + op_info, device, dtype, requires_grad=False + ) + + for sample in cholesky_inverse_samples: + psd_matrix = sample.input + sample.input = make_tensor(psd_matrix.shape, dtype=dtype, device=device, requires_grad=requires_grad, low=None, high=None) + sample.args = (psd_matrix.requires_grad_(requires_grad),) + yield sample + + +def sample_inputs_lu(op_info, device, dtype, requires_grad=False, **kwargs): + make_arg = partial(make_fullrank_matrices_with_distinct_singular_values, + dtype=dtype, device=device, requires_grad=requires_grad) + + # not needed once OpInfo tests support Iterables + batch_shapes = ((), (3,), (3, 3)) + for batch_shape, get_infos, size_delta in product(batch_shapes, (True, False), (-2, -1, 0, +1, +2)): + shape = batch_shape + (S + size_delta, S) + input = make_arg(*shape) + yield SampleInput(input, args=(True, get_infos)) + + +def sample_inputs_lu_unpack(op_info, device, dtype, requires_grad=False, **kwargs): + def out_fn(output): + return output[1], output[2] + + for lu_sample in sample_inputs_linalg_lu(op_info, device, dtype, requires_grad, **kwargs): + lu_data, pivots = torch.linalg.lu_factor(lu_sample.input) + lu_data.requires_grad_(requires_grad) + yield SampleInput(lu_data, pivots).with_metadata(output_process_fn_grad=out_fn) + + +def sample_inputs_roll(op_info, device, dtype, requires_grad=False, **kwargs): + make_arg = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad) + + args = ((0, 0), (1, 2), (0, 2), (2, 0), (-1, 0), (10000, 1), (2,), ((1, 2, -1), (0, 1, 2))) + + for arg in args: + yield SampleInput(make_arg((0, 0, 0)), args=arg) + yield SampleInput(make_arg((S, S, S)), args=arg) + + # Scalar tensor + yield SampleInput(make_arg(()), args=(10, )) + +def error_inputs_roll(op_info, device, **kwargs): + make_arg = partial(make_tensor, device=device, dtype=torch.float32) + err_msg1 = "`shifts` required" + s1 = SampleInput(make_arg((S,)), ()) + yield ErrorInput(s1, error_regex=err_msg1) + + err_msg2 = ("shifts and dimensions must align") + s2 = SampleInput(make_arg((S, S)), (2, 1), 0) + yield ErrorInput(s2, error_regex=err_msg2) + + err_msg3 = ("out of range") + s3 = SampleInput(make_arg((S, )), 0, 2) + yield ErrorInput(s3, error_regex=err_msg3, error_type=IndexError) + + err_msg4 = ("Dimension specified as 0") + s4 = SampleInput(make_arg(()), 0, 0) + yield ErrorInput(s4, error_regex=err_msg4, error_type=IndexError) + +def sample_inputs_rot90(op_info, device, dtype, 
requires_grad=False, **kwargs): + make_arg = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad) + + args = itertools.product(range(-5, 6), [(0, 1), (1, 2), (1, -1)]) + + yield SampleInput(make_arg((S, S, S))) + for arg in args: + yield SampleInput(make_arg((S, S, S)), args=arg) + + +def error_inputs_rot90(op_info, device, **kwargs): + make_arg = partial(make_tensor, device=device, dtype=torch.float32) + err_msg1 = "expected total rotation dims" + s1 = SampleInput(make_arg((S, S)), dims=(0,)) + yield ErrorInput(s1, error_regex=err_msg1) + + err_msg2 = "expected total dims >= 2" + s2 = SampleInput(make_arg((S,))) + yield ErrorInput(s2, error_regex=err_msg2) + + err_msg3 = "expected rotation dims to be different" + s3 = SampleInput(make_arg((S, S)), dims=(1, 1)) + yield ErrorInput(s3, error_regex=err_msg3) + + +def sample_inputs_std_var(op_info, device, dtype, requires_grad, **kwargs): + tensor_nd = partial(make_tensor, (S, S, S), device=device, dtype=dtype, + requires_grad=requires_grad) + tensor_1d = partial(make_tensor, (S,), device=device, dtype=dtype, + requires_grad=requires_grad) + + yield SampleInput(tensor_nd()) + yield SampleInput(tensor_nd(), dim=1) + yield SampleInput(tensor_nd(), dim=1, unbiased=True, keepdim=True) + yield SampleInput(tensor_1d(), dim=0, unbiased=True, keepdim=True) + yield SampleInput(tensor_1d(), dim=0, unbiased=False, keepdim=False) + + yield SampleInput(tensor_nd(), dim=(1,), correction=1.3) + yield SampleInput(tensor_nd(), dim=(1,), correction=S // 2) + yield SampleInput(tensor_nd(), dim=None, correction=0, keepdim=True) + yield SampleInput(tensor_nd(), dim=None, correction=None) + yield SampleInput(tensor_nd(), correction=0, keepdim=True) + yield SampleInput(make_tensor(3, 4, 5, device=device, dtype=dtype, requires_grad=requires_grad), dim=-3) + + +def sample_inputs_std_var_unbiased(op_info, device, dtype, requires_grad, **kwargs): + make_arg = partial(make_tensor, device=device, dtype=dtype, + requires_grad=requires_grad) + + # Test var_mean(Tensor self, bool unbiased=True) -> (Tensor, Tensor) + yield SampleInput(make_arg((S, S)), True) + yield SampleInput(make_arg((S,)), False) + + +def _generate_correlation_inputs(device, dtype, requires_grad, **kwargs): + shapes = [(2,), (1, 2), (3, 2), (2, 3)] + for shape in shapes: + yield make_tensor(shape, dtype=dtype, device=device, requires_grad=requires_grad) + + +def sample_inputs_corrcoef(op_info, device, dtype, requires_grad, **kwargs): + return (SampleInput(t) for t in _generate_correlation_inputs(device, dtype, requires_grad)) + + +def sample_inputs_cov(op_info, device, dtype, requires_grad, **kwargs): + for t in _generate_correlation_inputs(device, dtype, requires_grad): + yield SampleInput(t) + num_observations = t.numel() if t.ndimension() < 2 else t.size(1) + fweights = make_tensor((num_observations,), dtype=torch.int, device=device, low=1, high=10) + aweights = make_tensor((num_observations,), dtype=torch.float, device=device, low=0, high=1, requires_grad=requires_grad) + for correction, fw, aw in product(range(num_observations), [None, fweights], [None, aweights]): + yield SampleInput(t.clone().requires_grad_(requires_grad), + correction=correction, fweights=fw, aweights=aw) + + +def error_inputs_cov(op_info, device, **kwargs): + a = torch.rand(S, device=device) + yield ErrorInput( + SampleInput(torch.rand(S, S, S, device=device)), + error_regex="expected input to have two or fewer dimensions") + yield ErrorInput( + SampleInput(a, fweights=torch.rand(S, S, device=device)), + 
error_regex="expected fweights to have one or fewer dimensions") + yield ErrorInput( + SampleInput(a, aweights=torch.rand(S, S, device=device)), + error_regex="expected aweights to have one or fewer dimensions") + yield ErrorInput( + SampleInput(a, fweights=torch.rand(S, device=device)), + error_regex="expected fweights to have integral dtype") + yield ErrorInput( + SampleInput(a, aweights=torch.tensor([1, 1], device=device)), + error_regex="expected aweights to have floating point dtype") + yield ErrorInput( + SampleInput(a, fweights=torch.tensor([1], device=device)), + error_regex="expected fweights to have the same numel") + yield ErrorInput( + SampleInput(a, aweights=torch.rand(1, device=device)), + error_regex="expected aweights to have the same numel") + yield ErrorInput( + SampleInput(a, fweights=torch.tensor([-1, -2, -3, -4 , -5], device=device)), + error_regex="fweights cannot be negative") + yield ErrorInput( + SampleInput(a, aweights=torch.tensor([-1., -2., -3., -4., -5.], device=device)), + error_regex="aweights cannot be negative") + + +def sample_inputs_permute(op_info, device, dtype, requires_grad, **kwargs): + make_arg = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad) + + cases = [((1, 2, 3, 4), (0, 2, 3, 1)), + ((1, 2, 3, 4), (0, -2, -1, 1)), + ((), ()), + ((1, 2, 3, 4), (2, 1, 3, 0))] + + for shape, args in cases: + yield SampleInput(make_arg(shape), args=(args,)) + +def reference_inputs_permute(op, device, dtype, requires_grad, **kwargs): + yield from sample_inputs_permute(op, device, dtype, requires_grad, **kwargs) + + make_arg = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad) + + cases = ( + ((), ()), + ((1,), (0,)), + ((2, 2), (1, 0)), + ((2, 2), (0, 1)), + ((2, 0, 1), (0, 2, 1)), + ((3, 4, 2), (2, 1, 0)), + ((3, 4, 2), (1, 0, 2)), + ((3, 4, 2), (0, 1, 2)), + ) + + # Adds tricky permutations and permutations with noncontiguity + for shape, permutation in cases: + for p in itertools.permutations(permutation): + a = make_arg(shape).permute(p) + yield SampleInput(a, args=(permutation,)) + + a = make_arg(shape, noncontiguous=True).permute(p) + yield SampleInput(a, args=(permutation,)) + +def error_inputs_softshrink(op, device, **kwargs): + yield ErrorInput(SampleInput(make_tensor((1,), dtype=torch.float, device=device), kwargs={"lambd": -0.5}), + error_regex="lambda must be greater or equal to 0, but found to be -0.5") + +def sample_inputs_softshrink(op_info, device, dtype, requires_grad=False, **kwargs): + make_arg = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad) + + # The additional sample is to check additional values of lambd beyond the default + # value (what is already checked by sample_inputs_elementwise_unary) + for lbda in (0., 0.5): + yield SampleInput(make_arg(S, S), kwargs={"lambd": lbda}) + + yield from sample_inputs_elementwise_unary(op_info, device, dtype, requires_grad) + +def sample_inputs_hardshrink(op_info, device, dtype, requires_grad=False, **kwargs): + make_arg = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad) + + # The additional sample is to check additional values of lambd beyond the default + # value (what is already checked by sample_inputs_elementwise_unary) + # Note that unlike softshrink, lambd is allowed to be negative for hardshrink + for lbda in (-0.5, 0., 0.5): + yield SampleInput(make_arg(S, S), kwargs={"lambd": lbda}) + + yield from sample_inputs_elementwise_unary(op_info, device, dtype, requires_grad) + + +def 
sample_inputs_hardtanh(op_info, device, dtype, requires_grad=False, **kwargs): + make_arg = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad) + + # The additional sample is to check additional values of min_val and max_val beyond the default + # value (what is already checked by sample_inputs_elementwise_unary) + for max_val, min_val in ((-0.5, 0.5), (0.5, -0.5), (0., 0.)): + yield SampleInput(make_arg(S, S), kwargs={"min_val": min_val, "max_val": max_val}) + + yield from sample_inputs_elementwise_unary(op_info, device, dtype, requires_grad) + + +def sample_inputs_einsum(op_info, device, dtype, requires_grad=False, **kwargs): + def c(t): + return t.clone().requires_grad_(requires_grad) + + make_arg = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad) + x = make_arg((3,)) + y = make_arg((4,)) + A = make_arg((2, 3,)) + B = make_arg((1, 3,)) + C = make_arg((1, 2, 3,)) + D = make_arg((1, 3, 4,)) + E = make_arg((4, 4,)) + H = make_arg((3, 3,)) + I = make_arg((1, 3, 1,)) + + # Vector operations + yield SampleInput([c(x)], 'i->') # sum + yield SampleInput([c(x), c(y)], 'i,j->ij') # outer + + # Matrix operations + yield SampleInput([c(A)], "ij->i") # col sum + yield SampleInput([c(A), c(B)], "ij,kj->ik") # matmul + yield SampleInput([c(A), c(E)], "ij,Ab->ijAb") # matrix outer product + + # Tensor operations + yield SampleInput([c(C), c(D)], "aij,ajk->aik") # batch matmul + yield SampleInput([c(D), c(E)], "aij,jk->aik") # tensor matrix contraction + yield SampleInput([c(C), c(B)], "ijk,ik->j") # non contiguous + + # Test diagonals + yield SampleInput([c(I)], 'iji->j') # non-contiguous trace + + # Test ellipsis + yield SampleInput([c(H)], "i...->...") + yield SampleInput([c(C), c(x)], '...ik, ...j -> ij') + + +def sample_inputs_flip(op_info, device, dtype, requires_grad, **kwargs): + make_arg = partial(make_tensor, dtype=dtype, device=device, requires_grad=requires_grad) + sizes = ((S, M, S), (S, 0, M)) + all_dims = ((0, 1, 2), (0,), (0, 2), (-1,), ()) + + for size, dims in product(sizes, all_dims): + yield SampleInput(make_arg(size), kwargs={"dims": dims}) + +def sample_inputs_fliplr_flipud(op_info, device, dtype, requires_grad, **kwargs): + shapes = [ + (S, M, S), + (S, 0, M), + ] + make_arg = partial(make_tensor, dtype=dtype, device=device, requires_grad=requires_grad) + return (SampleInput(make_arg(shape, low=None, high=None)) for shape in shapes) + +def error_inputs_fliplr(op, device, **kwargs): + yield ErrorInput(SampleInput(make_tensor((1,), dtype=torch.float, device=device)), + error_regex="Input must be >= 2-d.") + +def error_inputs_flipud(op, device, **kwargs): + yield ErrorInput(SampleInput(make_tensor((), dtype=torch.float, device=device)), + error_regex="Input must be >= 1-d.") + +def sample_inputs_clamp(op_info, device, dtype, requires_grad, **kwargs): + make_arg = partial(make_tensor, dtype=dtype, device=device, low=None, high=None, requires_grad=requires_grad) + shape = (S, M, S) + + yield SampleInput(make_arg(shape), args=(make_arg(shape), make_arg(shape))) + yield SampleInput(make_arg(shape), args=(make_arg(shape[1:]), make_arg(shape[1:]))) + yield SampleInput(make_arg(shape), args=(make_arg((S, 1, S)),)) + yield SampleInput(make_arg(shape), args=(None, make_arg(shape))) + yield SampleInput(make_arg(shape), args=(make_arg(shape), None)) + +def reference_inputs_elementwise_ternary(op, device, dtype, requires_grad, *, sample_inputs_func, supports_scalars=False, **kwargs): + yield from sample_inputs_func(op, device, dtype, 
requires_grad, **kwargs) + + make_arg = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad) + make_scalar_tensor = partial(make_tensor, (), device='cpu', dtype=dtype, requires_grad=requires_grad) + supported_dtypes = op.supported_dtypes(device) + + # broadcasting and oncontiguous cases + cases = ( + ((4, 4), (4, 4), (4, 4)), + ((4, 4), (1, 4, 4), (4, 4)), + ((4, 4), (1, 4, 4), (4, 1, 4)), + ((4, 4, 1), (1, 4, 4), (4, 4)), + ((4, 1), (1, 4, 4), (1, 4)), + ((4, 4), (), (4, 4)), + ((4, 4), (), ()), + ((), (4, 4), (1, 4, 4)), + ) + + for a, b, c in cases: + yield SampleInput(make_arg(a), args=(make_arg(b), make_arg(c))) + yield SampleInput(make_arg(a, noncontiguous=True), + args=(make_arg(b).transpose(0, -1), make_arg(c, noncontiguous=True).transpose(0, -1))) + + # scalar cases + if supports_scalars: + cases = [ + ((), 1, 2,), + ((), 1., 2), + ((4, 4), 1., 2,), + ((3, 4), make_scalar_tensor(), make_scalar_tensor()), + ] + + if torch.complex64 in supported_dtypes: + cases.extend([ + ((3, 1, 4), complex(1, 2), 3.), + ]) + + for a, b, c in cases: + yield SampleInput(make_arg(a), args=(b, c)) + + # type promotion cases + # int x float + if torch.float in supported_dtypes and torch.long in supported_dtypes: + a = make_arg((), dtype=torch.long) + b = make_arg((1, 4), dtype=torch.float) + c = make_arg((3, 4)) + + cases = ( + (a, b, c), + (c, a, b), + ) + + for a, b, c in cases: + yield SampleInput(a, args=(b, c)) + + # NaN propagation + if dtype.is_floating_point or dtype.is_complex: + nan = float('nan') if dtype.is_floating_point else complex(float('nan'), float('nan')) + + a = make_arg((12,)) + a[4] = nan + a[7] = nan + b = make_arg((12,)) + b[1] = nan + b[7] = nan + c = make_arg((12,)) + c[9] = nan + + yield SampleInput(a, args=(b, c)) + + +def _clamp_min_numpy(a, min=None): + return np.maximum(a, min) + + +def _clamp_max_numpy(a, max=None): + return np.minimum(a, max) + + +def _clamp_numpy(a, min=None, max=None): + if min is None: + return np.minimum(a, max) + if max is None: + return np.maximum(a, min) + + return np.minimum(max, np.maximum(a, min)) + + +def sample_inputs_cumprod(op_info, device, dtype, requires_grad, **kwargs): + def make_arg(shape): + # shrink values to be in the interval [-1, +1] for better precision in gradgradcheck + return make_tensor(shape, dtype=dtype, device=device, low=-1, high=+1, requires_grad=requires_grad) + + def prod_zeros(dim_select): + assert len(dim_select) == 2 + result = make_arg(3 * (S,)) + result.narrow(dim_select[0], 0, 1).narrow(dim_select[1], 1, 1).zero_() + result.narrow(dim_select[0], 2, 1).narrow(dim_select[1], 3, 1).zero_() + result.narrow(dim_select[0], 4, 1).narrow(dim_select[1], 3, 1).zero_() + return result + + for dim in range(3): + yield SampleInput(make_arg((S, S, S)), args=(dim,)) + # Scalar tensors and empty tensor + for size in [(), (1,), (0,)]: + yield SampleInput(make_arg(size), args=(0,)) + + yield SampleInput(prod_zeros([0, 1]), args=(1,)) + yield SampleInput(prod_zeros([0, 2]), args=(1,)) + yield SampleInput(prod_zeros([1, 2]), args=(1,)) + + # test dtype kwarg + yield SampleInput(prod_zeros([1, 2]), args=(1,), kwargs={'dtype': dtype}) + +def sample_inputs_view_as_complex(op_info, device, dtype, requires_grad, **kwargs): + yield SampleInput(make_tensor((S, 2), dtype=dtype, device=device, requires_grad=requires_grad)) + +def sample_inputs_view_as_real(op_info, device, dtype, requires_grad, **kwargs): + make_arg = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad) + sizes = ((S, S), 
()) + return (SampleInput(make_arg(size)) for size in sizes) + +def error_inputs_complex(op_info, device, is_ref=False, **kwargs): + make_arg = partial(make_tensor, dtype=torch.float32, device=device) + + if is_ref: + error_float = "Expected both inputs to be Half, Float or Double tensors but got torch.float32 and torch.int32" + error_dtype = "Expected object of scalar type torch.float32 but got scalar type torch.float64 for second argument" + error_out = "Expected out tensor to have dtype torch.complex128 but got torch.complex64 instead" + else: + error_float = "Expected both inputs to be Half, Float or Double tensors but got Float and Int" + error_dtype = "Expected object of scalar type Float but got scalar type Double for second argument" + error_out = "Expected object of scalar type ComplexDouble but got scalar type ComplexFloat for argument 'out'" + + yield ErrorInput(SampleInput(make_arg(M, S), make_arg(M, S, dtype=torch.int)), + error_type=RuntimeError, error_regex=error_float) + + yield ErrorInput(SampleInput(make_arg(M, S), make_arg(M, S, dtype=torch.float64)), + error_type=RuntimeError, error_regex=error_dtype) + + yield ErrorInput(SampleInput(make_arg(M, S, dtype=torch.float64), make_arg(M, S, dtype=torch.float64), + out=make_arg(M, S, dtype=torch.complex64)), + error_type=RuntimeError, error_regex=error_out) + +def sample_inputs_logaddexp(op_info, device, dtype, requires_grad, **kwargs): + make_arg = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad) + shape = (S, S) + yield SampleInput(make_arg(shape), make_arg(shape)) + +def sample_inputs_prod(op_info, device, dtype, requires_grad, **kwargs): + def make_arg(shape): + # shrink values to be in the interval [-1, +1] for better precision in gradgradcheck + return make_tensor(shape, dtype=dtype, device=device, low=-1, high=+1, requires_grad=requires_grad) + + def prod_single_zero(): + result = make_arg(2 * (S,)) + result[0, 1] = 0 + return result + + for sample in sample_inputs_cumprod(op_info, device, dtype, requires_grad): + # only Tensor, ignore other inputs + yield SampleInput(sample.input.clone().requires_grad_(requires_grad)) + yield sample + + # Generates samples with keepdim = True + for sample in sample_inputs_cumprod(op_info, device, dtype, requires_grad): + sample.kwargs['keepdim'] = True + yield sample + + yield SampleInput(prod_single_zero()) + yield SampleInput(make_arg((3, 3, 3)), args=(1,)) + yield SampleInput(make_arg((3, 3, 3)), args=(1,), kwargs={'keepdim': True}) + + yield SampleInput(make_arg((3, 0)), args=(1,)) + yield SampleInput(make_arg((3, 0)), args=(1,), kwargs={'keepdim': True}) + yield SampleInput(torch.tensor([2., 3, 0, 0], dtype=dtype, device=device, requires_grad=requires_grad)) + + # test zero scalar tensor + zero = make_arg(()) + zero.zero_() + yield SampleInput(zero.clone().requires_grad_(requires_grad)) + yield SampleInput(zero.clone().requires_grad_(requires_grad), args=(0,)) + yield SampleInput(zero.clone().requires_grad_(requires_grad), + args=(0,), + kwargs={'keepdim': True}) + +def error_inputs_neg(op_info, device, **kwargs): + si = SampleInput(torch.tensor((False, True), device=device)) + msg = ("Negation, the `\\-` operator, on a bool tensor is not supported." 
+ " If you are trying to invert a mask, use the `\\~` or" + " `logical_not\\(\\)` operator instead.") + yield ErrorInput(si, error_regex=msg) + +def sample_inputs_diag(op_info, device, dtype, requires_grad, **kwargs): + make_arg = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad, low=None, high=None) + yield SampleInput(make_arg(M)) + + tensors = ( + make_arg((M, M)), + make_arg((3, 5)), + make_arg((5, 3)), + ) + + args = ((), (2,), (-2,), (1,), (2,)) + + for tensor, arg in product(tensors, args): + yield SampleInput(tensor.clone().requires_grad_(requires_grad), *arg) + +def reference_inputs_diagonal_diag_embed(op_info, device, dtype, requires_grad, **kwargs): + yield from sample_inputs_diagonal_diag_embed( + op_info, device, dtype, requires_grad, **kwargs) + + make_arg = partial( + make_tensor, device=device, dtype=dtype, requires_grad=requires_grad) + + shapes1d = ((0,), (1,)) + shapes2d = ((L, M),) + shapes3d = ((L, M, S),) + + kwargs1d = {} + + kwargs2d = ( + # dim1 > dim2 is allowed + dict(dim1=1, dim2=0), + # negative dims are allowed + dict(dim1=-2, dim2=-1), + # out of bounds offset should return an empty tensor in diagonal and + # offset the diagonal in diag_embed + dict(offset=100), + ) + + kwargs3d = kwargs2d + ( + # make sure we can use non-sequential dims + dict(offset=-1, dim1=0, dim2=2), + ) + + samples1d = product(shapes1d, kwargs1d) + samples2d = product(shapes2d, kwargs2d) + samples3d = product(shapes3d, kwargs3d) + + for shape, kwargs in chain(samples1d, samples2d, samples3d): + if 'diagonal' in op_info.name: + # these are error inputs for diagonal + if shape in ((0,), (1,)): + continue + yield SampleInput(input=make_arg(shape), kwargs=kwargs) + + +def sample_inputs_diagonal_scatter(op_info, device, dtype, requires_grad, **kwargs): + make_arg = partial(make_tensor, dtype=dtype, device=device, requires_grad=requires_grad) + + # Shapes for 2D Tensors + shapes_2d = ((M, M), (3, 5), (5, 3)) + + # Shapes for 3D Tensors + shapes_3d = ((M, M, M),) + + args_2d = ((), (2,), (-2,), (1,)) + args_3d = ((1, 1, 2), (2, 0, 1), (-2, 0, 1)) + + for input_shape, arg in chain(product(shapes_2d, args_2d), product(shapes_3d, args_3d)): + input_ = make_arg(input_shape) + # We can programmatically figure out the right shape for src: + # It should be the same size as input.diagonal(other_args...) 
+ if not isinstance(arg, tuple): + arg_tuple = (arg,) + else: + arg_tuple = arg + src_shape = input_.diagonal(*arg_tuple).size() + src = make_arg(src_shape) + yield SampleInput(input_, args=(src, *arg_tuple)) + + +def sample_inputs_to_sparse(op_info, device, dtype, requires_grad, **kwargs): + make_arg = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad) + + yield SampleInput(make_arg((S, S))).with_metadata(output_process_fn_grad=lambda x: x.to_dense()) + yield SampleInput(make_arg((S, S)), 1).with_metadata(output_process_fn_grad=lambda x: x.to_dense()) + +def sample_inputs_cross_entropy(op_info, device, dtype, requires_grad, **kwargs): + batch_size, num_classes = shape = (2, 3) + reductions = ("mean", "sum", "none") + + input_shape_and_kwargs: List[Tuple[Tuple[int, ...], Dict[str, Any]]] = [ + (shape, {}), + ((*shape, 1), {}), + ((*shape, 1, 2), {}), + ((*shape, 1, 2, 3), {}), + *[(shape, dict(reduction=reduction)) for reduction in reductions], + *[ + ( + shape, + dict( + weight=make_tensor((num_classes,), device=device, dtype=dtype), + reduction=reduction, + ), + ) + for reduction in reductions + ], + (shape, dict(ignore_index=1)), + ] + + for (input_shape, kwargs), probabilities_target in itertools.product(input_shape_and_kwargs, (False, True)): + input = make_tensor(input_shape, device=device, dtype=dtype, requires_grad=requires_grad) + + if probabilities_target: + # ignore_index is not supported for probabilities target + if "ignore_index" in kwargs: + continue + + target = make_tensor( + input_shape, + low=0, + high=1, + device=device, + dtype=dtype, + requires_grad=requires_grad, + ) + else: + target = make_tensor( + (batch_size, *input_shape[2:]), + low=0, + high=num_classes, + device=device, + dtype=torch.long, + ) + + if "ignore_index" in kwargs and torch.all(target == kwargs["ignore_index"]): + # make sure at least one item in target is not ignored + target[0] = random.sample(sorted(set(range(num_classes)) - {kwargs["ignore_index"]}), 1)[0] + + yield SampleInput(input, target, **kwargs) + + +def sample_inputs_logit(op_info, device, dtype, requires_grad, **kwargs): + low, high = op_info.domain + + # Note: Operator is very sensitive at points near the + # start and end of domain and leads to NaN for float16 + # if domain_eps is 1e-5. + if dtype.is_floating_point or dtype.is_complex: + domain_eps = op_info._domain_eps if dtype != torch.float16 else 3e-2 + + low = low + domain_eps + high = high - domain_eps + + make_arg = partial(make_tensor, dtype=dtype, device=device, low=low, high=high, requires_grad=requires_grad) + + yield SampleInput(make_arg((S, S, S))) + yield SampleInput(make_arg((S, S, S)), 0.2) + yield SampleInput(make_arg(())) + yield SampleInput(make_arg(()), 0.2) + +def sample_inputs_isin(op_info, device, dtype, requires_grad, **kwargs): + make_arg = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad) + # isin has two paths based on the size of elements and test_elements. 
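For orientation, a minimal sketch of the behavior both paths must agree on (arbitrary toy values; the numel ratio that selects the branch is spelled out in the comment that follows):

import torch

elements = torch.tensor([1, 2, 3, 4])
test_elements = torch.tensor([2, 4, 6])
# whichever internal path runs, the result is an elementwise membership mask
assert torch.isin(elements, test_elements).tolist() == [False, True, False, True]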
+ # if elements.numel() < 10 * pow(test_elements.numel(), 0.145): + yield SampleInput(make_arg((L,)), args=(make_arg((S,)),)) + # else: + yield SampleInput(make_arg((S,)), args=(make_arg((L,)),)) + +def sample_inputs_masked_scatter(op_info, device, dtype, requires_grad, **kwargs): + make_arg = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad) + + yield SampleInput(make_arg((S, S)), args=(torch.randn(S, S, device=device) > 0, make_arg((S, S)))) + yield SampleInput(make_arg((S, S)), args=(torch.randn((S,), device=device) > 0, make_arg((S, S)))) + yield SampleInput(make_arg((S, S)), args=(bernoulli_scalar().to(device), make_arg((S, S)))) + yield SampleInput(make_arg((S,)), + args=(torch.randn(S, S, device=device) > 0, make_arg((S, S))), + broadcasts_input=True) + +def error_inputs_masked_scatter(op_info, device, **kwargs): + make_arg = partial(make_tensor, device=device, dtype=torch.float) + for mask_dtype in [torch.float, torch.uint8]: + yield ErrorInput(SampleInput(make_arg(1, 3), args=(torch.ones(1, 3, device=device, dtype=mask_dtype), + make_arg(3, 4))), + error_regex=r"masked_scatter_ only supports boolean masks") + +def sample_inputs_masked_fill(op_info, device, dtype, requires_grad, **kwargs): + make_arg = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad) + + yield SampleInput(make_arg((S, S)), args=(torch.randn(S, S, device=device) > 0, 10)) + yield SampleInput(make_arg((S, S)), args=(torch.randn(S, S, device=device) > 0, make_arg(()))) + yield SampleInput(make_arg((S, S)), args=(torch.randn(S, device=device) > 0, 10)) + yield SampleInput(make_arg(()), args=(torch.randn((), device=device) > 0, 10)) + yield SampleInput(make_arg(()), args=(torch.randn((), device=device) > 0, make_arg(()))) + yield SampleInput(make_arg((S, S)), args=(torch.randn((), device=device) > 0, 10)) + + yield SampleInput(make_arg((S,)), + args=(torch.randn(S, S, device=device) > 0, make_arg(())), + broadcasts_input=True) + yield SampleInput(make_arg((S,)), + args=(torch.randn(S, S, device=device) > 0, 10), + broadcasts_input=True) + + if torch.device(device).type == 'cuda': + # `self` and `mask` on CUDA but `value` is a CPU scalar tensor. + yield SampleInput(make_arg((S, S)), args=(torch.randn(S, S, device=device) > 0, torch.randn(()))) + +def error_inputs_masked_fill(op_info, device, **kwargs): + make_arg = partial(make_tensor, device=device, dtype=torch.float, requires_grad=False) + # `value` is not a 0-D tensor. + yield ErrorInput(SampleInput(make_arg((2, 2)), args=(make_arg(()) > 0, make_arg((1,)))), + error_regex="only supports a 0-dimensional value tensor, but got tensor with 1 dimension") + # downcasting complex value (scalar overload) + yield ErrorInput(SampleInput(make_arg((2, 2)), args=(make_arg(()) > 0, 1j)), + error_regex=r"value cannot be converted to type .* without overflow") + # downcasting complex value (tensor overload) + yield ErrorInput(SampleInput(torch.ones(2, dtype=torch.long, device=device), + args=(make_arg(()) > 0, torch.tensor(1j, device=device))), + error_regex=r"value cannot be converted to type .* without overflow") + + if torch.device(device).type == 'cuda': + # `self` and `mask` on CPU but `value` is a CUDA scalar tensor. 
+ yield ErrorInput(SampleInput(torch.randn((S, S), device='cpu'), + args=(torch.randn(S, S, device='cpu') > 0, + torch.randn((), device='cuda'))), + error_regex=r"to be on same device") + + +def sample_inputs_masked_select(op_info, device, dtype, requires_grad, **kwargs): + make_arg = partial( + make_tensor, device=device, dtype=dtype, requires_grad=requires_grad, low=None, high=None) + + yield SampleInput(make_arg((M, M)), torch.randn(M, M, device=device) > 0) + + yield SampleInput(make_arg((M, M)), torch.randn((M,), device=device) > 0) + yield SampleInput(make_arg((M,)), torch.randn((M, M), device=device) > 0) + + yield SampleInput(make_arg((M, 1, M)), torch.randn((M, M), device=device) > 0) + + yield SampleInput(make_arg(()), torch.tensor(1, device=device, dtype=torch.bool)) + + yield SampleInput(make_arg((M, M)), torch.tensor(1, device=device, dtype=torch.bool)) + + yield SampleInput(make_arg(()), torch.randn((M, M), device=device) > 0) + +def sample_inputs_matrix_exp(op_info, device, dtype, requires_grad, **kwargs): + make_arg = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad) + yield SampleInput(make_arg((S, S))) + yield SampleInput(make_arg((S, S, S))) + +def sample_inputs_matmul(op_info, device, dtype, requires_grad, is_rmatmul=False, **kwargs): + make_arg = partial(make_tensor, dtype=dtype, device=device, low=None, + high=None, requires_grad=requires_grad) + test_cases = (((L,), (L,)), + ((S, M), (M,)), + ((M,), (M, S)), + ((S, M), (M, S)), + ((S, 0), (0, M)), + ((S, S, M), (M,)), + ((S, S, M), (M, S)), + ((S, S, 0), (0, S)), + ((M,), (S, M, S)), + ((S, M), (S, M, S)), + ((0, 0), (S, 0, 0)), + ((S, S, M, M), (S, S, M, S)), + ((S, S, M, M), (M,)), + ((M,), (S, S, M, S)), + ((S, S, S), (1, S, S)) + ) + for lhs_shape, rhs_shape in test_cases: + lhs = make_arg(lhs_shape) + rhs = make_arg(rhs_shape) + if not is_rmatmul: + yield SampleInput(lhs, rhs) + else: + yield SampleInput(rhs, lhs) + + +def sample_inputs_meshgrid(op_info: OpInfo, device: torch.device, dtype: torch.dtype, + requires_grad: bool, + *, variant: str, **kwargs) -> List[SampleInput]: + if variant == 'variadic': + def make_inputs( + tensors: List[torch.Tensor]) -> Tuple[Union[torch.Tensor, + List[torch.Tensor]], + Tuple[torch.Tensor, ...]]: + return tensors + elif variant == 'list': + def make_inputs( + tensors: List[torch.Tensor]) -> Tuple[Union[torch.Tensor, + List[torch.Tensor]], + Tuple[torch.Tensor, ...]]: + return [tensors] + else: + raise ValueError( + 'Unsupported variant, must be one of {"variadic", "list"}. ' + f'Got "{variant}".') + + SCALAR = torch.Size([]) + VECTOR = torch.Size([3]) + test_cases: List[List[torch.Size]] = [ + [SCALAR], + [VECTOR], + [VECTOR, SCALAR], + [VECTOR, SCALAR, VECTOR], + [VECTOR, SCALAR, VECTOR, SCALAR], + ] + + for shapes, indexing in itertools.product(test_cases, {'xy', 'ij'}): + args = make_inputs( + [make_tensor(shape, dtype=dtype, device=device, requires_grad=requires_grad) + for shape in shapes]) + yield SampleInput(*args, indexing=indexing) + + +def sample_inputs_mvlgamma(op_info, device, dtype, requires_grad, **kwargs): + make_arg = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad) + tensor_shapes = ((S, S), ()) + ns = (1, 2, 3, 4, 5) + + # Since the accepted lower bound for input + # to mvlgamma depends on `p` argument, + # the following function computes the lower bound + # which we pass to `make_tensor`. + def compute_min_val(p): + return (p - 1.) 
/ 2 + + for shape, n in product(tensor_shapes, ns): + min_val = compute_min_val(n) + if not dtype.is_floating_point: + # Round-up minimum value for integral dtypes + min_val += 1 + else: + min_val += 2 * torch.finfo(dtype).eps + yield SampleInput(make_arg(shape, low=min_val), args=(n,)) + + +# Since `mvlgamma` has multiple entries, +# there are multiple common skips for the additional +# entries. Following function is a helper to that end. +def skips_mvlgamma(skip_redundant=False): + skips = ( + # outside domain values are hard error for mvlgamma op. + DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_float_domains'), + DecorateInfo(unittest.expectedFailure, 'TestUnaryUfuncs', + 'test_reference_numerics_extremal'), + DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', + 'test_reference_numerics_large', + dtypes=(torch.float16, torch.int8)), + DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', + 'test_reference_numerics_small', + dtypes=(torch.int8,)), + ) + if skip_redundant: + # Redundant tests + skips = skips + ( # type: ignore[assignment] + DecorateInfo(unittest.skip("Skipped!"), 'TestFwdGradients'), + DecorateInfo(unittest.skip("Skipped!"), 'TestBwdGradients'), + DecorateInfo(unittest.skip("Skipped!"), 'TestJit'), + DecorateInfo(unittest.skip("Skipped!"), 'TestCommon'), + ) + return skips + + +# To test reference numerics against multiple values of argument `p`, +# we make multiple OpInfo entries with each entry corresponding to different value of p. +# We run the op tests from test_ops.py only for `p=1` to avoid redundancy in testing. +def make_mvlgamma_opinfo(variant_test_name, domain, skips, sample_kwargs): + return UnaryUfuncInfo('mvlgamma', + ref=reference_mvlgamma if TEST_SCIPY else None, + aliases=('special.multigammaln',), + variant_test_name=variant_test_name, + domain=domain, + decorators=(precisionOverride({torch.float16: 5e-2}),), + dtypes=all_types_and(torch.half, torch.bfloat16), + dtypesIfCUDA=all_types_and(torch.float16), + sample_inputs_func=sample_inputs_mvlgamma, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + promotes_int_to_float=True, + skips=skips, + sample_kwargs=sample_kwargs) + + +def sample_inputs_cumulative_ops(op_info, device, dtype, requires_grad, supports_dtype_kwargs=True, **kwargs): + def _make_tensor_helper(shape, low=None, high=None): + return make_tensor(shape, dtype=dtype, device=device, low=low, high=high, requires_grad=requires_grad) + + yield SampleInput(_make_tensor_helper((S, S, S)), 0) + yield SampleInput(_make_tensor_helper((S, S, S)), 1) + yield SampleInput(_make_tensor_helper(()), 0) + + if supports_dtype_kwargs: + # NOTE: if `dtype` is not same as input, then inplace variants fail with + # `provided dtype must match the dtype of self tensor in cumsum` + yield SampleInput(_make_tensor_helper((S, S, S)), 1, dtype=dtype) + + +def sample_inputs_unfold(op_info, device, dtype, requires_grad, **kwargs): + test_cases = ( + ((), (0, 1, 1)), + ((S, S, S, S), (0, 3, 1)), + ((S, S, S, S), (1, 3, 1)), + ((S, S, S, S), (2, 3, 1)), + ((S, S, S, S), (3, 3, 1)), + ((S, S, S, S), (0, 3, 2)), + ((S, S, S, S), (1, 3, 2)), + ((S, S, S, S), (2, 3, 2)), + ((S, S, S, S), (3, 3, 2)), + ((S, S, S, S), (0, 4, 1)), + ((S, S, S, S), (1, 4, 1)), + ((S, S, S, S), (2, 4, 1)), + ((S, S, S, S), (3, 4, 1)), + ((M,), (0, 3, 1)), + ((M,), (0, 3, 2)), + ((M,), (0, 3, 3)), + ((1000,), (0, 3, 11)), + ((1000,), (0, 2, 27)), + ((10, 10), (0, 1, 2)), + ((10, 10), (1, 2, 3)), + ((10, 10), (1, 2, 2)), + ((S, S, S), (2, 3, 2)), + ) + + for 
shape, arguments in test_cases: + yield SampleInput(make_tensor(shape, dtype=dtype, device=device, + low=None, high=None, + requires_grad=requires_grad), + *arguments) + +def sample_inputs_split(op_info, device, dtype, requires_grad, *, list_args=False, **kwargs): + make_arg = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad) + + if list_args: + cases = ( + ((S, S, S), (torch.Size([int(S / 3), S - int(S / 3) * 2, int(S / 3)]),)), + ((S, S, S), (torch.Size([int(S / 2), S - int(S / 2) * 2, int(S / 2)]), 2),), + ((S, S, S), (torch.Size([int(S / 2), S - int(S / 2) * 2, int(S / 2)]), -2),) + ) + else: + cases = ( # type: ignore[assignment] + ((S, S, S), (2,)), + ((S, S, S), (S, 1)), + ) + + for shape, args in cases: + yield SampleInput(make_arg(shape), args=args) + + +def sample_inputs_split_with_sizes(op_info, device, dtype, requires_grad, **kwargs): + make_arg = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad) + + cases = (((S, S, S), (torch.Size([int(S / 3), S - int(S / 3) * 2, int(S / 3)]),)), + ((S, S, S), (torch.Size([int(S / 3), S - int(S / 3), 0]),)), + ((S, S, S), (torch.Size([int(S / 3), S - int(S / 3) * 2, int(S / 3)]), 2)), + ((S, S, S), (torch.Size([int(S / 3), S - int(S / 3) * 2, int(S / 3)]), -2)), + ) + + for shape, args in cases: + yield SampleInput(make_arg(shape), args=args) + + +def sample_inputs_msort(op_info, device, dtype, requires_grad, **kwargs): + def apply_grad(t): + if dtype in floating_types_and(torch.float16, torch.bfloat16): + t.requires_grad_(requires_grad) + + def large_1d_unique(dtype, device): + res = torch.randperm(L * L * L, dtype=torch.int64, device=device) + res = res.to(dtype) + apply_grad(res) + return res + + # Test case for large tensor. + yield SampleInput(large_1d_unique(dtype, device)) + + yield SampleInput(make_tensor((S, M, S), dtype=dtype, device=device, + low=None, high=None, + requires_grad=requires_grad)) + +def sample_inputs_lerp(op_info, device, dtype, requires_grad, **kwargs): + make_arg = partial(make_tensor, dtype=dtype, device=device, requires_grad=requires_grad) + + # no broadcast + yield SampleInput(make_arg((S, S)), make_arg((S, S)), 0.4) + # broadcast rhs + yield SampleInput(make_arg((S, S)), make_arg((S,)), 0.4) + # scalar tensor + yield SampleInput(make_arg(()), make_arg(()), 0.4) + # broadcast rhs scalar-tensor + yield SampleInput(make_arg((S, S)), make_arg(()), 0.4) + # broadcast rhs with weight tensor + yield SampleInput(make_arg((S, S)), make_arg((S,)), make_arg((S, S))) + # broadcast rhs and weight tensor + yield SampleInput(make_arg((S, S)), make_arg((S, 1)), make_arg((S,))) + # broadcast lhs + yield SampleInput(make_arg((S,)), make_arg((S, S)), 0.4).with_metadata(broadcasts_input=True) + # scalar broadcast_lhs + yield SampleInput(make_arg(()), make_arg((S, S)), 0.4).with_metadata(broadcasts_input=True) + # broadcast all + yield SampleInput(make_arg((S, 1)), make_arg((S, S)), 0.4).with_metadata(broadcasts_input=True) + # tensor broadcast all + yield SampleInput(make_arg((S, 1)), make_arg((S, S)), make_arg((S, 1))).with_metadata( + broadcasts_input=True) + # no broadcast with weight tensor + yield SampleInput(make_arg((S, S)), make_arg((S, S)), make_arg((S, S))) + # broadcast lhs with weight tensor + yield SampleInput(make_arg((S,)), make_arg((S, S)), make_arg((S, S))).with_metadata( + broadcasts_input=True) + # broadcast lhs and weight tensor + yield SampleInput(make_arg((S,)), make_arg((S, S, S)), make_arg((S, S))).with_metadata( + broadcasts_input=True) + # broadcast 
lhs and weight tensor variant + yield SampleInput(make_arg((S, S)), make_arg((S, S, S)), make_arg((S,))).with_metadata( + broadcasts_input=True) + + if dtype.is_complex: + # no broadcast + yield SampleInput(make_arg((S, S)), make_arg((S, S)), 0.4j) + yield SampleInput(make_arg((S, S)), make_arg((S, S)), 1.2 + 0.1j) + # broadcast rhs + yield SampleInput(make_arg((S, S)), make_arg((S,)), 0.4j) + yield SampleInput(make_arg((S, S)), make_arg((S, S)), 5.4 + 9j) + # scalar tensor + yield SampleInput(make_arg(()), make_arg(()), 0.4j) + yield SampleInput(make_arg(()), make_arg(()), 6.1 + 0.004j) + # broadcast rhs scalar-tensor + yield SampleInput(make_arg((S, S)), make_arg(()), 0.4j) + yield SampleInput(make_arg((S, S)), make_arg(()), 1 + 2j) + +def sample_inputs_tensordot(self, device, dtype, requires_grad, **kwargs): + cases = ( + ((2, 2, 2), (2, 2, 2), (2)), + ((2, 2, 1), (2, 1, 2), ([0, 1], [2, 0])), + ) + for first_shape, second_shape, dims in cases: + yield SampleInput(make_tensor(first_shape, dtype=dtype, device=device, + requires_grad=requires_grad), + make_tensor(second_shape, dtype=dtype, device=device, + requires_grad=requires_grad), + dims=dims) + +def sample_inputs_kron(op_info, device, dtype, requires_grad, **kwargs): + make_arg = partial( + make_tensor, dtype=dtype, device=device, requires_grad=requires_grad, low=None, high=None) + test_cases = ( + ((S, S), (M, L)), + ) + + for input_shape, other_shape in test_cases: + input = make_arg(input_shape) + other = make_arg(other_shape) + yield SampleInput(input, other) + +def sample_inputs_inner(self, device, dtype, requires_grad, **kwargs): + make_arg = partial(make_tensor, dtype=dtype, device=device, requires_grad=requires_grad) + yield SampleInput(make_arg(S), make_arg(S)) + yield SampleInput(make_arg(), make_arg(S, S)) + +def sample_inputs_scatter(op_info, device, dtype, requires_grad, **kwargs): + def _tensor(shape, dtype=dtype, low=None, high=None): + return make_tensor(shape, dtype=dtype, device=device, low=low, high=high, requires_grad=requires_grad) + + def _gather(shape, index_dim, max_indices): + return gather_variable(shape, index_dim, max_indices, device=device) + + zero = torch.tensor(0, dtype=torch.long, device=device) + test_cases = ( + (_tensor((M, S)), (0, _gather((S, S), 1, M), _tensor((S, S)))), + (_tensor((M, S)), (1, _gather((S, S), 0, S), _tensor((S, S)))), + (_tensor((M, S)), (-1, _gather((S, S), 0, S), _tensor((S, S)))), + (_tensor((M, S)), (0, _gather((M, S // 2), 1, M), _tensor((M, S // 2)))), + (_tensor((M, S)), (1, _gather((M, S // 2), 0, S), _tensor((M, S // 2)))), + (_tensor((M, S)), (-1, _gather((M, S // 2), 0, S), _tensor((M, S // 2)))), + (_tensor(()), (0, zero.clone().detach(), _tensor(()))), + (_tensor(()), (0, zero.clone().detach(), 2.5)), + ) + + for tensor, args in test_cases: + yield SampleInput(tensor, *args) + + if not requires_grad: + yield SampleInput(tensor.clone().detach(), *args, reduce='add') + + if dtype.is_floating_point: + yield SampleInput(tensor.clone().detach(), *args, reduce='multiply') + +def sample_inputs_scatter_add(op_info, device, dtype, requires_grad, **kwargs): + def _tensor(shape, dtype=dtype, low=None, high=None): + return make_tensor(shape, dtype=dtype, device=device, low=low, high=high, requires_grad=requires_grad) + + def _gather(shape, index_dim, max_indices): + return gather_variable(shape, index_dim, max_indices, device=device) + + zero = torch.tensor(0, dtype=torch.long, device=device) + yield SampleInput(_tensor((M, S)), 0, _gather((S, S), 1, M), _tensor((S, S))) + 
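# The remaining scatter_add samples mirror the index patterns used for scatter above:
+ # positive and negative dims, narrower (M, S // 2) sources, and a 0-d tensor case.
+ 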
yield SampleInput(_tensor((M, S)), 1, _gather((S, S), 0, S), _tensor((S, S))) + yield SampleInput(_tensor((M, S)), -1, _gather((S, S), 0, S), _tensor((S, S))) + yield SampleInput(_tensor((M, S)), 0, _gather((M, S // 2), 1, M), _tensor((M, S // 2))) + yield SampleInput(_tensor((M, S)), 1, _gather((M, S // 2), 0, S), _tensor((M, S // 2))) + yield SampleInput(_tensor((M, S)), -1, _gather((M, S // 2), 0, S), _tensor((M, S // 2))) + yield SampleInput(_tensor(()), 0, zero.clone().detach(), _tensor(())) + +def sample_inputs_scatter_reduce(op_info, device, dtype, requires_grad, **kwargs): + make_arg = partial(make_tensor, dtype=dtype, device=device, requires_grad=requires_grad) + gather = partial(gather_variable, device=device) + + zero = torch.tensor(0, dtype=torch.long, device=device) + test_cases = ( + ((M, S), 0, gather((S, S), 1, M), (S, S)), + ((M, S), 1, gather((S, S), 0, S), (S, S)), + ((M, S), -1, gather((S, S), 0, S), (S, S)), + ((M, S), 0, gather((M, S // 2), 1, M), (M, S // 2)), + ((M, S), 1, gather((M, S // 2), 0, S), (M, S // 2)), + ((M, S), -1, gather((M, S // 2), 0, S), (M, S // 2)), + ((), 0, zero.clone().detach(), ()), + ) + + reduce = op_info.variant_test_name + for (inp_shape, dim, index, src_shape), include_self in product(test_cases, [False, True, False]): + yield SampleInput(make_arg(inp_shape), + args=(dim, index, make_arg(src_shape), reduce), + kwargs={'include_self': include_self}) + + + # Sample inputs to test edge cases for backward + # Check that gradients are propagated correctly for prod when zeros in self/src are reduced + if requires_grad and reduce == 'prod': + # This sample tests gradients for the following cases + # (a) 1 zero reduced (from src (self[0, 1], self[1, 1]), from self (self[0, 0], self[2, 0])) + # (b) 2 zeros reduced (1 from src and 1 from self (self[1, 0]) + # (c) no zeros reduced (self([2, 1])) + # (d) 2 zeros reduced (both from src) is tested in test/test_autograd.py + # test_scatter_index_reduce_prod_gradgrad_error as this case is not supported for gradgrad + input = torch.tensor([[0, 13], [0, 17], [0, 19]], dtype=dtype, device=device, requires_grad=requires_grad) + src = torch.tensor([[0, 1, 2, 3], [0, 4, 0, 1], [2, 3, 5, 6]], dtype=dtype, device=device, requires_grad=requires_grad) + idx = torch.tensor([[1, 1, 0, 0], [0, 0, 1, 1], [0, 0, 0, 1]], dtype=torch.long, device=device) + + yield SampleInput(input, + args=(1, idx, src, reduce), + kwargs={'include_self': True}) + +def sample_inputs_segment_reduce(op_info, device, dtype, requires_grad, *, mode='lengths', **kwargs): + def _tensor(shape, dtype=dtype, low=None, high=None): + return make_tensor(shape, dtype=dtype, device=device, low=low, high=high, requires_grad=requires_grad) + + zero = torch.tensor(0, dtype=torch.long, device=device) + test_cases = ( + # inp_shape, dim, lengths, unsafe + ((S,), 0, [0, 1, 2, 2], False), + ((S,), 0, [0, 1, 2, 2], True), + ((S,), 0, [2, 0, 3, 0], False), + ((S, S), 0, [0, 1, 2, 2], False), + # test when lengths do not sum to dim size + ((M, S, S), 0, [1, 2, 0, 6, 0], True), + # test for higher dimensions + ((S, S), 1, [[0, 1, 2, 2] for _ in range(S)], False), + ((S, S), 1, [[2, 0, 3, 0], [0, 1, 2, 2], [3, 0, 2, 0], [1, 1, 1, 2], [0, 1, 2, 2]], False), + ((S, S, S), 1, [[0, 1, 2, 2] for _ in range(S)], False), + ((S, S, S), 1, [[2, 0, 3, 0], [0, 1, 2, 2], [3, 0, 2, 0], [1, 1, 1, 2], [0, 1, 2, 2]], False), + ) + + reductions = ["max", "mean", "min", "sum", "prod"] + for args, reduce, initial in product(test_cases, reductions, [1, 2]): + inp_shape, dim, lengths, 
unsafe = args
+ lengths_t = torch.tensor(lengths, dtype=torch.long, device=device)
+ sample_input_kwargs = {'axis': dim, 'unsafe': unsafe, 'initial': initial}
+ if mode == 'lengths':
+ sample_input_kwargs['lengths'] = lengths_t
+ elif mode == 'offsets':
+ zeros_shape = list(lengths_t.shape)
+ zeros_shape[dim] = 1
+ offsets_t = torch.cat((lengths_t.new_zeros(zeros_shape), lengths_t), dim).cumsum_(dim)
+ sample_input_kwargs['offsets'] = offsets_t
+ else:
+ raise RuntimeError(f"mode must be one of 'offsets' or 'lengths', got '{mode}'.")
+ yield SampleInput(_tensor(inp_shape),
+ args=(reduce,),
+ kwargs=sample_input_kwargs)
+
+
+def sample_inputs_ravel(op_info, device, dtype, requires_grad, **kwargs):
+ make_arg = partial(make_tensor, dtype=dtype, device=device,
+ low=None, high=None, requires_grad=requires_grad)
+ yield SampleInput(make_arg((S, S, S)))
+ yield SampleInput(make_arg(()))
+ yield SampleInput(make_arg((S, S, S), noncontiguous=True))
+
+def sample_inputs_unravel_index(op_info, device, dtype, requires_grad, **kwargs):
+ make_arg = partial(make_tensor, dtype=dtype, device=device,
+ low=None, high=None, requires_grad=requires_grad)
+ yield SampleInput(
+ torch.tensor(
+ [[3, 8, 13], [0, 5, 10]],
+ device=device,
+ dtype=dtype),
+ (4, 5))
+ yield SampleInput(
+ torch.tensor([[3, 8, 13], [0, 5, 10]], device=device, dtype=dtype),
+ (4, 2**30))
+ yield SampleInput(
+ torch.tensor([[3, 8, 13], [0, 5, 10]], device=device, dtype=dtype),
+ (2**30, 4))
+ yield SampleInput(
+ torch.tensor(2, device=device, dtype=dtype),
+ (2, 2))
+ max_val = 2**(8 * dtype.itemsize - (1 if dtype.is_signed else 0)) - 1
+ yield SampleInput(
+ torch.tensor(max_val - 1, device=device, dtype=dtype),
+ (1, max_val))
+ yield SampleInput(
+ torch.tensor([22, 41, 37], device=device, dtype=dtype),
+ (7, 6))
+ yield SampleInput(
+ torch.tensor(min(1621, max_val), device=device, dtype=dtype),
+ (6, 7, 8, 9))
+ yield SampleInput(
+ torch.tensor([], device=device, dtype=dtype),
+ (10, 3, 5))
+ yield SampleInput(
+ torch.tensor(
+ [[1, 0, 1, 2, 3, 4], [1, 6, 1, 3, 2, 0]],
+ device=device,
+ dtype=dtype),
+ (5, 8))
+ yield SampleInput(
+ torch.tensor(
+ [[1, 0, 1, 2, 3, 4], [1, 6, 1, 3, 2, 0], [1, 3, 1, 0, 9, 5]],
+ device=device,
+ dtype=dtype),
+ (5, 8, 10))
+ yield SampleInput(
+ torch.tensor(0, device=device, dtype=dtype),
+ ())
+
+ a = np.array([[2, 4, 5, 6], [7, 8, 1, 15]])
+ b = np.array([[3, 2, 7, 6], [10, 12, 8, 9]])
+ _, i1, i2 = np.intersect1d(a, b, assume_unique=True, return_indices=True)
+ yield SampleInput(torch.tensor(i1, device=device, dtype=dtype), a.shape)
+ yield SampleInput(torch.tensor(i2, device=device, dtype=dtype), b.shape)
+
+ a = np.array([[2, 4, 5, 6, 6], [4, 7, 8, 7, 2]])
+ b = np.array([[3, 2, 7, 7], [10, 12, 8, 7]])
+ _, i1, i2 = np.intersect1d(a, b, return_indices=True)
+ yield SampleInput(torch.tensor(i1, device=device, dtype=dtype), a.shape)
+ yield SampleInput(torch.tensor(i2, device=device, dtype=dtype), b.shape)
+
+
+def sample_inputs_tril_triu(op_info, device, dtype, requires_grad, **kwargs):
+ make_arg = partial(make_tensor, dtype=dtype, device=device, requires_grad=requires_grad)
+ cases = (((M, M), ()),
+ ((M, M), (2,),),
+ ((M, S), ()),
+ ((M, S), (-1,)),
+ ((M, M), (2,),),
+ ((S, M, S), ()),
+ ((S, M, S), (2,)),
+ ((3, 3, S, S), ()),)
+
+ for shape, args in cases:
+ yield SampleInput(make_arg(shape), args=args)
+
+def error_inputs_tril_triu(opinfo, device, **kwargs):
+ make_arg = partial(make_tensor, device=device, dtype=torch.float32)
+
+ # error inputs for input.ndim <= 2
+ yield 
ErrorInput(SampleInput(make_arg((4,))), error_regex="input tensor must have at least 2 dimensions") + +def sample_inputs_trilu_indices(op_info, device, dtype, requires_grad, **kwargs): + # (row, col, offset) + args_list = ((0, 0), + (20, 0), + (0, 20), + (20, 21, 0), + (20, 21, 7), + (20, 21, -7), + # Large test cases below are deliberately commented out to speed up CI + # tests and to avoid OOM error. When modifying implementations of + # tril_indices and triu_indices, please enable these tests and make sure + # they pass. + # (2, 68435455, 3), + # (5000, 5000), + # (5000, 5000, 1234), + # (5000, 5000, -1233), + ) + for args in args_list: + yield SampleInput(args[0], args=args[1:], kwargs={"dtype": dtype, "device": device}) + +def sample_inputs_clone_contiguous(op_info, device, dtype, requires_grad, **kwargs): + make_arg = partial(make_tensor, dtype=dtype, device=device, requires_grad=requires_grad) + + yield SampleInput(make_arg((S, M, S))) + yield SampleInput(make_arg(())) + +def reference_inputs_clone_contiguous(op, device, dtype, requires_grad, **kwargs): + # NOTE: the default memory format for clone is torch.preserve_format, for contiguous it's torch.contiguous_format + # This exploits that default to test torch.preserve_format for clone, without causing an error when testing contiguous + yield from sample_inputs_clone_contiguous(op, device, dtype, requires_grad, **kwargs) + + shapes = ( + (3, 5, 6), + (1, 1, 3, 5, 6), + (1, 1, 3, 5, 6, 1, 1), + (1, 0, 3, 5, 0, 2), + (1, 0, 3, 5, 0, 0, 1, 1, 2), + (), + ) + + make_arg = partial(make_tensor, dtype=dtype, device=device, requires_grad=requires_grad) + for shape in shapes: + yield SampleInput(make_arg(shape)) + yield SampleInput(make_arg(shape).transpose(0, -1)) + yield SampleInput(make_arg(shape, noncontiguous=True)) + yield SampleInput(make_arg(shape, noncontiguous=True).transpose(0, -1)) + + yield SampleInput(make_arg(shape), kwargs={'memory_format': torch.contiguous_format}) + yield SampleInput(make_arg(shape).transpose(0, -1), kwargs={'memory_format': torch.contiguous_format}) + yield SampleInput(make_arg(shape, noncontiguous=True), kwargs={'memory_format': torch.contiguous_format}) + yield SampleInput(make_arg(shape, noncontiguous=True).transpose(0, -1), kwargs={'memory_format': torch.contiguous_format}) + + # shape, strides, offset + strided_cases = ( + ((5, 6, 2), (1, 1, 7), 2), + ((5, 5, 4), (1, 1, 7), 2), + ((5, 5, 2), (4, 5, 7), 3), + ((5, 5, 2), (5, 5, 7), 3), + ((5, 5, 2), (5, 5, 5), 3), + ((9, 5, 2), (0, 1, 7), 3), + ) + + for shape, strides, offset in strided_cases: + yield SampleInput(make_arg(500,).as_strided(shape, strides, offset)) + yield SampleInput(make_arg(500,).as_strided(shape, strides, offset), kwargs={'memory_format': torch.contiguous_format}) + + # channels last 2D + yield SampleInput(make_arg((2, 2, 2, 2)), kwargs={'memory_format': torch.channels_last}) + a = make_arg((2, 2, 2, 2)).permute(0, 3, 1, 2) + yield SampleInput(a, kwargs={'memory_format': torch.channels_last}) + + # channels last 3D + yield SampleInput(make_arg((2, 2, 2, 2, 2)), kwargs={'memory_format': torch.channels_last_3d}) + a = make_arg((2, 2, 2, 2, 2)).permute(0, 4, 1, 2, 3) + yield SampleInput(a, kwargs={'memory_format': torch.channels_last_3d}) + + +def sample_inputs_sum_to_size(op_info, device, dtype, requires_grad, **kwargs): + make_arg = partial(make_tensor, dtype=dtype, device=device, requires_grad=requires_grad) + + # list of tuples (shape, shape) defining the shapes of the input and output tensors + sample_shapes = [ + ((), ()), + 
((S,), (1,)), + ((S, S), (1, 1)), + ((S, S), (1, S)), + ((S, S), (S, S)), + ((S, S, S), (S, 1, S)), + ] + + for input_shape, output_shape in sample_shapes: + yield SampleInput(make_arg(input_shape), args=(output_shape,)) + if output_shape == (): + continue + yield SampleInput(make_arg(input_shape), args=(list(output_shape),)) + yield SampleInput(make_arg(input_shape), args=(*output_shape,)) + + +def error_inputs_sum_to_size(op_info, device, **kwargs): + shape = (M, S, M) + err_msg = "is not expandable to size" + si = SampleInput(make_tensor(shape, device=device, dtype=torch.float32), args=(M, M)) + yield ErrorInput(si, error_regex=err_msg) + + shape = (M + 1, S, S, M) + err_msg = "is not expandable to size" + si = SampleInput(make_tensor(shape, device=device, dtype=torch.float32), args=(M + 1, 1)) + yield ErrorInput(si, error_regex=err_msg) + + +def sample_inputs_resize_ops(op_info, device, dtype, requires_grad, **kwargs): + make_arg = partial(make_tensor, dtype=dtype, device=device) + cases = (((S, S, S), (S * S, S)), + ((), ()), + ((), (1, 1, 1)), + ) + + for shape, args_or_shape in cases: + # Update `args` based on operator + if op_info.name == 'resize_': + # resize_ takes shape/tuple of ints, + args = (args_or_shape, ) + elif op_info.name == 'resize_as_': + # resize_as_ takes another tensor + args = (make_arg(shape, requires_grad=False), ) # type:ignore[assignment] + else: + raise ValueError("sample_inputs_resize_ops is being used with incorrect operator") + + yield SampleInput(make_arg(shape, requires_grad=requires_grad), args=args) + +def sample_inputs_view_reshape(op_info, device, dtype, requires_grad, **kwargs): + make_arg = partial(make_tensor, dtype=dtype, device=device, requires_grad=requires_grad) + + cases = ( + # a, b, is_tensor_supported + ((S, S, S), (S * S, S), True), + ((S * S, S), (S, S, S), True), + ((S * S, S), (S, -1, S), False), # neg index + ((S * S * 2, S), (S, -1), False), # neg index + ((S,), (S,), True), + ((), (), False), # empty + ((), (1,), True), + ) + + for a, b, is_tensor_supported in cases: + # skip unsupported cases + if kwargs.get("tensor_arg") and not is_tensor_supported: + continue + + # convert to tensor + if kwargs.get("tensor_arg"): + b = make_arg(b, requires_grad=False) + + yield SampleInput(make_arg(a), args=(b,)) + +def reference_inputs_view_reshape(op, device, dtype, requires_grad, **kwargs): + yield from sample_inputs_view_reshape(op, device, dtype, requires_grad, **kwargs) + + cases = ( + # a, b, is_tensor_supported + ((125,), (25, 5), True), + ((25, 25), (1, 5, 5, 1, 5, 1, 5, 1), True), + ((16, 32), (2, 4, 1, 4, 4, 1, 4), True), + ((16, 12), (12, 16), True), + ((1, 16, 12), (12, 16), True), + ((1, 5, 1, 5), (25, 1), True), + ((2, 4, 2), (4, 4), True), + ((1, 4), (1, 1, 2, 1, 2), True), + ((3, 5, 7), (7, 5, 3), True), + ((1,), (), False), # empty + ((5, 0, 2, 3), (5, 0, 2, 3), True), + ((2, 1, 0, 3, 1), (5, 0), True), + ((1,), (), False), # empty + ((4, 5, 6), (4, 5, 6, 1, 1, 1), True), + ((), (1, 1, 1, 1), False), # empty + ) + + irreversible_cases = ( + ((), (-1,), False), # neg index, empty + ((4, 7, 9, 1, 1), (1, 4, 3, -1, 1), False), # neg index + ) + + make_arg = partial(make_tensor, dtype=dtype, device=device, requires_grad=requires_grad) + for a, b, is_tensor_supported in cases: + # skip unsupported cases + if kwargs.get("tensor_arg") and not is_tensor_supported: + continue + + if kwargs.get("tensor_arg"): + # convert to tensor + yield SampleInput(make_arg(a), args=(make_arg(b, requires_grad=False),)) + yield SampleInput(make_arg(b), 
args=(make_arg(a, requires_grad=False),)) + else: + yield SampleInput(make_arg(a), args=(b,)) + yield SampleInput(make_arg(b), args=(a,)) + + for a, b, is_tensor_supported in irreversible_cases: + # skip unsupported cases + if kwargs.get("tensor_arg") and not is_tensor_supported: + continue + + # convert to tensor + if kwargs.get("tensor_arg"): + b = make_arg(b, requires_grad=False) + + yield SampleInput(make_arg(a), args=(b,)) + +def error_inputs_view_reshape(op, device, **kwargs): + + cases = ( + # a, b, is_tensor_supported + # Reshape to different numel + ((2,), (), False), # empty + ((1, 3, 0), (), False), # empty + ((4, 3), (4, 2), True), + ((1, 3, 5), (5, 2, 2), True), + # No valid inference + ((1, 3, 5), (5, -1, 2), False), # neg index + # Two inferred shapes + ((1, 3, 5), (5, -1, -1), False), # neg index + ((1), (0, -1), False), # neg index + ((0, 5), (0, -1), False), # neg index + ) + + make_arg = partial(make_tensor, dtype=torch.float32, device=device, requires_grad=False) + for a, b, is_tensor_supported in cases: + # skip unsupported cases + if kwargs.get("tensor_arg") and not is_tensor_supported: + continue + + if b == (5, -1, -1): + error_regex = "only one dimension can be inferred" + elif a == (0, 5): + error_regex = (r"cannot reshape tensor of 0 elements into shape " + r"\[0, -1\] because the unspecified dimension size " + r"-1 can be any value and is ambiguous") + else: + # to avoid having issues with a regex + shape = ', '.join(map(str, b)) + size = a if type(a) is int else functools.reduce(operator.mul, a, 1) + error_regex = rf"shape '\[{shape}\]' is invalid for input of size {size}" + + # convert to tensor + if kwargs.get("tensor_arg"): + b = make_arg(b, requires_grad=False) + + yield ErrorInput(SampleInput(make_arg(a), args=(b,)), error_type=Exception, + error_regex=error_regex) + + +def sample_inputs_atleast1d2d3d(op_info, device, dtype, requires_grad, **kwargs): + input_list = [] + shapes = ((S, S, S, S), (S, S, S), (S, S), (S, ), (),) + make_tensor_partial = partial(make_tensor, dtype=dtype, device=device, requires_grad=requires_grad) + for shape in shapes: + yield SampleInput(make_tensor_partial(shape)) + yield SampleInput([make_tensor_partial(shape) for shape in shapes]) + +def sample_inputs_column_stack(op_info, device, dtype, requires_grad, **kwargs): + cases: Tuple[tuple, tuple] = ( # type: ignore[assignment] + ((S, 2, 1), (S, 3, 1)), + ((S), (S, 5)), ((), (1, S)) + ) + make_tensor_partial = partial(make_tensor, dtype=dtype, device=device, requires_grad=requires_grad) + for shape1, shape2 in cases: + yield SampleInput([make_tensor_partial(shape1), make_tensor_partial(shape2)]) + +def sample_inputs_flatten(op_info, device, dtype, requires_grad, **kwargs): + shapes = ((S, S, S), (S, S), (S, ), (),) + make_tensor_partial = partial(make_tensor, dtype=dtype, device=device, requires_grad=requires_grad) + for shape in shapes: + yield SampleInput(make_tensor_partial(shape)) + if len(shape) > 1: + yield SampleInput(make_tensor_partial(shape), start_dim=1, end_dim=-1) + +def reference_inputs_flatten(op, device, dtype, requires_grad, **kwargs): + yield from sample_inputs_flatten(op, device, dtype, requires_grad, **kwargs) + + # shape x start_dim x end_dim + cases = ( + ((5, 4, 0, 1, 3, 7), 1, 3), + ((5, 4, 0, 1, 3, 7), 4, 5), + ((5, 4, 1, 1, 3, 7), 2, 3), + ((), 0, -1), + ((1,), 0, -1), + ((3, 7, 5), 1, 2), + ((4, 5), 1, 1), + ((1, 5, 5, 1, 5, 1, 5, 1), 0, 2), + ((1, 5, 5, 1, 5, 1, 5, 1), 3, -1), + ((1, 5, 5, 1, 5, 7, 5, 1), -2, -1), + ((2, 4, 2), 0, 1), + ((4, 2, 2), 1, 
2), + ((0, 3, 4, 5), 1, 3), + ) + + make_arg = partial(make_tensor, dtype=dtype, device=device, requires_grad=requires_grad) + for shape, start, end in cases: + yield SampleInput(make_arg(shape), args=(start, end,)) + yield SampleInput(make_arg(shape, noncontiguous=True).transpose(0, -1), args=(start, end,)) + yield SampleInput(make_arg(shape).transpose(0, -1), args=(start, end,)) + +def sample_inputs_unflatten(op_info, device, dtype, requires_grad, **kwargs): + # in_shape, dim, sizes + args = (((8,), 0, (8,)), + ((8,), 0, (4, 2)), + ((8,), -1, (2, 2, 2)), + ((8,), -1, (-1, 2)), + ((3, 6, 2), 1, (2, 3)), + ((3, 6, 2), -2, (2, 3)), + ((3, 6, 2), -2, (-1, 3)), + ((3, 2, 12), 2, (3, 2, 2)), + ((4, 0), 0, (2, 2)), + ((4, 0), 1, (2, 0, 0, 0)), + ) + make_tensor_partial = partial(make_tensor, dtype=dtype, device=device, requires_grad=requires_grad) + for in_shape, dim, sizes in args: + yield SampleInput(make_tensor_partial(in_shape), args=(dim, sizes)) + + +def sample_inputs_select(op_info, device, dtype, requires_grad, **kwargs): + make_arg = partial(make_tensor, dtype=dtype, device=device, requires_grad=requires_grad) + + cases = (((S, S, S), (1, 2)), + ((S, S, S), (-1, 2)), + ((S, S, S), (-1, -1)), + ((S, S, S), (1, -1)), + ((S,), (0, 2)) + ) + + for shape, args in cases: + yield SampleInput(make_arg(shape), args=args) + + +def sample_inputs_select_scatter(op_info, device, dtype, requires_grad, **kwargs): + make_arg = partial(make_tensor, dtype=dtype, device=device, requires_grad=requires_grad) + + cases = (((S, S, S), (S, S), (1, 2)), + ((S, S, S), (S, S), (-1, 2)), + ((S, S, S), (S, S), (-1, -1)), + ((S, S, S), (S, S), (1, -1)), + ((S,), (), (0, 2)) + ) + + for input_shape, src_shape, args in cases: + input_ = make_arg(input_shape) + src = make_arg(src_shape) + yield SampleInput(input_, args=(src, *args)) + + +def sample_inputs_slice_scatter(op_info, device, dtype, requires_grad, **kwargs): + make_arg = partial(make_tensor, dtype=dtype, device=device, requires_grad=requires_grad) + + cases = (((L, L, L), (L, L, L,), (0, 0, L, 1)), + ((L, L, L), (L // 2, L, L,), (0, L // 2, L, 1)), + ((L, L, L), (L // 4, L, L,), (0, L // 2, L, 2)), + ((L, L, L), (L, L, L,), (1, 0, L, 1)), + ((L, L, L), (L, L // 2, L,), (1, L // 2, L, 1)), + ((L, L, L), (L, L // 4, L,), (1, L // 2, L, 2)), + ((L, L, L), (L, L, L,), (2, 0, L, 1)), + ((L, L, L), (L, L, L // 2,), (2, L // 2, L, 1)), + ((L, L, L), (L, L, L // 4,), (2, L // 2, L, 2)), + ) + + for input_shape, src_shape, args in cases: + input_ = make_arg(input_shape) + src = make_arg(src_shape) + yield SampleInput(input_, args=(src, *args)) + +def sample_inputs_expand(op_info, device, dtype, requires_grad, **kwargs): + make_arg = partial(make_tensor, dtype=dtype, device=device, requires_grad=requires_grad) + + cases = (((S, 1, 1), (S, S, S)), + ((S, 1, S), (S, S, S)), + ((S, 1, S), (-1, S, -1)), + ((S, 1, S), (-1, S, S)), + ((S, 1), (S, S, S)), + ((1,), (S, S, S)), + ((1, S), (1, 1, S)), + ((), ()), + ((), (1, 3, 2)), + ) + + for case in cases: + shape, args = case + yield SampleInput(make_arg(shape), args=(args,)) + +def sample_inputs_conversion(op_info, device, dtype, requires_grad, **kwargs): + make_arg = partial(make_tensor, dtype=dtype, device=device, requires_grad=requires_grad) + + shapes = ((), + (2, 3)) + memory_format_options = [None, torch.contiguous_format] + + for shape, memory_format in itertools.product(shapes, memory_format_options): + yield SampleInput(make_arg(shape), + kwargs={'memory_format': memory_format} if memory_format else {}) + yield 
SampleInput(make_arg((2, 3, 2, 3)), kwargs={'memory_format': torch.channels_last}) + +def sample_inputs_expand_as(op_info, device, dtype, requires_grad, **kwargs): + make_arg = partial(make_tensor, dtype=dtype, device=device) + + cases = (((S, 1, 1), (S, S, S)), + ((), ()), + ((), (1, 1)), + ) + + for shape, shape_other in cases: + yield SampleInput(make_arg(shape, requires_grad=requires_grad), + args=(make_arg(shape_other, requires_grad=False),)) + + +def sample_inputs_where(op_info, device, dtype, requires_grad, **kwargs): + make_arg = partial(make_tensor, dtype=dtype, device=device, requires_grad=requires_grad) + + def make_bool_mask(shape): + # Make sure atleast one element is nonzero, + # except for empty tensor + mask_t = make_tensor(shape, dtype=torch.bool, device=device, requires_grad=False) + + if mask_t.numel() == 0: + return mask_t + elif mask_t.numel() == 1: + mask_t.fill_(True) + return mask_t + + if mask_t.sum() == 0: + def random_index(shape): + return tuple(random.randrange(0, max_idx) for max_idx in shape) + + mask_t[random_index(mask_t.shape)] = True + return mask_t + + return mask_t + + cases = (((M, M), (M, M), (M, M), False), + ((M, 1, M), (M, M), (M, M, 1), True), + ((), (), (), False), + ((M, 1, M), (), (M, M, 1), True), + ((), (M, M), (), True), + ((), (2), (1, 1), True), + ) + + for shape, mask_shape, other_shape, broadcasts_input in cases: + yield SampleInput(make_arg(shape), + args=(make_bool_mask(mask_shape), make_arg(other_shape)), + broadcasts_input=broadcasts_input) + +# TODO: add reference inputs for where(condition) signature +def reference_inputs_where(op, device, dtype, requires_grad, **kwargs): + yield from sample_inputs_where(op, device, dtype, requires_grad, **kwargs) + + make_cond = partial(make_tensor, dtype=torch.bool, device=device, requires_grad=requires_grad) + make_arg = partial(make_tensor, dtype=dtype, device=device, requires_grad=requires_grad) + + # noncontiguous + c = make_cond((10, 3), noncontiguous=True) + a = make_arg((10, 1), noncontiguous=True) + b = make_arg((3, 10, 3)).transpose(0, -1) + + # NOTE that the OpInfo for where takes samples of the form a, cond, b + yield SampleInput(a, args=(c, b)) + + # type promoting + other_dtype = torch.double if dtype is not torch.double else torch.long + c = make_cond((10, 3), noncontiguous=True) + a = make_arg((10, 1), dtype=torch.long) + b = make_arg((10, 1)) + + yield SampleInput(a, args=(c, b)) + + # two python scalars + c = make_cond((10, 3), noncontiguous=True) + a = make_arg((1,)).item() + b = make_arg((1,)).item() + + yield SampleInput(a, args=(c, b)) + + # NaN propagation + if dtype.is_floating_point or dtype.is_complex: + if dtype.is_floating_point: + nan = float('nan') + else: + # dtype.is_complex + nan = complex(float('nan'), float('nan')) + c = make_cond((1, 10, 3)) + a = make_arg((10, 3), noncontiguous=True) + a[2, 1] = nan + b = make_arg((1, 3)) + b[0, 2] = nan + + yield SampleInput(a, args=(c, b)) + + # Python scalars type promotion + for scalar in (0, 0.0, 2j, False): + yield SampleInput(scalar, args=(c, b)) + yield SampleInput(a, args=(c, scalar)) + + +def error_inputs_where(op_info, device, **kwargs): + shape = (S,) + err_msg = "Expected all tensors to be on the same device" + for devices in product(('cpu', device), repeat=3): + if len(set(devices)) == 2: + si = SampleInput(make_tensor(shape, device=devices[0], dtype=torch.float32), + args=(make_tensor(shape, dtype=torch.bool, device=devices[1]), + make_tensor(shape, device=devices[2], dtype=torch.float32))) + yield ErrorInput(si, 
error_regex=err_msg) + +def sample_inputs_nonzero(op_info, device, dtype, requires_grad, **kwargs): + make_arg = partial(make_tensor, dtype=dtype, device=device, requires_grad=requires_grad) + + sizes = ((), (S,), (S, S), (S, S, S), (S, 1, S), (S, 0, S)) + + inputs = [] + for shape in sizes: + # construct input without any non-zero elements + zeros = torch.zeros(shape, dtype=dtype, device=device, requires_grad=requires_grad) + inputs.append(zeros) + + # construct input with mixed zero and non-zero elements + mixed = make_arg(shape).requires_grad_(False) + mask_t = make_tensor(shape, dtype=torch.bool, device=device, requires_grad=False) + mixed[mask_t] = 0 + inputs.append(mixed) + + for input_t, as_tuple in product(inputs, [False, True]): + yield SampleInput(input_t.clone().requires_grad_(requires_grad), + kwargs=dict(as_tuple=as_tuple)) + +def sample_inputs_nonzero_static(op_info, device, dtype, requires_grad, **kwargs): + make_arg = partial(make_tensor, dtype=dtype, device=device, requires_grad=requires_grad) + + sizes = ((), (S,), (S, S), (S, S, S), (S, 1, S), (S, 0, S)) + + inputs = [] + for shape in sizes: + # construct input without any non-zero elements + zeros = torch.zeros(shape, dtype=dtype, device=device, requires_grad=requires_grad) + inputs.append(zeros) + + # construct input with mixed zero and non-zero elements + mixed = make_arg(shape).requires_grad_(False) + mask_t = make_tensor(shape, dtype=torch.bool, device=device, requires_grad=False) + mixed[mask_t] = 0 + inputs.append(mixed) + + nonzero_sizes = [0, 1, XS, S, M] + + for input_t, nonzero_size in product(inputs, nonzero_sizes): + yield SampleInput(input_t.clone().requires_grad_(requires_grad), + kwargs=dict(size=nonzero_size)) + +def sample_inputs_chunk(op_info, device, dtype, requires_grad, **kwargs): + make_arg = partial(make_tensor, dtype=dtype, device=device, requires_grad=requires_grad) + + cases = (((S, S, S), (2,)), + ((S, S, S), (S, 1)), + ((S, S, S), (S, -1))) + + for case in cases: + shape, args = case + yield SampleInput(make_arg(shape), args=args) + +def reference_inputs_chunk(op, device, dtype, requires_grad, **kwargs): + yield from sample_inputs_chunk(op, device, dtype, requires_grad, **kwargs) + + make_arg = partial(make_tensor, dtype=dtype, device=device, requires_grad=requires_grad) + + # shape x chunks x dim + cases = ( + ((13, 9, 11), 17, -1), + ((13, 9, 11), 11, -1), + ((13,), 12, -1), + ((15,), 12, -1), + ((15,), 7, 0), + ((15,), 9, 0), + ((3, 7), 9, 1), + ((3, 7), 9, 0), + ((3, 7), 2, 0), + ((3, 7), 3, 0), + ((3, 7), 1, 0), + ((3, 7), 1, 1), + ((4, 4), 2, 0), + ) + + for shape, chunks, dim in cases: + yield SampleInput(make_arg(shape), args=(chunks, dim)) + +def sample_inputs_kthvalue(op_info, device, dtype, requires_grad, **kwargs): + def _tensor(shape, dtype=dtype, low=None, high=None): + return make_tensor(shape, dtype=dtype, device=device, low=low, high=high, requires_grad=requires_grad) + + test_cases = [ + ((S, S, S), (2,)), + ((S, S, S), (2, 1,)), + ((S, S, S), (2, -1,)), + ((S, S, S), (2, 1, True,)), + ((S, S, S), (2, -1, True,)), + ((S,), (2, 0,)), + ((S,), (2, 0, True,)), + ((), (1,)), + ((), (1, 0,)), + ((), (1, 0, True)), + ] + + yield from (SampleInput(_tensor(tensor), *args) for tensor, args in test_cases) + +def error_inputs_kthvalue(op_info, device, **kwargs): + # tests overlapping output fails + t = make_tensor(10, dtype=torch.float32, device=device) + indices = torch.empty((), device=device, dtype=torch.long) + yield ErrorInput(SampleInput(t, 5, out=(t, indices)), + 
error_regex="unsupported operation") + + k_out_of_range_err = "selected number k out of range for dimension" + yield ErrorInput(SampleInput(torch.randn(2, 2, device=device), 3, 0), + error_regex=k_out_of_range_err) + yield ErrorInput(SampleInput(torch.randn(2, 2, device=device), 3), + error_regex=k_out_of_range_err) + yield ErrorInput(SampleInput(torch.tensor(2, device=device), 3), + error_regex=k_out_of_range_err) + +def sample_inputs_dropout(op_info, device, dtype, requires_grad, *, + train=None, valid_input_dim=None, **kwargs): + make_arg = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad) + + if valid_input_dim: + cases = ((S,) * i for i in valid_input_dim) + else: + cases = ((S, S), (S,), ()) + p_vals = [0.0, 0.5, 1.0] + # This is to handle special case for feature_alpha_dropout which has different + # supported dtypes depending on `train` parameter + training_vals = [train] if train is not None else [True, False] + + for case, p, training in product(cases, p_vals, training_vals): + yield SampleInput(make_arg(case), p=p, training=training) + yield SampleInput(make_arg(case)) + +def sample_inputs_dropout_backward(op_info, device, dtype, requires_grad, **kwargs): + make_arg = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad) + make_mask = partial(make_tensor, device=device, dtype=torch.bool, requires_grad=False) + + cases = ((S, S, S, S), (S,), ()) + scale_vals = [0.0, 1.0, 2.0] + + for case, scale in product(cases, scale_vals): + yield SampleInput(make_arg(case), make_mask(case), scale) + +def sample_inputs_embedding_bag(op_info, device, dtype, requires_grad, **kwargs): + def make_input(shape): + return make_tensor(shape, device=device, dtype=dtype, requires_grad=requires_grad) + + def make_long_input(shape, *, low, high, noncontiguous=False): + return make_tensor(shape, device=device, dtype=torch.long, low=low, high=high, + noncontiguous=noncontiguous) + + def make_per_sample_weight(flag, idx): + # a tensor of float / double weights, or None + # to indicate all weights should be taken to be 1 + if flag: + return make_input(idx.shape) + return None + + offsets = torch.tensor([0, 3], device=device, dtype=torch.long) + for generate_per_sample_weight in (True, False): + for mode in ('sum', 'mean', 'max'): + # per_sample_weights is only supported for mode='sum' (got mode='****') + if generate_per_sample_weight and mode in ('mean', 'max'): + continue + + # 1-D index tensor + idx = make_long_input((S,), low=0, high=M) + per_sample_weights = make_per_sample_weight(generate_per_sample_weight, idx) + yield SampleInput(make_input((M, S)), args=(idx,), + kwargs={'offsets': offsets, 'mode': mode, + 'per_sample_weights': per_sample_weights}) + + idx = make_long_input((S,), low=0, high=M, noncontiguous=True) + per_sample_weights = make_per_sample_weight(generate_per_sample_weight, idx) + yield SampleInput(make_input((M, S)), args=(idx,), + kwargs={'offsets': offsets, 'mode': mode, + 'per_sample_weights': per_sample_weights}) + + # bag with zero length + idx = make_long_input((S,), low=0, high=M, noncontiguous=True) + per_sample_weights = make_per_sample_weight(generate_per_sample_weight, idx) + yield SampleInput(make_input((M, S)), args=(idx,), + kwargs={'offsets': torch.tensor([0, 0, 3], device=device, dtype=torch.long), + 'mode': mode, + 'per_sample_weights': per_sample_weights}) + + # 2-D index tensor + idx = make_long_input((S, S), low=0, high=M) + per_sample_weights = make_per_sample_weight(generate_per_sample_weight, idx) + yield 
SampleInput(make_input((M, S)), args=(idx,),
+ kwargs={'mode': mode, 'per_sample_weights': per_sample_weights})
+
+ idx = make_long_input((S, S), low=0, high=M, noncontiguous=True)
+ per_sample_weights = make_per_sample_weight(generate_per_sample_weight, idx)
+ yield SampleInput(make_input((M, S)), args=(idx,),
+ kwargs={'mode': mode, 'per_sample_weights': per_sample_weights})
+
+ # The gradient vector at `padding_idx` is not updated.
+ # Negative padding_idx
+ idx = make_long_input((6,), low=0, high=S)
+ idx[0] = 4
+ idx[4] = 4
+ per_sample_weights = make_per_sample_weight(generate_per_sample_weight, idx)
+ yield SampleInput(make_input((S, S)), args=(idx,),
+ kwargs={'padding_idx': -1, 'offsets': offsets,
+ 'mode': mode, 'per_sample_weights': per_sample_weights},)
+
+ idx = make_long_input((3, 3), low=0, high=S)
+ # Positive padding_idx
+ idx[0, 0] = 2
+ idx[1, 1] = 2
+ per_sample_weights = make_per_sample_weight(generate_per_sample_weight, idx)
+ yield SampleInput(make_input((S, S)), args=(idx,),
+ kwargs={'padding_idx': 2, 'mode': mode,
+ 'per_sample_weights': per_sample_weights},)
+
+ idx = make_long_input((6, ), low=0, high=S)
+ weights = make_input((S, S))
+ offsets_ = torch.tensor([0, 3, 6], device=device, dtype=torch.long)
+ per_sample_weights = make_per_sample_weight(generate_per_sample_weight, idx)
+ yield SampleInput(weights, args=(idx,),
+ kwargs={'mode': mode, 'offsets': offsets_, 'include_last_offset': True},)
+
+ if not requires_grad:
+ # Following inputs return different gradient from the numerical gradient.
+ # This is expected and relevant tests are present in `test_nn.py`.
+
+ # Due to inplace renorming of weight, the numerical gradient doesn't match the
+ # analytical gradient.
+ idx = make_long_input((2, 2), low=0, high=S)
+ weights = make_input((S, S)) * 2
+ per_sample_weights = make_per_sample_weight(generate_per_sample_weight, idx)
+ yield SampleInput(weights, args=(idx,),
+ kwargs={'max_norm': 1., 'mode': mode,
+ 'per_sample_weights': per_sample_weights},)
+
+ idx = make_long_input((6, ), low=0, high=S)
+ weights = make_input((S, S)) * 2
+ per_sample_weights = make_per_sample_weight(generate_per_sample_weight, idx)
+ yield SampleInput(weights, args=(idx,),
+ kwargs={'max_norm': 1., 'norm_type': 1.0,
+ 'mode': mode, 'offsets': offsets,
+ 'per_sample_weights': per_sample_weights},)
+
+ if mode != 'max':
+ # Scale the gradient based on the inverse frequency of a particular index.
+ # Note : max mode does not support sparse weights
+ idx = make_long_input((2, 2), low=0, high=S)
+ idx[0, 0] = 1
+ idx[0, 1] = 1
+ weights = make_input((S, S))
+ per_sample_weights = make_per_sample_weight(generate_per_sample_weight, idx)
+ yield SampleInput(weights, args=(idx,),
+ kwargs={'scale_grad_by_freq': True, 'mode': mode,
+ 'per_sample_weights': per_sample_weights},)
+
+ # gradcheck not implemented for sparse tensors. 
+ # Note : max mode does not support sparse weights + idx = make_long_input((6, ), low=0, high=S) + weights = make_input((S, S)) + per_sample_weights = make_per_sample_weight(generate_per_sample_weight, idx) + yield SampleInput(weights, args=(idx,), + kwargs={'sparse': True, 'offsets': offsets, + 'mode': mode, 'per_sample_weights': per_sample_weights}) + + idx = make_long_input((6, ), low=0, high=S) + idx[0] = 1 # freq more than 1 + idx[1] = 1 # freq more than 1 + idx[3] = 0 # padding_idx + weights = make_input((S, S)) * 2 + per_sample_weights = make_per_sample_weight(generate_per_sample_weight, idx) + yield SampleInput(weights, args=(idx,), + kwargs={'sparse': True, 'scale_grad_by_freq': True, 'padding_idx': 0, + 'max_norm': 1., 'offsets': offsets, + 'mode': mode, 'per_sample_weights': per_sample_weights}) + + +def sample_inputs_embedding(op_info, device, dtype, requires_grad, **kwargs): + def make_input(shape): + return make_tensor(shape, device=device, dtype=dtype, requires_grad=requires_grad) + + def make_long_input(shape, *, low, high): + return make_tensor(shape, device=device, dtype=torch.long, low=low, high=high) + + # 0-D index tensor + idx = make_long_input((), low=0, high=M) + yield SampleInput(make_input((M, S)), args=(idx,),) + + # 1-D index tensor + idx = make_long_input((S,), low=0, high=M) + yield SampleInput(make_input((M, S)), args=(idx,),) + + # 2-D index tensor + idx = make_long_input((S, S), low=0, high=M) + yield SampleInput(make_input((M, S)), args=(idx,),) + + if not requires_grad: + # Following inputs return different gradient from the numerical gradient. + # This is expected and relevant tests are present in `test_nn.py`. + + # The gradient vector at `padding_idx` is not updated. + idx = make_long_input((2, 2), low=0, high=S) + idx[0, 0] = 2 + idx[1, 1] = 2 + yield SampleInput(make_input((S, S)), args=(idx,), kwargs={'padding_idx': 2},) + + idx = make_long_input((2, 2), low=0, high=S) + idx[0, 0] = 4 + idx[1, 1] = 4 + yield SampleInput(make_input((S, S)), args=(idx,), kwargs={'padding_idx': -1},) + + # Due to inplace renorming of weight, the numerical gradient doesn't match the + # analytical gradient. + idx = make_long_input((2, 2), low=0, high=S) + weights = make_input((S, S)) * 2 + yield SampleInput(weights, args=(idx,), kwargs={'max_norm': 1.},) + + idx = make_long_input((2, 2), low=0, high=S) + weights = make_input((S, S)) * 2 + yield SampleInput(weights, args=(idx,), kwargs={'max_norm': 1., 'norm_type': 1.0},) + + # Scale the gradient based on the inverse frequency of a particular index. + idx = make_long_input((2, 2), low=0, high=S) + idx[0, 0] = 1 + idx[0, 1] = 1 + weights = make_input((S, S)) + yield SampleInput(weights, args=(idx,), kwargs={'scale_grad_by_freq': True},) + + # gradcheck not implemented for sparse tensors. 
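+ # (these sparse samples are only generated in this requires_grad=False branch,
+ # so they are not exercised by gradcheck)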
+ idx = make_long_input((2, 2), low=0, high=S) + weights = make_input((S, S)) + yield SampleInput(weights, args=(idx,), kwargs={'sparse': True}) + + idx = make_long_input((3, 3), low=0, high=S) + idx[0, 0] = 1 # freq more than 1 + idx[0, 1] = 1 # freq more than 1 + idx[1, 0] = 0 # padding_idx + weights = make_input((S, S)) * 2 + yield SampleInput(weights, args=(idx,), + kwargs={'sparse': True, 'scale_grad_by_freq': True, + 'padding_idx': 0, 'max_norm': 1.}) + + +def sample_inputs_one_hot(op_info, device, dtype, requires_grad, **kwargs): + def make_input(shape, *, low, high): + return make_tensor(shape, device=device, dtype=dtype, low=low, high=high, requires_grad=requires_grad) + + shapes = ((), (S,), (L, M, S)) + num_classess = (-1, 10) + + return ( + SampleInput( + make_input( + shape, + low=0, + high=10 if num_classes == -1 else num_classes // 2, + ), + kwargs=dict(num_classes=num_classes), + ) + for shape, num_classes in itertools.product(shapes, num_classess) + ) + + +def sample_inputs_loss(op_info, device, dtype, requires_grad, **kwargs): + rhs_requires_grad = kwargs.get('rhs_requires_grad', requires_grad) + _make_tensor = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad) + + # Although most losses also support the reduce and size_average combination instead of reduce, the former is + # deprecated since 0.4.1 and thus is not tested + shapes_and_kwargs = ( + ((), None), + ((S,), dict(reduction="mean")), + ((S,), dict(reduction="sum")), + ((S,), dict(reduction="none")), + ((S, S), None), + ((S, S, S), None), + ) + + for shape, kwargs in shapes_and_kwargs: + yield SampleInput(_make_tensor(shape), + args=(_make_tensor(shape, requires_grad=rhs_requires_grad),), + kwargs=kwargs) + +def sample_inputs_grid_sample(op_info, device, dtype, requires_grad, **kwargs): + # We get better tests if we change the range of the values to something like [-2,2] + # because for grid (second tensor argument) the "useful" range is [-1,1] and this way + # you get a better combination of out-of-range and in-range test cases + _make_tensor = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad, + low=-2, high=2) + + batch_size = 2 + num_channels = 3 + modes = ("bilinear", "nearest") + align_cornerss = (False, True) + padding_modes = ("zeros", "border", "reflection") + + for dim in (2, 3): + + modes_ = (*modes, "bicubic") if dim == 2 else modes + + for mode, padding_mode, align_corners in itertools.product(modes_, padding_modes, align_cornerss): + yield SampleInput( + _make_tensor((batch_size, num_channels, *[S] * dim)), + _make_tensor((batch_size, *[S] * dim, dim)), + mode=mode, + padding_mode=padding_mode, + align_corners=align_corners, + ) + +def reference_inputs_grid_sample(op_info, device, dtype, requires_grad, **kwargs): + + batch_size = 2 + num_channels = 3 + height = 345 + width = 456 + modes = ("bilinear", "nearest", "bicubic") + align_cornerss = (False, True) + padding_modes = ('zeros', 'border', 'reflection') + + # Create an affine transformation matrix + a = torch.deg2rad(torch.tensor(45.0)) + ca, sa = torch.cos(a), torch.sin(a) # rotation angles + s1, s2 = 1.23, 1.34 # scales + + theta = torch.tensor([[ + [ca / s1, sa, 0.0], + [-sa, ca / s2, 0.0], + ]], dtype=dtype, device=device) + theta = theta.expand(batch_size, 2, 3).contiguous() + + x = torch.arange(batch_size * num_channels * height * width, device=device) + x = x.reshape(batch_size, num_channels, height, width).to(torch.uint8) + x = x.to(dtype=dtype) + x.requires_grad_(requires_grad) + + for 
mode, padding_mode, align_corners in itertools.product(modes, padding_modes, align_cornerss): + grid = torch.nn.functional.affine_grid( + theta, size=(batch_size, num_channels, height, width), align_corners=align_corners + ) + yield SampleInput( + x, + grid, + mode, + padding_mode, + align_corners, + ) + +def sample_inputs_grid_sampler_2d(op_info, device, dtype, requires_grad, **kwargs): + # We get better tests if we change the range of the values to something like [-2,2] + # because for grid (second tensor argument) the "useful" range is [-1,1] and this way + # you get a better combination of out-of-range and in-range test cases + _make_tensor = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad, + low=-2, high=2) + + batch_size = 2 + num_channels = 3 + modes = (0, 1, 2) + align_cornerss = (False, True) + padding_modes = (0, 1, 2) + + for mode, padding_mode, align_corners in itertools.product(modes, padding_modes, align_cornerss): + yield SampleInput( + _make_tensor((batch_size, num_channels, S, L)), + _make_tensor((batch_size, M + 3, M, 2)), + mode, + padding_mode, + align_corners, + ) + +def sample_inputs_cosine_embedding_loss(op_info, device, dtype, requires_grad, **kwargs): + make_input = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad) + + def make_target(shape): + shape = () if len(shape) == 1 else (shape[0], ) + t = torch.randint(0, 2, shape, device=device, dtype=torch.long) + # Label with -1 or 1 + t = t * 2 - 1 + target = t.to(dtype=dtype).detach_().requires_grad_(requires_grad) + return target + + shapes = ((S, S), (S,)) + reductions = ('none', 'mean', 'sum') + for s, r in product(shapes, reductions): + yield SampleInput( + make_input(s), + args=(make_input(s), make_target(s)), + kwargs=dict(reduction=r, margin=random.uniform(-1, 1)) + ) + +def sample_inputs_ctc_loss(op_info, device, dtype, requires_grad, **kwargs): + input_length = 50 + batch = 16 + num_char = 20 + target_length = 30 + + def make_log_probs(s): + t = make_tensor(s, device=device, dtype=dtype) + log_probs = t.log_softmax(2).to(device=device, dtype=dtype).detach().requires_grad_(requires_grad=requires_grad) + return log_probs + + reductions = ('none', 'mean', 'sum') + zero_inf = (True, False) + lengths_type = (list, torch.Tensor) + for r, z, lt in product(reductions, zero_inf, lengths_type): + log_probs = make_log_probs((input_length, batch, num_char)) + targets = torch.randint(1, num_char, (batch, target_length), dtype=torch.long, device=device) + input_lengths = torch.full((batch, ), input_length, dtype=torch.long, device=device) + target_lengths = torch.randint(10, target_length, (batch, ), dtype=torch.long, device=device) + + # Dont generate int[] types if reduction = "Mean" since this results in non composite compliant calls + # to ctc_loss.IntList since a tensor needs to be created from the target lengths. + # Creating such a tensor requires the use of pointers to copy data from int[] -> torch.Tensor + # e.g. via std::copy. 
Similarly symbolic/real tracing with fx will also not work + if lt is list and r in ["none", "sum"]: + input_lengths = input_lengths.tolist() + target_lengths = target_lengths.tolist() + + yield SampleInput(log_probs, args=(targets, input_lengths, target_lengths,), kwargs=dict(reduction=r, zero_infinity=z)) + +def sample_inputs_nll_loss(op_info, device, dtype, requires_grad, **kwargs): + shape = (2, 3) + num_classes = shape[1] + make_input = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad) + # FIXME: Derivative wrt. weight not implemented + make_weight = partial(make_tensor, num_classes, device=device, dtype=dtype, requires_grad=False) + + def make_target(shape, zeros=False): + s = (shape[0], *shape[2:]) if len(shape) > 1 else () + if zeros: + return torch.zeros(s, device=device, dtype=torch.long) + else: + return make_tensor(s, + low=0, + high=shape[1] if len(shape) > 1 else shape[0], + device=device, + dtype=torch.long) + + + def gen_shape_kwargs(): + # Batched, non-batched and 2d + shapes = (shape, (num_classes,), shape + (2, 2)) + reductions = ('none', 'mean', 'sum') + for reduction, s in product(reductions, shapes): + yield make_input(s), make_target(s), dict(reduction=reduction) + yield make_input(s), make_target(s), dict(weight=make_weight(), reduction=reduction) + yield make_input(s), make_target(s), dict(weight=make_weight(low=0), reduction=reduction) + yield make_input(s), make_target(s), dict(weight=make_weight(high=0), reduction=reduction) + t = make_target(s) + ignore = num_classes // 2 + # If "mean", nll returns NaN, so it's not differentiable at those points + if t.eq(ignore).all() and reduction == "mean": + t.fill_(0) + yield make_input(s), t, dict(ignore_index=num_classes // 2, reduction=reduction) + yield make_input(s), t, dict(ignore_index=num_classes // 2, reduction=reduction, weight=make_weight()) + # Test ignoring all the targets + # If "mean", nll returns NaN, so it's not differentiable at those points + if reduction != "mean": + yield make_input(s), make_target(s, zeros=True), dict(ignore_index=0, reduction=reduction) + + for input, target, kwargs in gen_shape_kwargs(): + yield SampleInput(input, args=(target,), kwargs=kwargs) + + target = torch.tensor([-1, 2], device=device, dtype=torch.long) + yield SampleInput(make_input(shape), args=(target,), kwargs={'ignore_index': -1}) + + +def sample_inputs_binary_cross_entropy_with_logits( + op_info, device, dtype, requires_grad, **kwargs +): + make = partial(make_tensor, device=device, dtype=dtype) + make_prob = partial(make, low=0, high=1) + reductions = ("mean", "sum", "none") + + def make_weight_shape_kwargs(): + kwargs = [] + for shape in ((1,), (1, S), (S), (S, S)): + kwargs.extend([((S, S), dict(reduction=reduction, weight=make(shape))) for reduction in reductions]) + return kwargs + + shapes_and_kwargs = [ + *[(shape, None) for shape in ((), (1,), (S,), (S, S), (S, S, S))], + *[((S, S), dict(reduction=reduction)) for reduction in reductions], + *make_weight_shape_kwargs(), + *[((S, S), dict(reduction=reduction, pos_weight=make((S,), low=0))) for reduction in reductions], + *[((S, S), dict(reduction=reduction, weight=make((S, S)), pos_weight=make((S,), low=0))) for reduction in reductions], + ] + + for shape, kwargs in shapes_and_kwargs: + yield SampleInput( + make(shape, requires_grad=requires_grad), + args=(make_prob(shape, requires_grad=requires_grad),), + kwargs=kwargs, + ) + +def sample_inputs_argwhere(op_info, device, dtype, requires_grad, **kwargs): + yield 
SampleInput(torch.tensor([1, 0, 2, 0], dtype=dtype, device=device, requires_grad=requires_grad)) + mask = torch.tensor([[0, 1, 0, 1, 0], + [1, 1, 1, 1, 0], + [0, 0, 0, 1, 0], + [1, 0, 1, 1, 0], + [1, 0, 0, 1, 0]], dtype=torch.bool, device=device) + t = make_tensor((S, S), dtype=dtype, device=device, requires_grad=requires_grad) + t[mask] = 0 + yield SampleInput(t) + + t = make_tensor((S, S), dtype=dtype, device=device, requires_grad=requires_grad, noncontiguous=True) + t[mask] = 0 + yield SampleInput(t) + + t = make_tensor((S, 0), dtype=dtype, device=device, requires_grad=requires_grad) + yield SampleInput(t) + + yield SampleInput(torch.zeros((S,), dtype=dtype, device=device, requires_grad=requires_grad)) + yield SampleInput(make_tensor((), dtype=dtype, device=device, requires_grad=requires_grad)) + +def _generate_sample_shape_reduction(): + shapes = ((S,), (S, S), (S, S, S)) + reductions = ('none', 'mean', 'sum') + yield from product(shapes, reductions) + +def sample_inputs_gaussian_nll_loss(op_info, device, dtype, requires_grad, **kwargs): + _make_tensor = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad) + # Set low slightly above 0 so gradcheck doesn't accidentally dip below 0 + make_var = partial(make_tensor, low=0.1, device=device, dtype=dtype, requires_grad=requires_grad) + + def gen_shape(shape): + yield shape + # Broadcast + yield (*shape[:-1], 1) + yield shape[:-1] + + def gen_shape_kwargs(): + for s, r in _generate_sample_shape_reduction(): + for t_s, v_s in product(gen_shape(s), gen_shape(s)): + yield _make_tensor(s), _make_tensor(t_s), make_var(v_s), dict(reduction=r) + yield ( + _make_tensor(s), _make_tensor(t_s), make_var(v_s), + dict(full=True, reduction=r) + ) + yield ( + _make_tensor(s), _make_tensor(t_s), make_var(v_s), + dict(eps=random.uniform(1e-6, 1e-3), reduction=r) + ) + yield ( + _make_tensor(s), _make_tensor(t_s), make_var(v_s), + dict(full=True, eps=random.uniform(1e-6, 1e-3), reduction=r) + ) + + for input, target, var, kwargs in gen_shape_kwargs(): + yield SampleInput(input, args=(target, var, ), kwargs=kwargs) + +def error_inputs_gaussian_nll_loss(op_info, device, **kwargs): + _make = partial(make_tensor, device=device, dtype=torch.float32) + + # invalid reduction value + yield ErrorInput(SampleInput(_make(10, 2, 3), _make(10, 2, 3), _make((10, 2, 3), low=0), reduction="abc"), + error_type=ValueError, error_regex="abc is not valid") + + # var is of incorrect shape + yield ErrorInput(SampleInput(_make(10, 2, 3), _make(10, 2, 3), _make((10, 2, 2), low=0)), + error_type=ValueError, error_regex="var is of incorrect size") + + # target is of incorrect shape + yield ErrorInput(SampleInput(_make(10, 2, 3), _make(10, 2, 2), _make((10, 2, 3), low=0)), + error_type=RuntimeError, + error_regex=(r"The size of tensor a \(3\) must match the size of tensor b \(2\) " + r"at non-singleton dimension 2")) + +def _generate_sample_inputs_nn_loss(op_info, device, dtype, requires_grad, **kwargs): + _make_tensor = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad) + + for s, r in _generate_sample_shape_reduction(): + yield _make_tensor(s), _make_tensor(s), dict(reduction=r) + +def sample_inputs_hinge_embedding_loss(op_info, device, dtype, requires_grad, **kwargs): + for input, target, d in _generate_sample_inputs_nn_loss(op_info, device, dtype, requires_grad, **kwargs): + # target should contain either 1 or -1 as per docs + mask = torch.rand_like(target) > 0.5 + target[mask] = 1 + target[~mask] = -1 + d['margin'] = 
random.uniform(-9, 9) + yield SampleInput(input, args=(target, ), kwargs=d) + + # scalar input and target. + _make_tensor = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad) + yield SampleInput(_make_tensor(()), args=(_make_tensor(()), )) + +def error_inputs_hinge_embedding_loss(op, device, **kwargs): + make_input = partial(make_tensor, device=device, dtype=torch.float32) + # invalid reduction value + yield ErrorInput(SampleInput(make_input(5, 4), args=(make_input(5, 4),), kwargs={'reduction': 'abc'}), + error_type=ValueError, error_regex='is not a valid value') + +def reference_inputs_hinge_embedding_loss(op, device, dtype, requires_grad, **kwargs): + yield from sample_inputs_hinge_embedding_loss(op, device, dtype, requires_grad, **kwargs) + make_input = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad) + + for reduction in ('sum', 'mean', 'none'): + if dtype.is_floating_point: # only supports ints and floats + # NaN propagation + inp = make_input((10, )) + inp[2] = float('nan') + target = make_input((10, )) + # target should contain either 1 or -1 as per docs + mask = torch.rand_like(target) > 0.5 + target[mask] = -1 + target[~mask] = 1 + yield SampleInput(inp, args=(target,), kwargs={'reduction': reduction}) + + # Inf Handling + inp = make_input((10, )) + inp[4] = float('inf') + target = make_input((10, )) + mask = torch.rand_like(target) > 0.5 + target[mask] = -1 + target[~mask] = 1 + yield SampleInput(inp, args=(target,), kwargs={'reduction': reduction}) + + # Broadcasting + inp = make_input((5, 5)) + target = make_input((1, 5)) + mask = torch.rand_like(target) > 0.5 + target[mask] = -1 + target[~mask] = 1 + yield SampleInput(inp, args=(target,), kwargs={'reduction': reduction}) + +def sample_inputs_huber_loss(op_info, device, dtype, requires_grad, **kwargs): + for input, target, d in _generate_sample_inputs_nn_loss(op_info, device, dtype, requires_grad, **kwargs): + d['delta'] = random.uniform(1e-3, 9) + yield SampleInput(input, args=(target, ), kwargs=d) + +def error_inputs_huber_loss(op, device, **kwargs): + make_input = partial(make_tensor, device=device, dtype=torch.float32) + # invalid reduction value + err = 'is not a valid value for reduction' + yield ErrorInput(SampleInput(make_input(5, 4), args=(make_input(5, 4),), kwargs={'reduction': 'abc'}), + error_type=ValueError, error_regex=err) + # delta <= 0 + for delta in (0, -1): + err = 'huber_loss does not support non-positive values for delta.' 
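+ # the loop covers both a zero and a negative delta; each should raise the same RuntimeError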
+ yield ErrorInput(SampleInput(make_input(5, 4), args=(make_input(5, 4),), kwargs={'delta': delta}), + error_type=RuntimeError, error_regex=err) + +def sample_inputs_poisson_nll_loss(op_info, device, dtype, requires_grad, **kwargs): + _make_tensor = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad) + + def gen_shape_kwargs(): + for s, r in _generate_sample_shape_reduction(): + for li in (True, False): + for f in (True, False): + i1 = _make_tensor(s) + i2 = _make_tensor(s) + # For Poisson NLL Loss, + # target is assumed to be from + # Poisson Distribution which + # always has positive samples + t1 = _make_tensor(s, low=0) + t2 = _make_tensor(s, low=0) + + if not li: + i1.abs_() + i2.abs_() + t1.abs_() + t2.abs_() + + yield ( + i1, t1, + dict(log_input=li, full=f, reduction=r) + ) + yield ( + i2, t2, + dict(log_input=li, full=f, + eps=random.uniform(1e-8, 1e-3), + reduction=r) + ) + + for input, target, kwargs in gen_shape_kwargs(): + yield SampleInput(input, args=(target, ), kwargs=kwargs) + + # test INT_TO_FLOAT promotion + if dtype.is_complex: + for d in (torch.bool, torch.int64): + yield SampleInput(_make_tensor(dtype=dtype), args=(_make_tensor(dtype=d),)) + yield SampleInput(_make_tensor(dtype=d), args=(_make_tensor(dtype=dtype),)) + +def error_inputs_poisson_nll_loss(op_info, device, **kwargs): + make = partial(make_tensor, device=device, dtype=torch.float32) + + # invalid reduction value + yield ErrorInput(SampleInput(make(5, 4), args=(make(5, 4),), + kwargs={'reduction': 'abc'}), + error_type=ValueError, + error_regex='abc is not a valid value for reduction') + # invalid input shapes + yield ErrorInput(SampleInput(make(5, 4), args=(make(5,),)), + error_regex=(r'(Attempting to broadcast a dimension of length|' + r'The size of tensor a \(5\) must match the ' + r'size of tensor b \(4\) at non-singleton ' + r'dimension 1)')) + +def error_inputs_soft_margin_loss(op_info, device, **kwargs): + make = partial(make_tensor, device=device, dtype=torch.float32) + + # invalid reduction value + yield ErrorInput(SampleInput(make(5, 4), args=(make(5, 4),), + kwargs={'reduction': 'abc'}), + error_type=ValueError, + error_regex='abc is not a valid value for reduction') + # invalid input shapes + yield ErrorInput(SampleInput(make(5, 4), args=(make(5,),)), + error_regex=(r'(Attempting to broadcast a dimension of length|' + r'The size of tensor a \(4\) must match the ' + r'size of tensor b \(5\) at non-singleton ' + r'dimension 1)')) + +def sample_inputs_triplet_margin_loss(op_info, device, dtype, requires_grad, with_distance=False, **kwargs): + make = partial(make_tensor, (S, M), device=device, dtype=dtype, requires_grad=requires_grad) + + kwargss = ( + *[dict(margin=margin) for margin in (1e-6, 1.0, 10.0)], + dict(swap=True), + *[dict(reduction=reduction) for reduction in ("mean", "sum", "none")], + ) + + for kwargs in kwargss: + input = make() + args = (make(), make()) + if with_distance: + kwargs["distance_function"] = torch.nn.PairwiseDistance() + yield SampleInput(input, args=args, kwargs=kwargs) + +def error_inputs_triplet_margin_loss(op_info, device, **kwargs): + make_input = partial(make_tensor, device=device, dtype=torch.float32) + + samples = ( + # input, args, kwargs, error_type, error_regex + # invalid reduction + (make_input(3, 4), (make_input(3, 4), make_input(3, 4)), + dict(reduction="abc"), + ValueError, "abc is not a valid value for reduction"), + + # shape mismatch + (make_input(3, 5), (make_input(3, 4), make_input(3, 4)), + dict(), + RuntimeError, + 
(r'(Attempting to broadcast a dimension of length|' + r"The size of tensor a \(5\) must match the size of tensor b \(4\) " + r"at non-singleton dimension 1)")), + (make_input(3, 4), (make_input(3, 5), make_input(3, 4)), + dict(), + RuntimeError, + (r'(Attempting to broadcast a dimension of length|' + r"The size of tensor a \(4\) must match the size of tensor b \(5\) " + r"at non-singleton dimension 1)")), + (make_input(3, 4), (make_input(3, 4), make_input(3, 5)), + dict(), + RuntimeError, + (r'(Attempting to broadcast a dimension of length|' + r"The size of tensor a \(4\) must match the size of tensor b \(5\) " + r"at non-singleton dimension 1)")), + + # different dimensions + (make_input(3,), (make_input(3, 4), make_input(3, 4)), + dict(), + RuntimeError, + (r"The anchor, positive, and negative tensors are expected to have " + r"the same number of dimensions, but got: anchor 1D, positive 2D, " + r"and negative 2D inputs")), + (make_input(3, 4), (make_input(3,), make_input(3, 4)), + dict(), + RuntimeError, + (r"The anchor, positive, and negative tensors are expected to have " + r"the same number of dimensions, but got: anchor 2D, positive 1D, " + r"and negative 2D inputs")), + (make_input(3, 4), (make_input(3, 4), make_input(3,)), + dict(), + RuntimeError, + (r"The anchor, positive, and negative tensors are expected to have " + r"the same number of dimensions, but got: anchor 2D, positive 2D, " + r"and negative 1D inputs")), + ) + + for input, args, kwargs, error_type, error_regex in samples: + yield ErrorInput(SampleInput(input, args=args, kwargs=kwargs), + error_type=error_type, error_regex=error_regex) + +def sample_inputs_scaled_mm(op_info, device, dtype, requires_grad, **kwargs): + make_mat_e4m3 = partial(make_tensor, device=device, dtype=torch.float8_e4m3fn, requires_grad=requires_grad) + make_mat_e5m2 = partial(make_tensor, device=device, dtype=torch.float8_e5m2, requires_grad=requires_grad) + M, N, K = 15, 32, 16 + samples = [] + # two e4m3 + mat1 = make_mat_e4m3((M, K)) + mat2 = make_mat_e4m3((K, N)).t().contiguous().t() + samples.append(SampleInput(mat1, mat2)) + # mat1 e4m3 mat2 e5m2 + mat1 = make_mat_e4m3((M, K)) + mat2 = make_mat_e5m2((K, N)).t().contiguous().t() + samples.append(SampleInput(mat1, mat2)) + # mat1 e5m2 mat2 e4m3 + mat1 = make_mat_e5m2((M, K)) + mat2 = make_mat_e4m3((K, N)).t().contiguous().t() + samples.append(SampleInput(mat1, mat2)) + + yield from samples + +def sample_inputs_scaled_dot_product_attention(op_info, device, dtype, requires_grad, **kwargs): + make = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad) + batch, seq_q, seq_kv, num_heads, head_dim = 4, 3, 6, 4, 8 + + dim_3_q_shape = (batch, seq_q, head_dim) + dim_3_kv_shape = (batch, seq_kv, head_dim) + dim_4_q_shape = (batch, num_heads, seq_q, head_dim) + dim_4_kv_shape = (batch, num_heads, seq_kv, head_dim) + + broadcast_tuple = ((num_heads, seq_q, head_dim), (batch, num_heads, seq_kv, head_dim)) + + qkv_shapes = [(dim_3_q_shape, dim_3_kv_shape), (dim_4_q_shape, dim_4_kv_shape), broadcast_tuple] + samples = [] + for qkv_shape, is_causal, dropout_p in product( + qkv_shapes, [True, False], [0.0, 0.5]): + shape_q, shape_kv = qkv_shape + samples.append(SampleInput( + make(shape_q), + make(shape_kv), + make(shape_kv), + is_causal=is_causal, + dropout_p=dropout_p + )) + + # Add non standard shapes + diff_v_head_dim = SampleInput( + make((batch, num_heads, seq_q, head_dim)), + make((batch, num_heads, seq_kv, head_dim)), + make((batch, num_heads, seq_kv, head_dim + 8)), + 
is_causal=is_causal, + dropout_p=dropout_p + ) + + # Add an attn_mask + samples.append( + SampleInput( + make((batch, num_heads, seq_q, head_dim)), + make((batch, num_heads, seq_kv, head_dim)), + make((batch, num_heads, seq_kv, head_dim)), + attn_mask=make((seq_q, seq_kv)), + is_causal=False, + dropout_p=0.0) + ) + + yield from samples + + +def sample_inputs_efficient_attention_forward(op_info, device, dtype, requires_grad, **kwargs): + make = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad) + batch, num_heads, head_dim = 4, 4, 8 + seq_q = 11 + seq_kv = 32 + + dim_4_q_shape = (batch, num_heads, seq_q, head_dim) + dim_4_kv_shape = (batch, num_heads, seq_kv, head_dim) + + qkv_shapes = [(dim_4_q_shape, dim_4_kv_shape)] + samples = [] + mask_types = [1, 2] # UpperLeft, LowerRight + scales = [None, 1.0] + + for qkv_shape, is_causal, dropout_p, mask_type, scale in product( + qkv_shapes, [True, False], [0.0, 0.5], mask_types, scales): + shape_q, shape_kv = qkv_shape + samples.append(SampleInput( + make(shape_q).transpose(1, 2), + make(shape_kv).transpose(1, 2), + make(shape_kv).transpose(1, 2), + bias=None, + cu_seqlens_q=None, + cu_seqlens_k=None, + max_seqlen_q=None, + dropout_p=dropout_p, + custom_mask_type=mask_type, + compute_log_sumexp=requires_grad, + scale=scale, + causal_diagonal=None, + seqlen_k=None + )) + + # Add non standard shapes + diff_v_head_dim = SampleInput( + make((batch, seq_q, num_heads, head_dim)), + make((batch, seq_kv, num_heads, head_dim)), + make((batch, seq_kv, num_heads, head_dim + 8)), + bias=None, + cu_seqlens_q=None, + cu_seqlens_k=None, + max_seqlen_q=None, + dropout_p=dropout_p, + custom_mask_type=0, # No Mask + compute_log_sumexp=requires_grad, + scale=None, + causal_diagonal=None, + seqlen_k=None + ) + + # Add an attn_mask + samples.append( + SampleInput( + make((batch, seq_q, num_heads, head_dim)), + make((batch, seq_kv, num_heads, head_dim)), + make((batch, seq_kv, num_heads, head_dim)), + bias=make(batch, num_heads, seq_q, seq_kv), + cu_seqlens_q=None, + cu_seqlens_k=None, + max_seqlen_q=None, + dropout_p=dropout_p, + custom_mask_type=0, # No Mask + compute_log_sumexp=requires_grad, + scale=None, + causal_diagonal=None, + seqlen_k=None + ) + ) + + yield from samples + +def sample_inputs_flash_attention_forward(op_info, device, dtype, requires_grad, **kwargs): + make = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad) + batch, num_heads, head_dim = 4, 4, 8 + seq_q = 11 + seq_kv = 32 + + dim_4_q_shape = (batch, num_heads, seq_q, head_dim) + dim_4_kv_shape = (batch, num_heads, seq_kv, head_dim) + + qkv_shapes = [(dim_4_q_shape, dim_4_kv_shape)] + samples = [] + scales = [None, 1.0] + + for qkv_shape, is_causal, dropout_p, scale in product( + qkv_shapes, [True, False], [0.0, 0.5], scales): + shape_q, shape_kv = qkv_shape + samples.append(SampleInput( + make(shape_q).transpose(1, 2), + make(shape_kv).transpose(1, 2), + make(shape_kv).transpose(1, 2), + cum_seq_q=None, + cum_seq_k=None, + max_q=seq_q, + max_k=seq_kv, + dropout_p=dropout_p, + is_causal=is_causal, + return_debug_mask=False, + scale=scale, + )) + + yield from samples + +def sample_inputs_pairwise_distance(op_info, device, dtype, requires_grad, **kwargs): + make = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad) + + shape = (3,) + batched_shape = (2, *shape) + shapes_and_kwargs = [ + (shape, None), + (batched_shape, None), + (shape, dict(keepdim=True)), + (batched_shape, dict(keepdim=True)), + (shape, dict(p=5.0)), + 
(shape, dict(p=-1.0)), + (shape, dict(eps=1.0)), + ] + + return ( + SampleInput(make(shape), args=(make(shape),), kwargs=kwargs) for shape, kwargs in shapes_and_kwargs + ) + +def sample_inputs_pixel_shuffle(op_info, device, dtype, requires_grad, **kwargs): + make_arg = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad) + yield from ( + SampleInput(make_arg((1, 9, 2, 2)), upscale_factor=upscale_factor) + for upscale_factor in (1, 3) + ) + yield from ( + SampleInput(make_arg(shape), upscale_factor=1) + for shape in [ + (1, 0, 1, 1), + (1, 1, 0, 1), + (1, 1, 1, 0), + ] + ) + +def sample_inputs_pixel_unshuffle(op_info, device, dtype, requires_grad, **kwargs): + make_arg = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad) + yield from ( + SampleInput(make_arg((1, 1, 6, 6)), downscale_factor=downscale_factor) + for downscale_factor in (1, 3) + ) + yield from ( + SampleInput(make_arg(shape), downscale_factor=1) + for shape in [ + (1, 0, 1, 1), + (1, 1, 0, 1), + (1, 1, 1, 0), + ] + ) + +def sample_inputs_binary_cross_entropy(op_info, device, dtype, requires_grad, logits=False, **kwargs): + make = partial(make_tensor, device=device, dtype=dtype) + # Lower bounds must be greater than 'eps' defined in gradcheck.py::gradgradcheck() -> eps + # otherwise perturbation calculation causes Tensor value to become negative triggering + # a device-side hardware assertion + make_prob = partial(make, low=1e-6, high=1) + + reductions = ("mean", "sum", "none") + + shapes_and_kwargs = [ + *[(shape, None) for shape in ((), (1,), (S,), (S, S), (S, S, S))], + *[((S, S), dict(reduction=reduction)) for reduction in reductions], + *[((S, S), dict(reduction=reduction, weight=make((S, S)))) for reduction in reductions], + ] + + if logits: + shapes_and_kwargs.extend( + [((S, S), dict(reduction=reduction, pos_weight=make((S,), low=0))) for reduction in reductions] + ) + + for shape, kwargs in shapes_and_kwargs: + yield SampleInput( + (make if logits else make_prob)(shape, requires_grad=requires_grad), + args=(make_prob(shape, requires_grad=requires_grad),), + kwargs=kwargs, + ) + +def sample_inputs_allclose(op_info, device, dtype, requires_grad, **kwargs): + sample_shapes = [(), (S), (S, S, S)] + atols = [1e-2, 1e-16] + rtols = [1e-1, 0.5] + eps = 1e-8 + for s, rtol, atol in product(sample_shapes, rtols, atols): + # close sample + t = make_tensor(s, device=device, dtype=dtype, requires_grad=requires_grad) + close = (t + atol).detach().requires_grad_(requires_grad) + yield SampleInput(t, close, rtol=rtol, atol=atol) + + # random sample + a = make_tensor(s, device=device, dtype=dtype, requires_grad=requires_grad) + b = make_tensor(s, device=device, dtype=dtype, requires_grad=requires_grad) + yield SampleInput(a, b, rtol=rtol, atol=atol) + + +def sample_inputs_l1_loss(op_info, device, dtype, requires_grad, **kwargs): + yield from sample_inputs_loss(op_info, device, dtype, requires_grad, **kwargs) + + # test COMPLEX_TO_FLOAT promotion + if dtype.is_complex: + make = partial(make_tensor, (), device=device, requires_grad=requires_grad) + yield SampleInput(make(dtype=dtype), args=(make(dtype=torch.double),)) + yield SampleInput(make(dtype=torch.double), args=(make(dtype=dtype),)) + +def error_inputs_l1_loss(op_info, device, **kwargs): + make = partial(make_tensor, device=device, dtype=torch.float32) + + # invalid reduction value + yield ErrorInput(SampleInput(make(5, 4), args=(make(5, 4),), + kwargs={'reduction': 'abc'}), + error_type=ValueError, + error_regex='abc is not a 
valid value for reduction') + # invalid input shapes + yield ErrorInput(SampleInput(make(5, 4), args=(make(5,),)), + error_regex=(r'(Attempting to broadcast a dimension of length|' + r'The size of tensor a \(4\) must match the ' + r'size of tensor b \(5\) at non-singleton ' + r'dimension 1)') + ) + +def sample_inputs_smooth_l1_loss(op_info, device, dtype, requires_grad, **kwargs): + yield from sample_inputs_loss(op_info, device, dtype, requires_grad, **kwargs) + + make = partial(make_tensor, (S, S), device=device, dtype=dtype, requires_grad=requires_grad) + + # This test case always triggers the smooth condition, since absolute difference of input and target + # is smaller than beta + yield SampleInput(make(low=0, high=2), args=(make(low=-2, high=0),), kwargs=dict(beta=5)) + yield SampleInput(make(), args=(make(),), kwargs=dict(beta=0)) + +def sample_inputs_kl_div(op_info, device, dtype, requires_grad, **kwargs): + # kl_div works with inputs in [0, 1] (aka the pdf of a probability measure) + # Then log [0, 1] = (-inf, 0], so this is the log space + make_arg = partial(make_tensor, low=0., device=device, dtype=dtype, requires_grad=requires_grad) + + def make_log(shape): + out = torch.nn.functional.log_softmax(make_arg(shape), -1) + out.requires_grad_(requires_grad) + return out + + def make_prob(shape): + out = torch.nn.functional.softmax(make_arg(shape), -1) + out.requires_grad_(requires_grad) + return out + + shapes = ((2,), (2, 3)) + reductions = ("none", "mean", "batchmean", "sum") + for shape, reduction, log_target in product(shapes, reductions, (True, False)): + input = make_log(shape) + target = make_log(shape) if log_target else make_prob(shape) + yield SampleInput(input, args=(target,), kwargs=dict(reduction=reduction, log_target=log_target)) + +def sample_inputs_pdist(op_info, device, dtype, requires_grad, **kwargs): + make_input = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad) + + yield from (SampleInput(make_input((n, m))) for n, m in itertools.product((1, S), repeat=2)) + yield from (SampleInput(make_input((S, S)), kwargs=dict(p=p)) for p in (0.0, 1.0, 2.0, 10.0, float("inf"))) + +def reference_pdist(input, p=2): + pdist = scipy.spatial.distance.pdist + if p == 0: + output = pdist(input, "hamming") * input.shape[1] + elif p == float("inf"): + output = pdist(input, lambda x, y: np.abs(x - y).max()) + else: + output = pdist(input, "minkowski", p=p) + return output.astype(input.dtype) + +def sample_inputs_diagflat(op_info, device, dtype, requires_grad, **kwargs): + make_input = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad) + + yield SampleInput(make_input(())) + yield SampleInput(make_input((2,))) + yield SampleInput(make_input((2, 2))) + yield SampleInput(make_input((2,)), offset=1) + yield SampleInput(make_input((2,)), offset=-1) + +def sample_inputs_max_unpool(op_info, device, dtype, requires_grad, **kwargs): + unpool_name_to_pool_method_dict = { + 'nn.functional.max_unpool1d': torch.nn.functional.max_pool1d, + 'nn.functional.max_unpool2d': torch.nn.functional.max_pool2d, + 'nn.functional.max_unpool3d': torch.nn.functional.max_pool3d + } + + unpool_name_to_dim = { + 'nn.functional.max_unpool1d': 1, + 'nn.functional.max_unpool2d': 2, + 'nn.functional.max_unpool3d': 3 + } + + unpool_to_pool_name_dict = {k: f'nn.functional.{v.__name__}' for k, v in unpool_name_to_pool_method_dict.items()} + + pool_dim = unpool_name_to_dim[op_info.name] + pool_method = unpool_name_to_pool_method_dict[op_info.name] + + pool_op_info = 
copy.copy(op_info) + pool_op_info.name = unpool_to_pool_name_dict[op_info.name] + + for sample in sample_inputs_max_pool(pool_op_info, device, dtype, requires_grad, **kwargs): + # shapes (C, ...) do not work as of now, + # see https://github.com/pytorch/pytorch/issues/68337 + # TODO: remove once the issue is resolved + if sample.input.dim() != pool_dim + 2: + continue + + # No dilation > 1 for max_unpool, + # see https://github.com/pytorch/pytorch/issues/68420 + if sample.kwargs['dilation'] != 1: + continue + + # Can't unpool without indices + if sample.kwargs['return_indices']: + pool, indices = pool_method(sample.input, **sample.kwargs) + # arg has to be a leaf + arg = pool.detach().requires_grad_(requires_grad) + sample_kwargs = { + 'kernel_size': sample.kwargs['kernel_size'], + 'stride': sample.kwargs['stride'], + 'padding': sample.kwargs['padding'], + # output_size could be None but we specify it explicitly + # to compensate for the information lose in pool due + # to the floor/ceil operation used to compute the shapes + 'output_size': sample.input.size() + } + + yield SampleInput(arg, args=(indices,), kwargs=sample_kwargs) + +def sample_inputs_max_unpool_grad(op_info, device, dtype, requires_grad, **kwargs): + for sample in sample_inputs_max_unpool(op_info, device, dtype, requires_grad, **kwargs): + indices = sample.args[0] + # The samples for max_unpool are generated with max_pool. + # It could be that a single element from the max_pool's + # input is mapped to several locations in its output. + # This situation leads to failed gradchecks because + # the finite difference algorithm perturbs the elements + # of the output one by one, and not in classes of + # equivalences determined by whether two elements + # in the output are coming from the same location in the + # input (simply put, they have the same corresponding index). + # So, there are two ways to resolve this issue: + # 1. Extract a perturbation for one element and apply it all + # the elements from the same equivalence class, or + # 2. Make sure that the equivalence classes are all singletons, + # i.e. the index tensor has to be comprised of only unique + # indices. + # Here we go with the solution 2, the easiest of all. + if indices.unique().numel() == indices.numel(): + yield sample + +def sample_inputs_multi_head_attention_forward(opinfo, device, dtype, requires_grad, **kwargs): + make_input = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad) + + if requires_grad: + # backward tests would take too long to complete, causing the job timeout. 
+ bsz = 2 + is_batcheds = (True,) + use_separate_proj_weights = (False,) + emb_sizes = (2,) + src_lens = (XS,) + tgt_lens = (XS,) + heads = (2,) + dropouts = (0.5,) + mask_types = ("2d",) + else: + bsz = 2 + is_batcheds = (False, True) + use_separate_proj_weights = (False, True) + emb_sizes = (2, 4) + src_lens = (XS,) + tgt_lens = (XS, S) + heads = (1, 2) + dropouts = (0.0, 0.5) + mask_types = (None, "2d", "3d") + + for is_batched, use_separate_proj_weight, mask_type, emb_size, src_len, tgt_len, num_heads, dropout_p in itertools.product( + is_batcheds, use_separate_proj_weights, mask_types, emb_sizes, src_lens, tgt_lens, heads, dropouts + ): + attn_mask = None + if mask_type == "2d": + attn_mask = make_input(src_len, tgt_len) + elif mask_type == "3d": + attn_mask = make_input((bsz if is_batched else 1) * num_heads, src_len, tgt_len) + + if is_batched: + q = make_input(src_len, bsz, emb_size) + k = make_input(tgt_len, bsz, emb_size) + v = make_input(tgt_len, bsz, emb_size) + else: + q = make_input(src_len, emb_size) + k = make_input(tgt_len, emb_size) + v = make_input(tgt_len, emb_size) + if use_separate_proj_weight: + in_proj_weight = None + q_proj_weight = make_input(emb_size, emb_size) + k_proj_weight = make_input(emb_size, emb_size) + v_proj_weight = make_input(emb_size, emb_size) + else: + in_proj_weight = make_input(emb_size * 3, emb_size) + q_proj_weight = None + k_proj_weight = None + v_proj_weight = None + + bias_k = make_input(emb_size) + bias_v = make_input(emb_size) + in_proj_bias = make_input(emb_size * 3) + out_proj_weight = make_input(emb_size, emb_size) + out_proj_bias = make_input(emb_size) + sample_args = ( + k, v, emb_size, num_heads, in_proj_weight, + in_proj_bias, bias_k, bias_v, False, + dropout_p, out_proj_weight, out_proj_bias + ) + sample_kwargs = { + "q_proj_weight" : q_proj_weight, + "k_proj_weight" : k_proj_weight, + "v_proj_weight" : v_proj_weight, + "attn_mask" : attn_mask, + "training" : True if dropout_p > 0.0 else False, + "use_separate_proj_weight" : use_separate_proj_weight + } + + yield SampleInput(q, args=sample_args, kwargs=sample_kwargs) + + +# Includes some values such that N * N won't be a multiple of 4, +# which should ensure we test the vectorized and non-vectorized +# kernel code paths. +NUM_SIZE0_TENSORS = 10000 +foreach_num_tensors = [20, 23] if not TEST_WITH_SLOW else [23, 30, 300] +_foreach_inputs_default_kwargs = {"noncontiguous": False, "same_size": False, "low": None, "high": None} + + +class ForeachRightmostArgType(enum.Enum): + TensorList = enum.auto() + ScalarList = enum.auto() + Scalar = enum.auto() + Tensor = enum.auto() + + +class ForeachSampleInput(SampleInput): + # For TensorList Scalar/Tensor, we compute the reference + # by converting it into TensorList ScalarList/TensorList and + # then converting into multiple Tensor Scalar/Tensor. 
+ # ref_args contains the args converted to TensorList ScalarList/TensorList + ref_args: Any + disable_fastpath: bool + + def __init__(self, *args, disable_fastpath=False, ref_args=None, **kwargs): + super().__init__(*args, **kwargs) + self.ref_args = ref_args or self.args + self.disable_fastpath = disable_fastpath + + +class foreach_inputs_sample_func: + def __init__( + self, + arity: int, + rightmost_supports_scalar: bool, + rightmost_supports_scalarlist: bool, + rightmost_supports_tensor: bool = False, + ) -> None: + self.arity = arity + self._set_rightmost_arg_types( + rightmost_supports_scalar, rightmost_supports_scalarlist, rightmost_supports_tensor, + ) + + def _set_rightmost_arg_types( + self, + rightmost_supports_scalar: bool, + rightmost_supports_scalarlist: bool, + rightmost_supports_tensor: bool, + ) -> None: + self._rightmost_arg_types = [ForeachRightmostArgType.TensorList] + if self.arity > 1: + if rightmost_supports_scalar: + self._rightmost_arg_types.append(ForeachRightmostArgType.Scalar) + if rightmost_supports_scalarlist: + self._rightmost_arg_types.append(ForeachRightmostArgType.ScalarList) + if rightmost_supports_tensor: + self._rightmost_arg_types.append(ForeachRightmostArgType.Tensor) + + def _sample_rightmost_arg(self, opinfo, rightmost_arg_type, device, dtype, num_tensors, **_foreach_inputs_kwargs): + if rightmost_arg_type == ForeachRightmostArgType.TensorList: + return [sample_inputs_foreach(None, device, dtype, num_tensors, **_foreach_inputs_kwargs)] + if rightmost_arg_type == ForeachRightmostArgType.Tensor: + return [make_tensor( + (), device=device, dtype=dtype, + noncontiguous=_foreach_inputs_kwargs["noncontiguous"], + requires_grad=_foreach_inputs_kwargs.get("requires_grad", False), + )] + should_use_simpler_scalars = opinfo.name == "_foreach_pow" and dtype in (torch.float16, torch.bfloat16) + + def sample_float(): + s = random.random() + if should_use_simpler_scalars: + return 1.0 if s > 0.5 else 2.0 + else: + return 1.0 - s + + high = 2 if should_use_simpler_scalars else 9 + if rightmost_arg_type == ForeachRightmostArgType.ScalarList: + return [ + [random.randint(0, high) + 1 for _ in range(num_tensors)], + [sample_float() for _ in range(num_tensors)], + [complex(sample_float(), sample_float()) for _ in range(num_tensors)], + [True for _ in range(num_tensors)], + [1, 2.0, 3.0 + 4.5j] + [3.0 for _ in range(num_tensors - 3)], + [True, 1, 2.0, 3.0 + 4.5j] + [3.0 for _ in range(num_tensors - 4)], + ] + if rightmost_arg_type == ForeachRightmostArgType.Scalar: + return ( + random.randint(1, high + 1), + sample_float(), + True, + complex(sample_float(), sample_float()), + ) + raise AssertionError(f"Invalid rightmost_arg_type of {rightmost_arg_type}") + + def _should_disable_fastpath(self, opinfo, rightmost_arg, rightmost_arg_type, dtype): + if self.arity == 1: + if "foreach_abs" in opinfo.name and dtype in complex_types(): + return True + # unary + if opinfo.ref in (torch.abs, torch.neg): + return False + return dtype in integral_types_and(torch.bool) + if self.arity < 2 or rightmost_arg_type == ForeachRightmostArgType.Tensor: + return None + if "foreach_pow" in opinfo.name and dtype in integral_types(): + return True + if rightmost_arg_type == ForeachRightmostArgType.TensorList: + disable_fastpath = "foreach_div" in opinfo.name and dtype in integral_types_and(torch.bool) + if "foreach_add" in opinfo.name and dtype == torch.bool: + disable_fastpath = True + return disable_fastpath + elif rightmost_arg_type == ForeachRightmostArgType.Scalar: + disable_fastpath = 
"foreach_div" in opinfo.name and dtype in integral_types_and(torch.bool) + if isinstance(rightmost_arg, bool): + disable_fastpath |= dtype == torch.bool + if opinfo.ref in (torch.add, torch.mul): + disable_fastpath = False + elif isinstance(rightmost_arg, int): + disable_fastpath |= dtype == torch.bool + elif isinstance(rightmost_arg, float): + disable_fastpath |= dtype in integral_types_and(torch.bool) + elif isinstance(rightmost_arg, complex): + disable_fastpath |= dtype not in complex_types() + else: + raise AssertionError(f"Invalid scalar of type {rightmost_arg_type} - {rightmost_arg}") + return disable_fastpath + elif rightmost_arg_type == ForeachRightmostArgType.ScalarList: + disable_fastpath = opinfo.ref == torch.div and dtype in integral_types_and(torch.bool) + elmt_t = type(rightmost_arg[0]) + has_same_type = all(isinstance(v, elmt_t) for v in rightmost_arg) + if not has_same_type: + return dtype not in complex_types() + if isinstance(rightmost_arg[0], bool): + if ("foreach_add" in opinfo.name or "foreach_mul" in opinfo.name) and dtype == torch.bool: + disable_fastpath = False + elif isinstance(rightmost_arg[0], int): + disable_fastpath |= dtype == torch.bool + elif isinstance(rightmost_arg[0], float): + disable_fastpath |= dtype in integral_types_and(torch.bool) + elif isinstance(rightmost_arg[0], complex): + disable_fastpath |= dtype not in complex_types() + else: + raise AssertionError(f"Invalid scalarlist of {rightmost_arg}") + return disable_fastpath + else: + raise AssertionError(f"Invalid rightmost_arg_type of {rightmost_arg_type}") + + def _sample_kwargs(self, opinfo, rightmost_arg, rightmost_arg_type, dtype): + kwargs = {} + if rightmost_arg_type == ForeachRightmostArgType.TensorList and opinfo.supports_alpha_param: + if dtype in integral_types_and(torch.bool): + kwargs["alpha"] = 3 + elif dtype.is_complex: + kwargs["alpha"] = complex(3, 3) + else: + kwargs["alpha"] = 3.14 + if self.arity > 1: + kwargs["disable_fastpath"] = self._should_disable_fastpath(opinfo, rightmost_arg, rightmost_arg_type, dtype) + return kwargs + + def sample_zero_size_tensor_inputs(self, opinfo, device, dtype, requires_grad, **kwargs): + assert "num_input_tensors" not in kwargs + _foreach_inputs_kwargs = {k: kwargs.pop(k, v) for k, v in _foreach_inputs_default_kwargs.items()} + _foreach_inputs_kwargs["requires_grad"] = requires_grad + for rightmost_arg_type in self._rightmost_arg_types: + zero_size_foreach_inputs_kwargs = copy.deepcopy(_foreach_inputs_kwargs) + zero_size_foreach_inputs_kwargs["zero_size"] = True + input = sample_inputs_foreach(None, device, dtype, NUM_SIZE0_TENSORS, **zero_size_foreach_inputs_kwargs) + if self.arity > 1: + args = [ + sample_inputs_foreach(None, device, dtype, NUM_SIZE0_TENSORS, **zero_size_foreach_inputs_kwargs) + for _ in range(self.arity - 2) + ] + args.append( + self._sample_rightmost_arg( + opinfo, ForeachRightmostArgType.TensorList, device, dtype, NUM_SIZE0_TENSORS, + **zero_size_foreach_inputs_kwargs)[0]) + kwargs = self._sample_kwargs( + opinfo, args[-1], ForeachRightmostArgType.TensorList, dtype, zero_size=True) + else: + args = [] + kwargs = {} + if opinfo.ref in (torch.abs, torch.neg): + kwargs["disable_fastpath"] = False + else: + kwargs["disable_fastpath"] = dtype in integral_types_and(torch.bool) + yield ForeachSampleInput(input, *args, **kwargs) + + def __call__(self, opinfo, device, dtype, requires_grad, **kwargs): + num_input_tensors_specified = "num_input_tensors" in kwargs + num_input_tensors = kwargs.pop("num_input_tensors") if 
num_input_tensors_specified else foreach_num_tensors + assert isinstance(num_input_tensors, list) + _foreach_inputs_kwargs = {k: kwargs.pop(k, v) for k, v in _foreach_inputs_default_kwargs.items()} + _foreach_inputs_kwargs["requires_grad"] = requires_grad + _foreach_inputs_kwargs["zero_size"] = False + + # add empty tensor interspersion to test fully fixing #100701 + for num_tensors, rightmost_arg_type, intersperse_empty_tensors in itertools.product( + num_input_tensors, self._rightmost_arg_types, (True, False)): + if intersperse_empty_tensors and (num_tensors != max(num_input_tensors) or str(device) == 'cpu'): + # generate interspersed empty tensors for only 1 N on non-cpu device to lessen redundancy + continue + _foreach_inputs_kwargs["intersperse_empty_tensors"] = intersperse_empty_tensors + input = sample_inputs_foreach( + None, device, dtype, num_tensors, **_foreach_inputs_kwargs) + args = [] + if self.arity > 1: + args = [ + sample_inputs_foreach( + None, device, dtype, num_tensors, **_foreach_inputs_kwargs) + for _ in range(self.arity - 2) + ] + rightmost_arg_list = self._sample_rightmost_arg( + opinfo, rightmost_arg_type, device, dtype, num_tensors, + **_foreach_inputs_kwargs) + for rightmost_arg in rightmost_arg_list: + args.append(rightmost_arg) + kwargs = self._sample_kwargs(opinfo, rightmost_arg, rightmost_arg_type, dtype) + ref_args = args + if rightmost_arg_type in (ForeachRightmostArgType.Scalar, ForeachRightmostArgType.Tensor): + ref_args = args[:-1] + [[args[-1] for _ in range(num_tensors)]] + sample = ForeachSampleInput(input, *args, ref_args=ref_args, **kwargs) + yield sample + args.pop() + else: + yield ForeachSampleInput( + input, + *args, + disable_fastpath=self._should_disable_fastpath(opinfo, None, None, dtype), + ) + + +class foreach_norm_sample_func(foreach_inputs_sample_func): + def sample_zero_size_tensor_inputs(self, opinfo, device, dtype, requires_grad, **kwargs): + assert "num_input_tensors" not in kwargs + _foreach_inputs_kwargs = {k: kwargs.pop(k, v) for k, v in _foreach_inputs_default_kwargs.items()} + _foreach_inputs_kwargs["requires_grad"] = requires_grad + for ord in (0, 1, 2, -1, -2): + input = sample_inputs_foreach(None, device, dtype, NUM_SIZE0_TENSORS, zero_size=True, **_foreach_inputs_kwargs) + disable_fastpath = True + if ord in (1, 2) and dtype in floating_types_and(torch.half, torch.bfloat16): + disable_fastpath = False + yield ForeachSampleInput(input, ord=ord, disable_fastpath=disable_fastpath) + + def __call__(self, opinfo, device, dtype, requires_grad, **kwargs): + num_input_tensors = kwargs.pop("num_input_tensors", foreach_num_tensors) + assert isinstance(num_input_tensors, list) + _foreach_inputs_kwargs = {k: kwargs.pop(k, v) for k, v in _foreach_inputs_default_kwargs.items()} + _foreach_inputs_kwargs["requires_grad"] = requires_grad + + for num_tensors, ord in product(num_input_tensors, (0, 1, 2, -1, -2)): + input = sample_inputs_foreach(None, device, dtype, num_tensors, zero_size=False, **_foreach_inputs_kwargs) + disable_fastpath = True + if ord in (1, 2) and dtype in floating_types_and(torch.half, torch.bfloat16): + disable_fastpath = False + yield ForeachSampleInput(input, ord=ord, disable_fastpath=disable_fastpath) + + +class foreach_lerp_sample_func(foreach_inputs_sample_func): + def _sample_rightmost_arg(self, opinfo, rightmost_arg_type, device, dtype, num_tensors, **_foreach_inputs_kwargs): + if rightmost_arg_type == ForeachRightmostArgType.TensorList: + return [sample_inputs_foreach(None, device, dtype, num_tensors, 
**_foreach_inputs_kwargs)] + if rightmost_arg_type == ForeachRightmostArgType.ScalarList: + return [ + [random.randint(0, 9) + 1 for _ in range(num_tensors)], + [1.0 - random.random() for _ in range(num_tensors)], + [complex(1.0 - random.random(), 1.0 - random.random()) for _ in range(num_tensors)], + [True for _ in range(num_tensors)], + [1, 2.0, 3.0 + 4.5j] + [3.0 for _ in range(num_tensors - 3)], + [True, 1, 2.0, 3.0 + 4.5j] + [3.0 for _ in range(num_tensors - 4)], + ] + if rightmost_arg_type == ForeachRightmostArgType.Scalar: + return [random.random()] + raise AssertionError(f"Invalid rightmost_arg_type of {rightmost_arg_type}") + + +class foreach_pointwise_sample_func(foreach_inputs_sample_func): + + def __init__( + self, + arity: int = 3, + rightmost_supports_scalar: bool = False, + rightmost_supports_scalarlist: bool = False, + ): + super().__init__(arity, rightmost_supports_scalar, rightmost_supports_scalarlist) + + def _should_disable_fastpath(self, opinfo, rightmost_arg, rightmost_arg_type, dtype): + return dtype in integral_types_and(torch.bool) and opinfo.ref in (torch.addcmul,) + + def sample_zero_size_tensor_inputs(self, opinfo, device, dtype, requires_grad, **kwargs): + assert "num_input_tensors" not in kwargs + _foreach_inputs_kwargs = {k: kwargs.pop(k, v) for k, v in _foreach_inputs_default_kwargs.items()} + _foreach_inputs_kwargs["requires_grad"] = requires_grad + # zero_size tensor + input = sample_inputs_foreach(None, device, dtype, NUM_SIZE0_TENSORS, zero_size=True, **_foreach_inputs_kwargs) + args = [ + sample_inputs_foreach(None, device, dtype, NUM_SIZE0_TENSORS, zero_size=True, **_foreach_inputs_kwargs) + for _ in range(2) + ] + if "scalars" in kwargs: + del kwargs["scalars"] + kwargs.update(self._sample_kwargs(opinfo, args[-1], ForeachRightmostArgType.TensorList, dtype)) + yield ForeachSampleInput(input, *args, **kwargs) + + def __call__(self, opinfo, device, dtype, requires_grad, **kwargs): + num_input_tensors_specified = "num_input_tensors" in kwargs + num_input_tensors = kwargs.pop("num_input_tensors") if num_input_tensors_specified else foreach_num_tensors + assert isinstance(num_input_tensors, list) + _foreach_inputs_kwargs = {k: kwargs.pop(k, v) for k, v in _foreach_inputs_default_kwargs.items()} + _foreach_inputs_kwargs["requires_grad"] = requires_grad + + for num_tensors, rightmost_arg_type in itertools.product(num_input_tensors, self._rightmost_arg_types): + input = sample_inputs_foreach(None, device, dtype, num_tensors, zero_size=False, **_foreach_inputs_kwargs) + args = [ + sample_inputs_foreach(None, device, dtype, num_tensors, zero_size=False, **_foreach_inputs_kwargs) + for _ in range(2 - int(rightmost_arg_type == ForeachRightmostArgType.TensorList)) + ] + rightmost_arg_list = self._sample_rightmost_arg( + opinfo, rightmost_arg_type, device, dtype, num_tensors, zero_size=False, **_foreach_inputs_kwargs) + for rightmost_arg in rightmost_arg_list: + kwargs = {} + if rightmost_arg_type == ForeachRightmostArgType.TensorList: + args.append(rightmost_arg) + elif rightmost_arg_type in [ForeachRightmostArgType.Tensor, ForeachRightmostArgType.ScalarList]: + kwargs["scalars"] = rightmost_arg + else: + kwargs["value"] = rightmost_arg + kwargs.update(self._sample_kwargs(opinfo, rightmost_arg, rightmost_arg_type, dtype)) + assert len(args) == 2, f"{len(args)=}" + sample = ForeachSampleInput(input, *args, **kwargs) + yield sample + if rightmost_arg_type == ForeachRightmostArgType.TensorList: + args.pop() + + +foreach_unary_op_db: List[OpInfo] = [ + ForeachFuncInfo( 
+ 'exp', + foreach_inputs_sample_func(1, False, False), + backward_requires_result=True, + ), + ForeachFuncInfo( + 'acos', + foreach_inputs_sample_func(1, False, False), + ), + ForeachFuncInfo( + 'asin', + foreach_inputs_sample_func(1, False, False), + ), + ForeachFuncInfo( + 'atan', + foreach_inputs_sample_func(1, False, False), + ), + ForeachFuncInfo( + 'cos', + foreach_inputs_sample_func(1, False, False), + ), + ForeachFuncInfo( + 'cosh', + foreach_inputs_sample_func(1, False, False), + ), + ForeachFuncInfo( + 'log', + foreach_inputs_sample_func(1, False, False), + ), + ForeachFuncInfo( + 'log10', + foreach_inputs_sample_func(1, False, False), + ), + ForeachFuncInfo( + 'log2', + foreach_inputs_sample_func(1, False, False), + ), + ForeachFuncInfo( + 'tan', + foreach_inputs_sample_func(1, False, False), + backward_requires_result=True, + decorators=( + # due to https://github.com/pytorch/pytorch/pull/102427 enabling jiterator for complex + DecorateInfo( + toleranceOverride( + { + torch.complex64: tol(atol=3e-04, rtol=2e-05) + } + ), + 'TestForeach', + 'test_parity', + device_type='cuda' + ), + ), + ), + ForeachFuncInfo( + 'tanh', + foreach_inputs_sample_func(1, False, False), + backward_requires_result=True, + decorators=( + DecorateInfo( + toleranceOverride( + {torch.complex64: tol(atol=5e-03, rtol=1e-04)} + ), + 'TestForeach', + 'test_parity', + device_type='cuda' + ), + ), + ), + ForeachFuncInfo( + 'sin', + foreach_inputs_sample_func(1, False, False), + ), + ForeachFuncInfo( + 'sinh', + foreach_inputs_sample_func(1, False, False), + ), + ForeachFuncInfo( + 'neg', + foreach_inputs_sample_func(1, False, False), + dtypes=all_types_and_complex(), + dtypesIfCUDA=all_types_and_complex(), + ), + ForeachFuncInfo( + 'sqrt', + foreach_inputs_sample_func(1, False, False), + dtypes=floating_and_complex_types_and(torch.bfloat16), + dtypesIfCUDA=floating_and_complex_types_and(torch.half), + backward_requires_result=True, + ), + ForeachFuncInfo( + 'ceil', + foreach_inputs_sample_func(1, False, False), + dtypes=all_types_and(torch.bfloat16), + dtypesIfCUDA=all_types_and(torch.half, torch.bfloat16), + ), + ForeachFuncInfo( + 'erf', + foreach_inputs_sample_func(1, False, False), + dtypes=floating_types_and(torch.bfloat16), + dtypesIfCUDA=floating_types_and(torch.half, torch.bfloat16), + ), + ForeachFuncInfo( + 'erfc', + foreach_inputs_sample_func(1, False, False), + dtypes=floating_types_and(torch.bfloat16), + dtypesIfCUDA=floating_types_and(torch.half, torch.bfloat16), + ), + ForeachFuncInfo( + 'expm1', + foreach_inputs_sample_func(1, False, False), + dtypes=floating_and_complex_types_and(torch.bfloat16), + dtypesIfCUDA=floating_and_complex_types_and(torch.half, torch.bfloat16), + backward_requires_result=True, + ), + ForeachFuncInfo( + 'floor', + foreach_inputs_sample_func(1, False, False), + dtypes=all_types_and(torch.bfloat16), + dtypesIfCUDA=all_types_and(torch.half, torch.bfloat16), + ), + ForeachFuncInfo( + 'log1p', + foreach_inputs_sample_func(1, False, False), + dtypes=floating_and_complex_types_and(torch.bfloat16), + dtypesIfCUDA=floating_and_complex_types_and(torch.half), + ), + ForeachFuncInfo( + 'round', + foreach_inputs_sample_func(1, False, False), + dtypes=all_types_and(torch.bfloat16), + dtypesIfCUDA=all_types_and(torch.half, torch.bfloat16), + ), + ForeachFuncInfo( + 'frac', + foreach_inputs_sample_func(1, False, False), + dtypes=floating_types_and(torch.bfloat16), + dtypesIfCUDA=floating_types_and(torch.half, torch.bfloat16), + ), + ForeachFuncInfo( + 'reciprocal', + 
foreach_inputs_sample_func(1, False, False), + dtypes=floating_types_and(torch.bfloat16), + dtypesIfCUDA=floating_types_and(torch.half), + backward_requires_result=True, + ), + ForeachFuncInfo( + 'sigmoid', + foreach_inputs_sample_func(1, False, False), + dtypes=floating_types_and(torch.bfloat16), + dtypesIfCUDA=floating_types_and(torch.half), + backward_requires_result=True, + ), + ForeachFuncInfo( + 'trunc', + foreach_inputs_sample_func(1, False, False), + dtypes=all_types_and(torch.bfloat16), + dtypesIfCUDA=all_types_and(torch.half, torch.bfloat16), + ), + ForeachFuncInfo( + 'abs', + foreach_inputs_sample_func(1, False, False), + dtypes=all_types_and_complex_and(torch.bfloat16, torch.half), + dtypesIfCUDA=all_types_and_complex_and(torch.bfloat16, torch.half, torch.bool), + supports_fwgrad_bwgrad=True, + skips=( + DecorateInfo(unittest.skip("In-place abs not supported for complex tensors"), "TestMeta", + "test_dispatch_symbolic_meta_inplace", dtypes=complex_types()), + DecorateInfo(unittest.skip("In-place abs not supported for complex tensors"), "TestMeta", + "test_dispatch_meta_inplace", dtypes=complex_types()), + DecorateInfo(unittest.skip("In-place abs not supported for complex tensors"), "TestMeta", + "test_meta_inplace", dtypes=complex_types()), + ), + ), + ForeachFuncInfo( + 'zero', + foreach_inputs_sample_func(1, False, False), + dtypes=all_types_and_complex_and(torch.bfloat16, torch.half), + supports_out=False, + ), + ForeachFuncInfo( + 'sign', + foreach_inputs_sample_func(1, False, False), + dtypes=floating_types_and(torch.bool, torch.bfloat16, torch.half), + dtypesIfCUDA=floating_types_and(torch.bfloat16, torch.float16), + ), + ForeachFuncInfo( + 'lgamma', + foreach_inputs_sample_func(1, False, False), + dtypes=all_types_and(torch.bool, torch.bfloat16, torch.half), + dtypesIfCUDA=all_types_and(torch.bool, torch.float16), + skips=( + DecorateInfo(unittest.skip("In-place lgamma not supported for integral tensors"), "TestMeta", + "test_dispatch_symbolic_meta_inplace", dtypes=integral_types_and(torch.bool)), + DecorateInfo(unittest.skip("In-place lgamma not supported for integral tensors"), "TestMeta", + "test_dispatch_meta_inplace", dtypes=integral_types_and(torch.bool)), + DecorateInfo(unittest.skip("In-place lgamma not supported for integral tensors"), "TestMeta", + "test_meta_inplace", dtypes=integral_types_and(torch.bool)), + ), + ), +] + +foreach_binary_op_db: List[OpInfo] = [ + ForeachFuncInfo( + "add", + foreach_inputs_sample_func(2, True, True, True), + dtypes=all_types_and_complex_and(torch.bool, torch.bfloat16, torch.float16), + dtypesIfCUDA=all_types_and_complex_and(torch.bool, torch.bfloat16, torch.float16), + supports_alpha_param=True, + skips=( + # These tests fail with aten._local_scalar_dense not being implemented. + DecorateInfo(unittest.expectedFailure, "TestMeta", "test_meta_outplace"), + DecorateInfo(unittest.expectedFailure, "TestMeta", "test_meta_inplace"), + # Samples have complex types and inplace only works if the dtype is complex. 
+ DecorateInfo(unittest.expectedFailure, "TestMeta", "test_dispatch_meta_inplace", + dtypes=all_types_and(torch.bool, torch.bfloat16, torch.float16)), + DecorateInfo(unittest.expectedFailure, "TestMeta", "test_dispatch_symbolic_meta_inplace", + dtypes=all_types_and(torch.bool, torch.bfloat16, torch.float16)), + DecorateInfo(unittest.expectedFailure, "TestMeta", "test_dispatch_symbolic_meta_inplace_all_strides", + dtypes=all_types_and(torch.bool, torch.bfloat16, torch.float16)), + ), + ), + ForeachFuncInfo( + "sub", + foreach_inputs_sample_func(2, True, True), + dtypes=all_types_and_complex_and(torch.bool, torch.bfloat16, torch.float16), + dtypesIfCUDA=all_types_and_complex_and(torch.bool, torch.bfloat16, torch.float16), + supports_alpha_param=True, + skips=( + DecorateInfo(unittest.expectedFailure, "TestMeta", "test_dispatch_meta_inplace"), + DecorateInfo(unittest.expectedFailure, "TestMeta", "test_dispatch_symbolic_meta_inplace"), + DecorateInfo(unittest.expectedFailure, "TestMeta", "test_meta_inplace"), + DecorateInfo(unittest.expectedFailure, "TestMeta", "test_dispatch_meta_outplace"), + DecorateInfo(unittest.expectedFailure, "TestMeta", "test_dispatch_symbolic_meta_outplace"), + DecorateInfo(unittest.expectedFailure, "TestMeta", "test_meta_outplace"), + DecorateInfo(unittest.expectedFailure, "TestMeta", "test_dispatch_symbolic_meta_inplace_all_strides"), + DecorateInfo(unittest.expectedFailure, "TestMeta", "test_dispatch_symbolic_meta_outplace_all_strides"), + ), + ), + ForeachFuncInfo( + "mul", + dtypes=all_types_and_complex_and(torch.bool, torch.bfloat16, torch.float16), + dtypesIfCUDA=all_types_and_complex_and(torch.bool, torch.bfloat16, torch.float16), + sample_inputs_func=foreach_inputs_sample_func(2, True, True, True), + skips=( + # Samples have complex types and inplace only works if the dtype is complex. + DecorateInfo(unittest.expectedFailure, "TestMeta", "test_dispatch_meta_inplace", + dtypes=all_types_and(torch.bool, torch.bfloat16, torch.float16)), + DecorateInfo(unittest.expectedFailure, "TestMeta", "test_dispatch_symbolic_meta_inplace", + dtypes=all_types_and(torch.bool, torch.bfloat16, torch.float16)), + DecorateInfo(unittest.expectedFailure, "TestMeta", "test_meta_inplace", + dtypes=all_types_and(torch.bool, torch.bfloat16, torch.float16)), + DecorateInfo(unittest.expectedFailure, "TestMeta", "test_dispatch_symbolic_meta_inplace_all_strides", + dtypes=all_types_and(torch.bool, torch.bfloat16, torch.float16)), + ), + ), + ForeachFuncInfo( + "div", + dtypes=all_types_and_complex_and(torch.bool, torch.bfloat16, torch.float16), + dtypesIfCUDA=all_types_and_complex_and(torch.bool, torch.bfloat16, torch.float16), + sample_inputs_func=foreach_inputs_sample_func(2, True, True, True), + skips=( + # Samples have complex types and inplace only works if the dtype is complex. 
+ DecorateInfo(unittest.expectedFailure, "TestMeta", "test_dispatch_meta_inplace", + dtypes=all_types_and(torch.bool, torch.bfloat16, torch.float16)), + DecorateInfo(unittest.expectedFailure, "TestMeta", "test_dispatch_symbolic_meta_inplace", + dtypes=all_types_and(torch.bool, torch.bfloat16, torch.float16)), + DecorateInfo(unittest.expectedFailure, "TestMeta", "test_meta_inplace", + dtypes=all_types_and(torch.bool, torch.bfloat16, torch.float16)), + DecorateInfo(unittest.expectedFailure, "TestMeta", "test_dispatch_symbolic_meta_inplace_all_strides", + dtypes=all_types_and(torch.bool, torch.bfloat16, torch.float16)), + # fails with div_cpu is not implemented with ComplexHalf + DecorateInfo(unittest.expectedFailure, "TestMeta", "test_dispatch_meta_outplace", + dtypes=(torch.float16,), device_type='cpu'), + DecorateInfo(unittest.expectedFailure, "TestMeta", "test_dispatch_symbolic_meta_outplace", + dtypes=(torch.float16,), device_type='cpu'), + DecorateInfo(unittest.expectedFailure, "TestMeta", "test_meta_outplace", + dtypes=(torch.float16,), device_type='cpu'), + DecorateInfo(unittest.expectedFailure, "TestMeta", "test_dispatch_symbolic_meta_outplace_all_strides", + dtypes=(torch.float16,), device_type='cpu'), + ), + ), + ForeachFuncInfo( + "clamp_min", + foreach_inputs_sample_func(2, True, True), + dtypes=all_types_and(torch.bfloat16), + dtypesIfCUDA=all_types_and(torch.bfloat16, torch.float16), + skips=( + DecorateInfo(unittest.expectedFailure, "TestMeta", "test_dispatch_meta_inplace"), + DecorateInfo(unittest.expectedFailure, "TestMeta", "test_dispatch_symbolic_meta_inplace"), + DecorateInfo(unittest.expectedFailure, "TestMeta", "test_meta_inplace"), + DecorateInfo(unittest.expectedFailure, "TestMeta", "test_dispatch_meta_outplace"), + DecorateInfo(unittest.expectedFailure, "TestMeta", "test_dispatch_symbolic_meta_outplace"), + DecorateInfo(unittest.expectedFailure, "TestMeta", "test_meta_outplace"), + DecorateInfo(unittest.expectedFailure, "TestMeta", "test_dispatch_symbolic_meta_inplace_all_strides"), + DecorateInfo(unittest.expectedFailure, "TestMeta", "test_dispatch_symbolic_meta_outplace_all_strides"), + ), + ), + ForeachFuncInfo( + "clamp_max", + foreach_inputs_sample_func(2, True, True), + dtypes=all_types_and(torch.bfloat16), + dtypesIfCUDA=all_types_and(torch.bfloat16, torch.float16), + skips=( + DecorateInfo(unittest.expectedFailure, "TestMeta", "test_dispatch_meta_inplace"), + DecorateInfo(unittest.expectedFailure, "TestMeta", "test_dispatch_symbolic_meta_inplace"), + DecorateInfo(unittest.expectedFailure, "TestMeta", "test_meta_inplace"), + DecorateInfo(unittest.expectedFailure, "TestMeta", "test_dispatch_meta_outplace"), + DecorateInfo(unittest.expectedFailure, "TestMeta", "test_dispatch_symbolic_meta_outplace"), + DecorateInfo(unittest.expectedFailure, "TestMeta", "test_meta_outplace"), + DecorateInfo(unittest.expectedFailure, "TestMeta", "test_dispatch_symbolic_meta_inplace_all_strides"), + DecorateInfo(unittest.expectedFailure, "TestMeta", "test_dispatch_symbolic_meta_outplace_all_strides"), + ), + ), + # note(crcrpar): forward ad not implemented. 
+ ForeachFuncInfo( + "minimum", + foreach_inputs_sample_func(2, True, True), + dtypes=all_types_and(torch.bfloat16), + dtypesIfCUDA=all_types_and(torch.bfloat16, torch.float16), + supports_forward_ad=False, + supports_inplace_autograd=False, + skips=( + DecorateInfo(unittest.expectedFailure, "TestMeta", "test_dispatch_meta_inplace"), + DecorateInfo(unittest.expectedFailure, "TestMeta", "test_dispatch_symbolic_meta_inplace"), + DecorateInfo(unittest.expectedFailure, "TestMeta", "test_meta_inplace"), + DecorateInfo(unittest.expectedFailure, "TestMeta", "test_dispatch_meta_outplace"), + DecorateInfo(unittest.expectedFailure, "TestMeta", "test_dispatch_symbolic_meta_outplace"), + DecorateInfo(unittest.expectedFailure, "TestMeta", "test_meta_outplace"), + DecorateInfo(unittest.expectedFailure, "TestMeta", "test_dispatch_symbolic_meta_inplace_all_strides"), + DecorateInfo(unittest.expectedFailure, "TestMeta", "test_dispatch_symbolic_meta_outplace_all_strides"), + ), + ), + # note(crcrpar): forward ad not implemented. + ForeachFuncInfo( + "maximum", + foreach_inputs_sample_func(2, True, True), + dtypes=all_types_and(torch.bfloat16), + dtypesIfCUDA=all_types_and(torch.bfloat16, torch.float16), + supports_forward_ad=False, + supports_inplace_autograd=False, + skips=( + DecorateInfo(unittest.expectedFailure, "TestMeta", "test_dispatch_meta_inplace"), + DecorateInfo(unittest.expectedFailure, "TestMeta", "test_dispatch_symbolic_meta_inplace"), + DecorateInfo(unittest.expectedFailure, "TestMeta", "test_meta_inplace"), + DecorateInfo(unittest.expectedFailure, "TestMeta", "test_dispatch_meta_outplace"), + DecorateInfo(unittest.expectedFailure, "TestMeta", "test_dispatch_symbolic_meta_outplace"), + DecorateInfo(unittest.expectedFailure, "TestMeta", "test_meta_outplace"), + DecorateInfo(unittest.expectedFailure, "TestMeta", "test_dispatch_symbolic_meta_inplace_all_strides"), + DecorateInfo(unittest.expectedFailure, "TestMeta", "test_dispatch_symbolic_meta_outplace_all_strides"), + ), + ), + ForeachFuncInfo( + "pow", + dtypes=all_types_and(torch.bfloat16), + dtypesIfCUDA=all_types_and(torch.bfloat16, torch.float16), + supports_alpha_param=False, + supports_scalar_self_arg=True, + sample_inputs_func=foreach_inputs_sample_func(2, True, True), + supports_autograd=True, + skips=( + DecorateInfo(unittest.expectedFailure, "TestMeta", "test_dispatch_meta_inplace"), + DecorateInfo(unittest.expectedFailure, "TestMeta", "test_dispatch_symbolic_meta_inplace"), + DecorateInfo(unittest.expectedFailure, "TestMeta", "test_meta_inplace"), + DecorateInfo(unittest.expectedFailure, "TestMeta", "test_dispatch_meta_outplace"), + DecorateInfo(unittest.expectedFailure, "TestMeta", "test_dispatch_symbolic_meta_outplace"), + DecorateInfo(unittest.expectedFailure, "TestMeta", "test_meta_outplace"), + DecorateInfo(unittest.expectedFailure, "TestMeta", "test_dispatch_symbolic_meta_inplace_all_strides"), + DecorateInfo(unittest.expectedFailure, "TestMeta", "test_dispatch_symbolic_meta_outplace_all_strides"), + ), + supports_forward_ad=True, + backward_requires_result=True, + ), + ForeachFuncInfo( + "copy", + foreach_inputs_sample_func(2, False, False), + dtypes=all_types_and_complex_and(torch.bfloat16, torch.half), + supports_out=False, + supports_forward_ad=False, + supports_autograd=False, + ) +] + +foreach_pointwise_op_db: List[ForeachFuncInfo] = [ + ForeachFuncInfo( + "addcmul", + foreach_pointwise_sample_func(4, True, True), + dtypes=all_types_and_complex(), + dtypesIfCUDA=all_types_and_complex_and(torch.half, torch.bfloat16), + 
skips=( + DecorateInfo(unittest.expectedFailure, "TestMeta", "test_dispatch_meta_inplace"), + DecorateInfo(unittest.expectedFailure, "TestMeta", "test_dispatch_symbolic_meta_inplace"), + DecorateInfo(unittest.expectedFailure, "TestMeta", "test_meta_inplace"), + DecorateInfo(unittest.expectedFailure, "TestMeta", "test_dispatch_meta_outplace"), + DecorateInfo(unittest.expectedFailure, "TestMeta", "test_dispatch_symbolic_meta_outplace"), + DecorateInfo(unittest.expectedFailure, "TestMeta", "test_meta_outplace"), + DecorateInfo(unittest.expectedFailure, "TestMeta", "test_dispatch_symbolic_meta_inplace_all_strides"), + DecorateInfo(unittest.expectedFailure, "TestMeta", "test_dispatch_symbolic_meta_outplace_all_strides"), + ), + ), + ForeachFuncInfo( + "addcdiv", + sample_inputs_func=foreach_pointwise_sample_func(4, True, True), + dtypes=all_types_and_complex(), + dtypesIfCUDA=all_types_and_complex_and(torch.half, torch.bfloat16), + skips=( + DecorateInfo(unittest.expectedFailure, "TestMeta", "test_dispatch_meta_inplace"), + DecorateInfo(unittest.expectedFailure, "TestMeta", "test_dispatch_symbolic_meta_inplace"), + DecorateInfo(unittest.expectedFailure, "TestMeta", "test_meta_inplace"), + DecorateInfo(unittest.expectedFailure, "TestMeta", "test_dispatch_meta_outplace"), + DecorateInfo(unittest.expectedFailure, "TestMeta", "test_dispatch_symbolic_meta_outplace"), + DecorateInfo(unittest.expectedFailure, "TestMeta", "test_meta_outplace"), + DecorateInfo(unittest.expectedFailure, "TestMeta", "test_dispatch_symbolic_meta_inplace_all_strides"), + DecorateInfo(unittest.expectedFailure, "TestMeta", "test_dispatch_symbolic_meta_outplace_all_strides"), + ), + ), +] + +foreach_reduce_op_db: List[ForeachFuncInfo] = [ + ForeachFuncInfo( + "norm", + foreach_norm_sample_func(1, False, False), + dtypes=floating_and_complex_types_and(torch.float16, torch.bfloat16), + dtypesIfCUDA=floating_and_complex_types_and(torch.float16, torch.bfloat16), + skips=( + DecorateInfo(unittest.expectedFailure, "TestMeta", "test_dispatch_meta_inplace"), + DecorateInfo(unittest.expectedFailure, "TestMeta", "test_dispatch_symbolic_meta_inplace"), + DecorateInfo(unittest.expectedFailure, "TestMeta", "test_meta_inplace"), + DecorateInfo(unittest.expectedFailure, "TestMeta", "test_dispatch_meta_outplace"), + DecorateInfo(unittest.expectedFailure, "TestMeta", "test_dispatch_symbolic_meta_outplace"), + DecorateInfo(unittest.expectedFailure, "TestMeta", "test_meta_outplace"), + DecorateInfo(unittest.expectedFailure, "TestMeta", "test_dispatch_symbolic_meta_inplace_all_strides"), + DecorateInfo(unittest.expectedFailure, "TestMeta", "test_dispatch_symbolic_meta_outplace_all_strides"), + ), + ), +] + +foreach_other_op_db: List[ForeachFuncInfo] = [ + ForeachFuncInfo( + "lerp", + foreach_lerp_sample_func(3, True, False), + dtypes=floating_and_complex_types_and(torch.float16, torch.bfloat16), + dtypesIfCUDA=floating_and_complex_types_and(torch.half, torch.bfloat16), + dtypesIfROCM=floating_and_complex_types_and(torch.half, torch.bfloat16), + ), +] + +def reference_sign(x): + if x.dtype == np.bool_: + # `np.sign` doesn't support `bool`. + # >>> np.sign(True) + # ufunc 'sign' did not contain a loop + # with signature matching types dtype('bool') -> dtype('bool') + return np.sign(x, dtype=np.uint8).astype(np.bool_) + return np.sign(x) + + +def reference_sgn(x): + # NumPy doesn't have an equivalent to `torch.sgn` when the dtype is complex. + # For complex inputs, `np.sign` returns sign(x.real) + 0j if x.real != 0 else sign(x.imag) + 0j. 
+ # while `torch.sgn` returns, 0 if abs(input) == 0 else input/abs(input) + if x.dtype not in [np.complex64, np.complex128]: + return reference_sign(x) + + out = (x / np.abs(x)) + if out.ndim == 0: + # Handle x == 0 case + if (x == 0): + # Can't assign to np.complex object + # So make a new one. + return np.array(complex(0, 0), dtype=x.dtype) + return out + + # Handle x == 0 case + mask = (x == 0) + out[mask] = complex(0, 0) + return out + + +def reference_sigmoid(x): + # 'scipy.special.expit' not supported for the input types + if x.dtype in [np.complex64, np.complex128]: + return (1 / (1 + np.exp(-x))) + return scipy.special.expit(x) + + +def reference_logsigmoid(x): + return np.where( + x < 0, + x - np.log1p(np.exp(x)), + -np.log1p(np.exp(-x))) + + +def reference_hardsigmoid(x): + intermediate = x / 6 + 0.5 + y = np.clip(intermediate, 0, None) + return np.where(y > 1, 1, y).astype(x.dtype) + + +def reference_lgamma(x): + # scipy.special.gammaln returns `-inf` when input is `-inf`. + # While Pytorch, C and C++, all return `inf` when input is `-inf`. + # Reference: + # https://en.cppreference.com/w/cpp/numeric/math/lgamma + # https://en.cppreference.com/w/c/numeric/math/lgamma + + # To handle the above discrepancy, + # we replace -inf with inf so values + # that were originally -inf map to inf as expected + if x.dtype.kind == 'f': + x = np.where(x == float('-inf'), np.array(float('inf'), dtype=x.dtype), x) + + out = scipy.special.gammaln(x) + + if x.dtype == np.float16: + # `scipy.special.gammaln` returns output of float32 when input is float16, + # while `torch.lgamma` preserves `float16`. But due to smaller range of float16, + # Pytorch version outputs `inf` while SciPy returns finite values. + out = out.astype(np.float16) + + return out + + +def reference_mvlgamma(x, d): + if x.dtype == np.float16: + return scipy.special.multigammaln(x, d).astype(np.float16) + + return scipy.special.multigammaln(x, d) + +def reference_softplus(input, beta=1, threshold=20): + non_linear = input * beta <= threshold + output = input.copy() + output[non_linear] = np.log(1 + np.exp(beta * input[non_linear])) / beta + return output + +def reference_gelu(X, *, approximate='none'): + def _gelu_ref(X): + return X * stats.norm.cdf(X) + + def _tanh_gelu_ref(X): + M_SQRT_2_PI = math.sqrt(2 / math.pi) + Z = M_SQRT_2_PI * (X + 0.044715 * np.power(X, 3.0)) + return 0.5 * X * (1.0 + np.tanh(Z)) + + if approximate == 'tanh': + return _tanh_gelu_ref(X) + else: + return _gelu_ref(X) + + +def reference_one_hot(a: np.ndarray, num_classes: int = -1) -> np.ndarray: + if num_classes == -1: + num_classes = int(np.amax(a) + 1) + + idcs = a.reshape(-1) + np.arange(0, a.size, dtype=np.int64) * num_classes + one_hot = np.zeros((a.size, num_classes), dtype=a.dtype) + np.put(one_hot, idcs, 1) + return one_hot.reshape(*a.shape, -1) + + +def reference_mse_loss(input, target, reduction="mean"): + se = (input - target) ** 2 + if reduction == "mean": + return np.mean(se) + elif reduction == "sum": + return np.sum(se) + else: # reduction == "none" + return se + + +def wrapper_set_seed(op, *args, **kwargs): + """Wrapper to set seed manually for some functions like dropout + See: https://github.com/pytorch/pytorch/pull/62315#issuecomment-896143189 for more details. 
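+    Freezes the global RNG state, seeds it with 42, runs ``op``, and, for lazy tensors,
+    calls ``torch._lazy.mark_step()`` inside the frozen-RNG block so numerics match eager execution.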
+ """ + with freeze_rng_state(): + torch.manual_seed(42) + output = op(*args, **kwargs) + + if isinstance(output, torch.Tensor) and output.device.type == "lazy": + # We need to call mark step inside freeze_rng_state so that numerics + # match eager execution + torch._lazy.mark_step() + + return output + + +def reference_layer_norm(inp: np.ndarray, normalized_shape: Tuple[int], weight=None, bias=None, eps=1e-5): + return reference_native_layer_norm(inp, normalized_shape, weight, bias, eps)[0] + + +def reference_native_layer_norm(inp: np.ndarray, normalized_shape: Tuple[int], weight, bias, eps): + feature_size = np.prod(normalized_shape) + inp_view = inp.reshape(-1, feature_size) # type: ignore[call-overload] + mean = inp_view.mean(axis=-1, keepdims=True) + var = inp_view.var(axis=-1, ddof=0, keepdims=True) + Y = (inp_view - mean) / np.sqrt(var + eps) + if weight is None and bias is not None: + Y = Y + bias.reshape(-1) + elif weight is not None and bias is None: + Y = Y * weight.reshape(-1) + elif weight is not None and bias is not None: + Y = Y * weight.reshape(-1) + bias.reshape(-1) + axis = inp.ndim - len(normalized_shape) + stat_shape = inp.shape[:axis] + (1,) * len(normalized_shape) + return Y.reshape(*inp.shape), mean.reshape(stat_shape), (1.0 / np.sqrt(var + eps)).reshape(stat_shape) + + +def reference_group_norm(inp: np.ndarray, num_groups: int, weight=None, bias=None, eps=1e-5): + inp_view = inp + if np.prod(inp.shape) != 0: + inp_view = inp.reshape((inp.shape[0], num_groups, -1)) + mean = inp_view.mean(axis=-1, keepdims=True) + var = inp_view.var(axis=-1, ddof=0, keepdims=True) + Y = (inp_view - mean) / np.sqrt(var + eps) + Y = Y.reshape(inp.shape) + if weight is not None: + # weight is a vector of length equal to the channel + if len(Y.shape) > 2: + weight = np.expand_dims(weight, [0] + [idx + 2 for idx in range(inp.ndim - 2)]) + Y = Y * weight + if bias is not None: + # bias is a vector of length equal to the channel + if len(Y.shape) > 2: + bias = np.expand_dims(bias, [0] + [idx + 2 for idx in range(inp.ndim - 2)]) + Y = Y + bias + return Y + + +# using a custom reference function since numpy only has a string side arg (instead of right and side) and doesn't +# have an out_int32 arg. 
Additionally, numpy doesn't support searchsorted with ND arrays, so this splits those into +# stacked 1D cases +def reference_searchsorted(sorted_sequence, boundary, out_int32=False, right=False, side='left', sorter=None): + side = 'right' if (right or side == 'right') else 'left' + if len(sorted_sequence.shape) == 1 : + ret = np.searchsorted(sorted_sequence, boundary, side=side, sorter=sorter) + return ret.astype(np.int32) if out_int32 else ret + elif sorted_sequence.shape[0] == 0: + if sorter is not None: + sorter = sorter.flatten() + ret = np.searchsorted(sorted_sequence.flatten(), boundary.flatten(), side=side, sorter=sorter) + ret = ret.astype(np.int32) if out_int32 else ret + return ret.reshape(boundary.shape) + else: + # numpy searchsorted only supports 1D inputs so we split up ND inputs + orig_shape = boundary.shape + num_splits = np.prod(sorted_sequence.shape[:-1]) + splits = range(0, num_splits) + sorted_sequence, boundary = sorted_sequence.reshape(num_splits, -1), boundary.reshape(num_splits, -1) + if sorter is not None: + sorter = sorter.reshape(num_splits, -1) + + split_sequence = [sorted_sequence[i] for i in splits] + split_boundary = [boundary[i] for i in splits] + split_sorter = [sorter[i] if (sorter is not None) else None for i in splits] + + split_ret = [np.searchsorted(s_seq, b, side=side, sorter=s_sort) + for (s_seq, b, s_sort) in zip(split_sequence, split_boundary, split_sorter)] + split_ret = [i.astype(np.int32) for i in split_ret] if out_int32 else split_ret + return np.stack(split_ret).reshape(orig_shape) + +def loss_reference_reduction_wrapper(fn): + def wrapper(input, target, *, size_average=None, reduce=None, reduction="mean", **other_kwargs): + if size_average is not None or reduce is not None: + raise RuntimeError( + "The keyword arguments 'size_average' and 'reduce' are deprecated and not supported by this wrapper" + ) + output = fn(input, target, **other_kwargs) + if reduction == "mean": + return np.mean(output) + elif reduction == "sum": + return np.sum(output) + else: # reduction == "none" + return output + + return wrapper + +@loss_reference_reduction_wrapper +def reference_smooth_l1_loss(input, target, beta=1.0): + diff = input - target + abs_diff = np.abs(diff) + above_threshold = abs_diff >= beta + + loss = np.empty_like(input) + loss[above_threshold] = abs_diff[above_threshold] - 0.5 * beta + loss[~above_threshold] = diff[~above_threshold] ** 2 / (2 * beta) + + return loss + +def reference_std_var(f): + """Forwards unbiased/correction kwargs as NumPy's equivalent ddof""" + g = reference_reduction_numpy(f) + + @wraps(g) + def wrapper(x: np.ndarray, *args, **kwargs): + assert not ('unbiased' in kwargs and 'correction' in kwargs) + + if 'unbiased' in kwargs: + kwargs['ddof'] = int(kwargs.pop('unbiased')) + elif 'correction' in kwargs: + kwargs['ddof'] = kwargs.pop('correction') + + return g(x, *args, **kwargs) + + return wrapper + +def generate_std_var_kwargs(t: torch.Tensor, **kwargs): + """Generates unbiased/correction kwargs for std/var operators""" + yield ((), {'unbiased': True}) + yield ((), {'unbiased': False}) + + # Currently, calling std with correction is only enabled when + # both dim and keepdim are provided. 
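+    # so correction kwargs (0, 1, and numel // 2 over the reduced dims) are only
+    # generated for samples that provide both.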
+ if 'dim' in kwargs and 'keepdim' in kwargs: + yield ((), {'correction': 0}) + yield ((), {'correction': 1}) + + numel = torch.tensor(t.shape)[kwargs.get('dim')].prod() + yield ((), {'correction': numel // 2}) + +def error_inputs_mean(op_info, device, is_ref=False, **kwargs): + if is_ref: + err_msg1 = (r"mean\(\): could not infer output dtype. " + r"Input dtype must be either a floating point or complex dtype. " + r"Got: torch.int64") + else: + err_msg1 = (r"mean\(\): could not infer output dtype. " + r"Input dtype must be either a floating point or complex dtype. " + r"Got: Long") + yield ErrorInput( + SampleInput(make_tensor((3, 4, 5), dtype=torch.int64, device=device), []), + error_regex=err_msg1, + ) + + if is_ref: + err_msg2 = (r"mean\(\): could not infer output dtype. " + r"Optional dtype must be either a floating point or complex dtype. " + r"Got: torch.int64") + else: + err_msg2 = (r"mean\(\): could not infer output dtype. " + r"Optional dtype must be either a floating point or complex dtype. " + r"Got: Long") + yield ErrorInput( + SampleInput( + make_tensor((3, 4, 5), dtype=torch.float32, device=device), + [], + dtype=torch.int64), + error_regex=err_msg2 + ) + + if is_ref: + err_msg3 = "Expected out tensor to have dtype torch.float64, but got torch.float32 instead" + else: + err_msg3 = "Expected out tensor to have dtype double, but got float instead" + yield ErrorInput( + SampleInput( + make_tensor((3, 4, 5), dtype=torch.int64, device=device), + [], + dtype=torch.float64, + out=make_tensor([], dtype=torch.float32, device=device), + ), + error_regex=err_msg3 + ) + +# numpy implementation of torch.flatten +# unfortunately there's no np.flatten. we figure out the desired shape and call np.reshape +def reference_flatten(input, start_dim=0, end_dim=-1): + in_shape = input.shape + in_rank = len(in_shape) + for d in start_dim, end_dim: + if not ((in_rank == 0 and d in (-1, 0)) or -in_rank <= d < in_rank): + raise IndexError(f"Dimension out of range (expected to be in range of [{-in_rank}, {in_rank-1}], but got {d}") + end_dim = end_dim if end_dim >= 0 else in_rank + end_dim + start_dim = start_dim if start_dim >= 0 else in_rank + start_dim + if in_rank == 0: + end_dim = start_dim + if end_dim < start_dim: + raise RuntimeError("flatten() has invalid args: start_dim cannot come after end_dim") + flatten_bit_dim = functools.reduce(operator.mul, in_shape[start_dim:end_dim + 1], 1) + out_shape = in_shape[:start_dim] + (flatten_bit_dim,) + in_shape[end_dim + 1:] + return np.reshape(input, out_shape) + +# Operator database (sorted alphabetically) +op_db: List[OpInfo] = [ + UnaryUfuncInfo('abs', + aliases=('absolute', ), + ref=np.abs, + dtypes=all_types_and_complex_and(torch.half, torch.bfloat16, torch.chalf), + dtypesIfCUDA=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16, torch.chalf), + skips=( + DecorateInfo(unittest.skip("In-place abs not supported for complex tensors"), 'TestBwdGradients', + 'test_inplace_grad', dtypes=(torch.cdouble,)), + DecorateInfo(unittest.skip("In-place abs not supported for complex tensors"), 'TestBwdGradients', + 'test_inplace_gradgrad', dtypes=(torch.cdouble,)), + DecorateInfo(unittest.skip("In-place abs not supported for complex tensors"), 'TestFwdGradients', + 'test_inplace_forward_mode_AD', dtypes=(torch.cdouble,)), + DecorateInfo(unittest.skip("In-place abs not supported for complex tensors"), "TestSparseUnaryUfuncs", + "test_inplace", dtypes=(torch.cdouble, torch.cfloat, torch.chalf)), + # Reference: 
https://github.com/pytorch/pytorch/issues/49224 + DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_small', + dtypes=[torch.int8], active_if=TEST_WITH_ASAN), + # TODO: Fix test_out_arg_all_dtypes as torch.empty_like(expected_output) where expected_output=op(input) + # We can break the logic of the loop over all possible types but it is OK. + # https://github.com/pytorch/pytorch/blob/master/test/test_unary_ufuncs.py#L440-L449 + DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_out_arg_all_dtypes', + dtypes=[torch.cfloat, torch.cdouble]), + DecorateInfo(unittest.expectedFailure, 'TestMeta', 'test_meta_inplace', + dtypes=(torch.cdouble, torch.cfloat, torch.chalf)), + DecorateInfo(unittest.expectedFailure, 'TestMeta', 'test_dispatch_meta_inplace', + dtypes=(torch.cdouble, torch.cfloat, torch.chalf)), + DecorateInfo(unittest.expectedFailure, 'TestMeta', 'test_dispatch_symbolic_meta_inplace', + dtypes=(torch.cdouble, torch.cfloat, torch.chalf)), + DecorateInfo(unittest.expectedFailure, 'TestMeta', 'test_dispatch_symbolic_meta_inplace_all_strides', + dtypes=(torch.cdouble, torch.cfloat, torch.chalf)), + ), + supports_fwgrad_bwgrad=True, + assert_autodiffed=True, + supports_sparse=True, + supports_sparse_csr=True, + supports_sparse_csc=True, + supports_sparse_bsr=True, + supports_sparse_bsc=True, + supports_forward_ad=True), + # NOTE: CPU complex acos produces incorrect outputs (https://github.com/pytorch/pytorch/issues/42952) + UnaryUfuncInfo('acos', + aliases=('arccos', ), + ref=np.arccos, + domain=(-1, 1), + dtypes=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16), + dtypesIfCUDA=all_types_and_complex_and(torch.chalf, torch.bool, torch.half, torch.bfloat16), + assert_autodiffed=True, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + promotes_int_to_float=True, + decorators=(precisionOverride({torch.float16: 1e-2, + torch.bfloat16: 1e-1, + torch.complex64: 1e-2}),), + skips=( + DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_normal', + device_type='cuda', dtypes=[torch.cdouble], active_if=IS_WINDOWS), + DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_extremal', + device_type='cuda', dtypes=[torch.cdouble], active_if=IS_WINDOWS), + # Failing with wrong imaginary sign on at least some Windows jobs + DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_small', + device_type='cuda', dtypes=[torch.cdouble], + active_if=IS_WINDOWS), + # Failing with wrong imaginary sign on at least some Windows jobs + DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_large', + device_type='cuda', dtypes=[torch.cdouble], + active_if=IS_WINDOWS), + DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_large', + device_type='cpu', dtypes=[torch.cfloat, torch.cdouble]), + DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_extremal', + device_type='cpu', dtypes=[torch.cfloat, torch.cdouble]), + DecorateInfo(unittest.skip("Skipped!"), 'TestBwdGradients', 'test_fn_grad', + dtypes=[torch.cdouble], active_if=IS_WINDOWS), + DecorateInfo(unittest.skip("Skipped!"), 'TestBwdGradients', 'test_method_grad', + dtypes=[torch.cdouble], active_if=IS_WINDOWS), + DecorateInfo(unittest.skip("Skipped!"), 'TestBwdGradients', 'test_inplace_grad', + dtypes=[torch.cdouble], active_if=IS_WINDOWS), + DecorateInfo(unittest.skip("Skipped!"), 'TestFwdGradients', 
'test_forward_mode_AD', + dtypes=[torch.cdouble], active_if=IS_WINDOWS), + DecorateInfo(unittest.skip("Skipped!"), 'TestFwdGradients', 'test_inplace_forward_mode_AD', + dtypes=[torch.cdouble], active_if=IS_WINDOWS),)), + # NOTE: the derivative for inplace acosh is not implemented + UnaryUfuncInfo('acosh', + aliases=('arccosh', ), + ref=np.arccosh, + domain=(1, None), + dtypes=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16), + dtypesIfCUDA=all_types_and_complex_and(torch.chalf, torch.bool, torch.half, torch.bfloat16), + decorators=(precisionOverride({torch.bfloat16: 5e-2}),), + supports_inplace_autograd=False, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + promotes_int_to_float=True, + skips=( + DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_normal', + device_type='cuda', dtypes=[torch.cdouble], active_if=IS_WINDOWS), + DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_extremal', + device_type='cuda', dtypes=[torch.cdouble], active_if=IS_WINDOWS), + DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_extremal', + device_type='cpu', dtypes=[torch.cfloat, torch.cdouble]), + DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_large', + device_type='cpu', dtypes=[torch.cfloat, torch.cdouble]), + DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_extremal', + device_type='cuda', dtypes=[torch.cdouble], + active_if=IS_WINDOWS), + DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_large', + device_type='cuda', dtypes=[torch.cdouble], + active_if=IS_WINDOWS), + # Failing with wrong imaginary sign on at least some Windows jobs + DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_small', + device_type='cuda', dtypes=[torch.cdouble], + active_if=IS_WINDOWS), + ), + # acosh is not defined at x < 1 (real) + reference_numerics_filter=NumericsFilter( + condition=lambda x: (x < 1 if not x.is_complex() else torch.zeros_like(x, dtype=torch.bool)), + safe_val=2)), + BinaryUfuncInfo('add', + # NumPy has no builtin reference for the alpha kwarg, but it is easy enough to emulate + ref=lambda input, other, *, alpha=1: np.add(input, other) if alpha == 1 \ + else np.add(input, np.multiply(alpha, other)), + dtypes=all_types_and_complex_and(torch.bool, torch.bfloat16, + torch.float16, torch.chalf), + assert_autodiffed=True, + sample_inputs_func=sample_inputs_add_sub, + supports_fwgrad_bwgrad=True, + supports_forward_ad=True, + supports_two_python_scalars=True, + decorators=( + DecorateInfo( + toleranceOverride({torch.chalf: tol(atol=1e-2, rtol=0)}), + 'TestBinaryUfuncs', 'test_reference_numerics'), + ), + skips=( + # boolean alpha not handled properly + DecorateInfo(unittest.expectedFailure, + 'TestNNCOpInfo', + 'test_nnc_correctness', + dtypes=(torch.bool,)), + DecorateInfo(unittest.skip("Skipped!"), + 'TestCommon', + 'test_numpy_refs', + dtypes=(torch.complex128,)), + DecorateInfo(unittest.skip("Skipped!"), + 'TestBinaryUfuncs', + 'test_reference_numerics_extremal_values', + dtypes=(torch.complex64, torch.complex128)), + )), + OpInfo('item', + op=lambda inp, *args, **kwargs: wrapper_set_seed(torch.Tensor.item, inp, *args, **kwargs), + ref=np.ndarray.item, + method_variant=None, + dtypes=all_types_and_complex_and(torch.bfloat16, torch.float16, torch.chalf, torch.bool), + supports_out=False, + supports_autograd=False, + error_inputs_func=error_inputs_item, 
+ sample_inputs_func=sample_inputs_item, + skips=( + # Error testing item function variant + DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_variant_consistency_jit', + dtypes=(torch.float32, torch.complex64)), + # FX failed to normalize op - add the op to the op_skip list. + DecorateInfo(unittest.expectedFailure, 'TestNormalizeOperators', 'test_normalize_operator_exhaustive'), + # RuntimeError: Composite compliance check failed with the above error. + DecorateInfo(unittest.expectedFailure, 'TestCompositeCompliance', 'test_operator'), + # Booleans mismatch: AssertionError: False is not true + DecorateInfo(unittest.expectedFailure, 'TestFakeTensor', 'test_fake_autocast'), + # Booleans mismatch: AssertionError: False is not true + DecorateInfo(unittest.expectedFailure, 'TestFakeTensor', 'test_fake'), + )), + OpInfo('arange', + dtypes=all_types_and(torch.bfloat16, torch.float16), + supports_out=True, + supports_autograd=False, + is_factory_function=True, + error_inputs_func=error_inputs_arange, + sample_inputs_func=sample_inputs_arange, + skips=( + # https://github.com/pytorch/pytorch/issues/81774 + DecorateInfo(unittest.expectedFailure, 'TestNormalizeOperators', 'test_normalize_operator_exhaustive'), + + # Tests that assume input is a tensor or sequence of tensors + DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_variant_consistency_eager'), + DecorateInfo(unittest.expectedFailure, 'TestMathBits', 'test_neg_view'), + DecorateInfo(unittest.expectedFailure, 'TestMathBits', 'test_conj_view'), + DecorateInfo(unittest.expectedFailure, 'TestMathBits', 'test_neg_conj_view'), + + # Lazy tensor failures + DecorateInfo(unittest.expectedFailure, 'TestLazyOpInfo', 'test_dispatched_to_lazy'), + DecorateInfo(unittest.skip("Skipped!"), 'TestLazyOpInfo', 'test_correctness'), + DecorateInfo(unittest.skip("Skipped!"), 'TestLazyOpInfo', 'test_correctness_with_reusing_ir'), + + # Exception raised from analyzeImpl at ../torch/csrc/jit/ir/alias_analysis.cpp:608 + # We don't have an op for aten::arange but it isn't a special case. + # Argument types: bool, bool, bool, int, int, Device, boo + DecorateInfo(unittest.expectedFailure, 'TestNNCOpInfo', 'test_nnc_correctness'), + + # Captured graph does not contain aten::arange (succeeds on complex!) + # g: graph(): + # %25 : Long(1, strides=[1], requires_grad=0, device=cpu) = prim::Constant[value={1}]() + # return (%25) + DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_variant_consistency_jit', dtypes=(torch.float32,)), + + # UserWarning not triggered : Resized a non-empty tensor but did not warn about it. + DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_out_warning'), + )), + OpInfo('cauchy', + op=lambda inp, *args, **kwargs: wrapper_set_seed(torch.Tensor.cauchy_, inp, *args, **kwargs), + inplace_variant=torch.Tensor.cauchy_, + dtypes=floating_types_and(torch.float16, torch.bfloat16), + supports_out=False, + supports_autograd=False, + sample_inputs_func=sample_inputs_cauchy, + error_inputs_func=error_inputs_cauchy, + skips=( + # Tests that assume input tensor has a meaningful effect on output tensor + DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_variant_consistency_eager'), + DecorateInfo(unittest.expectedFailure, 'TestMathBits', 'test_neg_view'), + + # AssertionError: JIT Test does not execute any logic + DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_variant_consistency_jit'), + + # AssertionError: Tensor-likes are not close! 
+ DecorateInfo(unittest.skip('output is non-deterministic'), 'TestCommon', 'test_compare_cpu'), + + # FX failed to normalize op - add the op to the op_skip list. + DecorateInfo(unittest.expectedFailure, 'TestNormalizeOperators', 'test_normalize_operator_exhaustive'), + + # vmap: calling random operator not supported + DecorateInfo(unittest.skip("Test expects tensor input"), "TestVmapOperatorsOpInfo", "test_vmap_exhaustive"), + DecorateInfo(unittest.skip("Test expects tensor input"), "TestVmapOperatorsOpInfo", "test_op_has_batch_rule"), + + DecorateInfo(unittest.skip("make_traced() doesn't set seed properly!"), 'TestCommon', 'test_python_ref_executor'), + + DecorateInfo(unittest.expectedFailure, 'TestDecomp', 'test_quick'), + )), + OpInfo('exponential', + op=lambda inp, *args, **kwargs: wrapper_set_seed(torch.Tensor.exponential_, inp, *args, **kwargs), + inplace_variant=torch.Tensor.exponential_, + dtypes=floating_types_and(torch.float16, torch.bfloat16), + supports_out=False, + supports_autograd=False, + sample_inputs_func=sample_inputs_exponential, + error_inputs_func=error_inputs_exponential, + skips=( + # Tests that assume input tensor has a meaningful effect on output tensor + DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_variant_consistency_eager'), + DecorateInfo(unittest.expectedFailure, 'TestMathBits', 'test_neg_view'), + + # AssertionError: JIT Test does not execute any logic + DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_variant_consistency_jit'), + + # AssertionError: Tensor-likes are not close! + DecorateInfo(unittest.skip('output is non-deterministic'), 'TestCommon', 'test_compare_cpu'), + + # FX failed to normalize op - add the op to the op_skip list. + DecorateInfo(unittest.expectedFailure, 'TestNormalizeOperators', 'test_normalize_operator_exhaustive'), + + # vmap: calling random operator not supported + DecorateInfo(unittest.expectedFailure, "TestVmapOperatorsOpInfo", "test_vmap_exhaustive"), + DecorateInfo(unittest.expectedFailure, "TestVmapOperatorsOpInfo", "test_op_has_batch_rule"), + + DecorateInfo(unittest.expectedFailure, 'TestDecomp', 'test_quick'), + DecorateInfo(unittest.skip('output is non-deterministic'), 'TestCommon', 'test_compare_cpu'), + )), + OpInfo('geometric', + op=lambda inp, *args, **kwargs: wrapper_set_seed(torch.Tensor.geometric_, inp, *args, **kwargs), + inplace_variant=torch.Tensor.geometric_, + dtypes=floating_types_and(torch.float16, torch.bfloat16, torch.int8, torch.int16, torch.int32, torch.int64, torch.uint8), + supports_out=False, + supports_autograd=False, + sample_inputs_func=sample_inputs_geometric, + error_inputs_func=error_inputs_geometric, + skips=( + # Tests that assume input tensor has a meaningful effect on output tensor + DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_variant_consistency_eager'), + DecorateInfo(unittest.expectedFailure, 'TestMathBits', 'test_neg_view'), + + # AssertionError: JIT Test does not execute any logic + DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_variant_consistency_jit'), + + # AssertionError: Tensor-likes are not close! + DecorateInfo(unittest.skip('output is non-deterministic'), 'TestCommon', 'test_compare_cpu'), + + # FX failed to normalize op - add the op to the op_skip list. 
+ DecorateInfo(unittest.expectedFailure, 'TestNormalizeOperators', 'test_normalize_operator_exhaustive'), + + # vmap: calling random operator not supported + DecorateInfo(unittest.skip("Test expects tensor input"), "TestVmapOperatorsOpInfo", "test_vmap_exhaustive"), + DecorateInfo(unittest.skip("Test expects tensor input"), "TestVmapOperatorsOpInfo", "test_op_has_batch_rule"), + + DecorateInfo(unittest.expectedFailure, 'TestDecomp', 'test_quick'), + )), + OpInfo('log_normal', + op=lambda inp, *args, **kwargs: wrapper_set_seed(torch.Tensor.log_normal_, inp, *args, **kwargs), + inplace_variant=torch.Tensor.log_normal_, + dtypes=floating_types_and(torch.float16, torch.bfloat16), + supports_out=False, + supports_autograd=False, + sample_inputs_func=sample_inputs_log_normal, + error_inputs_func=error_inputs_log_normal, + skips=( + # Tests that assume input tensor has a meaningful effect on output tensor + DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_variant_consistency_eager'), + DecorateInfo(unittest.expectedFailure, 'TestMathBits', 'test_neg_view'), + + # AssertionError: JIT Test does not execute any logic + DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_variant_consistency_jit'), + + # AssertionError: Tensor-likes are not close! + DecorateInfo(unittest.skip('output is non-deterministic'), 'TestCommon', 'test_compare_cpu'), + # FX failed to normalize op - add the op to the op_skip list. + DecorateInfo(unittest.expectedFailure, 'TestNormalizeOperators', 'test_normalize_operator_exhaustive'), + + # vmap: calling random operator not supported + DecorateInfo(unittest.skip("Test expects tensor input"), "TestVmapOperatorsOpInfo", "test_vmap_exhaustive"), + DecorateInfo(unittest.skip("Test expects tensor input"), "TestVmapOperatorsOpInfo", "test_op_has_batch_rule"), + DecorateInfo(unittest.expectedFailure, 'TestDecomp', 'test_quick'), + )), + OpInfo('normal', + variant_test_name='in_place', + op=lambda inp, *args, **kwargs: wrapper_set_seed(torch.Tensor.normal_, inp, *args, **kwargs), + inplace_variant=torch.Tensor.normal_, + dtypes=floating_and_complex_types_and(torch.float16, torch.bfloat16), + supports_out=False, + supports_autograd=False, + sample_inputs_func=sample_inputs_normal, + error_inputs_func=error_inputs_normal, + skips=( + # Tests that assume input is a tensor or sequence of tensors + DecorateInfo(unittest.skip("Test expects tensor input"), "TestCommon", "test_noncontiguous_samples"), + + # Tests that assume input tensor has a meaningful effect on output tensor + DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_variant_consistency_eager'), + DecorateInfo(unittest.expectedFailure, 'TestMathBits', 'test_neg_view'), + DecorateInfo(unittest.expectedFailure, 'TestMathBits', 'test_conj_view'), + DecorateInfo(unittest.expectedFailure, 'TestMathBits', 'test_neg_conj_view'), + DecorateInfo(unittest.expectedFailure, 'TestDecomp', 'test_quick'), + # AssertionError: JIT Test does not execute any logic + DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_variant_consistency_jit'), + # AssertionError: Tensor-likes are not close! + DecorateInfo(unittest.skip('output is non-deterministic'), 'TestCommon', 'test_compare_cpu'), + # FX failed to normalize op - add the op to the op_skip list. 
+ DecorateInfo(unittest.expectedFailure, 'TestNormalizeOperators', 'test_normalize_operator_exhaustive'), + # vmap: calling random operator not supported + DecorateInfo(unittest.skip("Test expects tensor input"), "TestVmapOperatorsOpInfo", "test_vmap_exhaustive"), + DecorateInfo(unittest.skip("Test expects tensor input"), "TestVmapOperatorsOpInfo", "test_op_has_batch_rule"), + )), + OpInfo('uniform', + op=lambda inp, *args, **kwargs: wrapper_set_seed(torch.Tensor.uniform_, inp, *args, **kwargs), + method_variant=None, + inplace_variant=torch.Tensor.uniform_, + dtypes=floating_and_complex_types_and(torch.bfloat16, torch.float16), + supports_out=False, + supports_autograd=False, + is_factory_function=False, + sample_inputs_func=sample_inputs_uniform, + error_inputs_func=error_inputs_uniform, + skips=( + # FX failed to normalize op - add the op to the op_skip list. + DecorateInfo(unittest.expectedFailure, 'TestNormalizeOperators', 'test_normalize_operator_exhaustive'), + # Tests that assume input tensor has a meaningful effect on output tensor + DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_variant_consistency_eager'), + DecorateInfo(unittest.expectedFailure, 'TestMathBits', 'test_neg_view'), + DecorateInfo(unittest.expectedFailure, 'TestMathBits', 'test_conj_view'), + DecorateInfo(unittest.expectedFailure, 'TestMathBits', 'test_neg_conj_view'), + # AssertionError: JIT Test does not execute any logic + DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_variant_consistency_jit'), + # aten.uniform was not decomposed + DecorateInfo(unittest.expectedFailure, 'TestDecomp', 'test_quick'), + DecorateInfo(unittest.skip('output is non-deterministic'), 'TestCommon', 'test_compare_cpu'), + )), + BinaryUfuncInfo('clamp_max', + ref=_clamp_max_numpy, + dtypes=all_types_and(torch.bool, torch.float16, torch.bfloat16), + supports_forward_ad=True, + supports_rhs_python_scalar=False, + supports_fwgrad_bwgrad=True, + rhs_make_tensor_kwargs=dict(exclude_zero=False), + skips=( + # RuntimeError: "max_elementwise_cuda" not implemented for 'ComplexFloat' + DecorateInfo(unittest.expectedFailure, + 'TestBinaryUfuncs', + 'test_type_promotion', + device_type='cuda'), + # dispatch to lazy test failed + DecorateInfo(unittest.expectedFailure, 'TestLazyOpInfo', 'test_dispatched_to_lazy'), + # test error disabled since rhs non-tensor python scalar is supported + DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_errors'), + )), + BinaryUfuncInfo('clamp_min', + ref=_clamp_min_numpy, + dtypes=all_types_and(torch.bool, torch.float16, torch.bfloat16), + supports_forward_ad=True, + supports_rhs_python_scalar=False, + supports_fwgrad_bwgrad=True, + rhs_make_tensor_kwargs=dict(exclude_zero=False), + skips=( + # RuntimeError: "min_elementwise_cuda" not implemented for 'ComplexFloat' + DecorateInfo(unittest.expectedFailure, + 'TestBinaryUfuncs', + 'test_type_promotion', + device_type='cuda'), + # dispatch to lazy test failed + DecorateInfo(unittest.expectedFailure, 'TestLazyOpInfo', 'test_dispatched_to_lazy'), + # test error disabled since rhs non-tensor python scalar is supported + DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_errors'), + )), + BinaryUfuncInfo('mul', + aliases=('multiply',), + dtypes=all_types_and_complex_and(torch.chalf, torch.float16, torch.bfloat16, torch.bool), + assert_autodiffed=True, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + supports_two_python_scalars=True, + error_inputs_sparse_func=error_inputs_sparse_mul, + 
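+                    # mul is additionally exercised against every sparse layout via the
+                    # per-layout sample input functions registered below.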
sample_inputs_sparse_coo_func=partial(sample_inputs_sparse_mul, layout=torch.sparse_coo), + sample_inputs_sparse_csr_func=partial(sample_inputs_sparse_mul, layout=torch.sparse_csr), + sample_inputs_sparse_csc_func=partial(sample_inputs_sparse_mul, layout=torch.sparse_csc), + sample_inputs_sparse_bsr_func=partial(sample_inputs_sparse_mul, layout=torch.sparse_bsr), + sample_inputs_sparse_bsc_func=partial(sample_inputs_sparse_mul, layout=torch.sparse_bsc)), + BinaryUfuncInfo('sub', + # NumPy has no builtin reference for the alpha kwarg, but it is easy enough to emulate + ref=lambda input, other, *, alpha=1: np.subtract(input, np.multiply(alpha, other)), + aliases=('subtract',), + dtypes=all_types_and_complex_and(torch.bfloat16, torch.float16, torch.chalf), + assert_autodiffed=True, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + sample_inputs_func=sample_inputs_add_sub, + supports_two_python_scalars=True, + decorators=( + DecorateInfo( + toleranceOverride({torch.float16: tol(atol=1e-2, rtol=0), + torch.bfloat16: tol(atol=1e-5, rtol=5e-3), + torch.complex32: tol(atol=1e-5, rtol=1e-3)}), + 'TestBinaryUfuncs', 'test_reference_numerics'), + DecorateInfo( + toleranceOverride({torch.chalf: tol(atol=1e-2, rtol=0)}), + 'TestCommon', 'test_complex_half_reference_testing', device_type='cpu'), + DecorateInfo( + toleranceOverride({torch.chalf: tol(atol=5e-3, rtol=0)}), + 'TestDecomp', 'test_comprehensive', device_type='cpu'), + DecorateInfo( + toleranceOverride({torch.chalf: tol(atol=5e-3, rtol=0)}), + 'TestDecomp', 'test_quick', device_type='cpu'), + ), + skips=( + DecorateInfo(unittest.skip("Skipped!"), + 'TestBinaryUfuncs', + 'test_reference_numerics', + dtypes=(torch.uint8,)), + DecorateInfo(unittest.skip("Skipped!"), + 'TestBinaryUfuncs', + 'test_reference_numerics_small_values', + dtypes=(torch.uint8,)), + )), + OpInfo('addmm', + # This addmm OpInfo is for when alpha and beta are not both equal to 1. + # alpha=beta=1 is tested in the following opinfo, because that special case will + # trigger addmm being decomposed by a jit pass. + dtypes=all_types_and_complex_and(torch.float16, torch.bfloat16), + dtypesIfROCM=floating_and_complex_types_and(torch.float16, torch.bfloat16), + dtypesIfCUDA=floating_and_complex_types_and(torch.float16, torch.bfloat16), + assert_autodiffed=True, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + gradcheck_nondet_tol=GRADCHECK_NONDET_TOL, + sample_inputs_func=sample_inputs_addmm, + skips=( + # Issue with conj and torch dispatch, see https://github.com/pytorch/pytorch/issues/82479 + DecorateInfo( + unittest.skip("Skipped!"), + 'TestSchemaCheckModeOpInfo', + 'test_schema_correctness', + dtypes=(torch.complex64, torch.complex128)), + )), + OpInfo('addmm', + # When alpha=beta=1 as compile-time constants, JIT will decompose addmm into mm and add. 
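+           # That is why this variant fixes alpha=1, beta=1 in its sample inputs and lists
+           # aten::add / aten::mm as the expected non-fusible autodiff nodes.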
+ variant_test_name='decomposed', + dtypes=all_types_and_complex_and(torch.float16, torch.bfloat16), + dtypesIfCUDA=floating_and_complex_types_and(torch.float16, torch.bfloat16), + assert_autodiffed=True, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + gradcheck_nondet_tol=GRADCHECK_NONDET_TOL, + autodiff_nonfusible_nodes=['aten::add', 'aten::mm'], + sample_inputs_func=partial(sample_inputs_addmm, alpha=1, beta=1), + skips=( + # Issue with conj and torch dispatch, see https://github.com/pytorch/pytorch/issues/82479 + DecorateInfo( + unittest.skip("Skipped!"), + 'TestSchemaCheckModeOpInfo', + 'test_schema_correctness', + dtypes=(torch.complex64, torch.complex128)), + # https://github.com/pytorch/pytorch/issues/71784 + DecorateInfo(unittest.skip('Skipped!'), 'TestNNCOpInfo', 'test_nnc_correctness', + device_type='cpu', dtypes=(torch.float16,)), + )), + OpInfo('addmv', + dtypes=all_types_and_complex_and(torch.bfloat16, torch.float16), + dtypesIfCUDA=floating_types_and(torch.float16, torch.complex64, torch.complex128, + torch.bfloat16), + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + decorators=[ + DecorateInfo( + toleranceOverride({torch.half: tol(atol=1e-5, rtol=3e-3)}), + 'TestInductorOpInfo', 'test_comprehensive', device_type='cpu'), + ], + sample_inputs_func=sample_inputs_addmv), + OpInfo('addbmm', + ref=lambda M, batch1, batch2, beta=1, alpha=1: np.add(np.multiply(np.asarray(beta, dtype=M.dtype), M), + np.multiply(np.asarray(alpha, dtype=batch1.dtype), + np.sum(np.matmul(batch1, batch2), axis=0))), + dtypes=all_types_and_complex_and(torch.bfloat16, torch.float16), + dtypesIfCUDA=floating_and_complex_types_and(torch.float16, + *[torch.bfloat16] + if SM53OrLater or TEST_WITH_ROCM else []), + # Runs very slowly on slow gradcheck - alternatively reduce input sizes + gradcheck_fast_mode=True, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + decorators=[ + DecorateInfo( + toleranceOverride({torch.float32: tol(atol=1.3e-05, rtol=1.3e-05), + torch.complex64: tol(atol=1e-05, rtol=1.2e-03)}), + 'TestCommon', 'test_numpy_refs'), + # MPS has slightly worse precision. Is this acceptable? 
+ DecorateInfo( + toleranceOverride({torch.float32: tol(atol=1.3e-04, rtol=1.3e-04), + torch.complex64: tol(atol=1e-05, rtol=1.2e-03)}), + 'TestCommon', 'test_numpy_ref_mps'), + DecorateInfo( + toleranceOverride({torch.float32: tol(atol=1e-5, rtol=1e-5)}), + 'TestConsistency', + 'test_output_match', + ), + DecorateInfo( + toleranceOverride({torch.float32: tol(atol=1.5e-05, rtol=1e-05)}), + 'TestCommon', 'test_out'), + DecorateInfo( + toleranceOverride({torch.half: tol(atol=6e-3, rtol=6e-3)}), + 'TestInductorOpInfo', 'test_comprehensive', device_type='cpu'), + ], + skips=( + # NVIDIA only assures that bfloat16 is supported by bmm if SM >= 5.3 + DecorateInfo(unittest.skip("Skipped!"), 'TestCommon', 'test_dtypes', device_type='cuda', active_if=not SM53OrLater), + # addbmm does not correctly warn when resizing out= inputs + DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_out_warning'), + # https://github.com/pytorch/pytorch/issues/55907 + DecorateInfo(unittest.skip("Skipped!"), 'TestCommon', 'test_variant_consistency_eager'), + ), + sample_inputs_func=sample_inputs_addbmm), + OpInfo('baddbmm', + dtypes=all_types_and_complex_and(torch.bfloat16, torch.float16), + dtypesIfCUDA=floating_types_and(torch.float16, torch.complex64, torch.complex128, + torch.bfloat16), + backward_dtypesIfCUDA=floating_types_and(torch.float16, + *[torch.bfloat16] if SM53OrLater or TEST_WITH_ROCM else [], + torch.complex64, torch.complex128), + # Runs very slowly on slow gradcheck - alternatively reduce input sizes + gradcheck_fast_mode=True, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + decorators=[ + DecorateInfo( + toleranceOverride({torch.complex64: tol(atol=1e-05, rtol=1.2e-03)}), + 'TestCommon', 'test_variant_consistency_eager', device_type='cuda'), + DecorateInfo( + toleranceOverride({torch.complex64: tol(atol=1e-05, rtol=1.2e-03)}), + 'TestMathBits', 'test_conj_view', device_type='cuda'), + ], + sample_inputs_func=sample_inputs_baddbmm, + skips=( + # Issue with conj and torch dispatch, see https://github.com/pytorch/pytorch/issues/82479 + DecorateInfo( + unittest.skip("Skipped!"), + 'TestSchemaCheckModeOpInfo', + 'test_schema_correctness', + dtypes=(torch.complex64, torch.complex128)), + )), + OpInfo('dot', + dtypes=all_types_and_complex_and(torch.float16, torch.bfloat16), + dtypesIfCUDA=floating_and_complex_types_and(torch.float16, torch.bfloat16), + assert_autodiffed=True, + sample_inputs_func=sample_inputs_dot_vdot, + error_inputs_func=error_inputs_dot_vdot, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + skips=( + # Issue with conj and torch dispatch, see https://github.com/pytorch/pytorch/issues/82479 + DecorateInfo( + unittest.skip("Skipped!"), + 'TestSchemaCheckModeOpInfo', + 'test_schema_correctness', + dtypes=(torch.complex64, torch.complex128)), + )), + OpInfo('vdot', + dtypes=all_types_and_complex_and(torch.float16, torch.bfloat16), + dtypesIfCUDA=floating_and_complex_types_and(torch.float16, torch.bfloat16), + sample_inputs_func=sample_inputs_dot_vdot, + error_inputs_func=error_inputs_dot_vdot, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + skips=( + # Issue with conj and torch dispatch, see https://github.com/pytorch/pytorch/issues/82479 + DecorateInfo( + unittest.skip("Skipped!"), + 'TestSchemaCheckModeOpInfo', + 'test_schema_correctness', + dtypes=(torch.complex64, torch.complex128)), + )), + OpInfo('bmm', + dtypes=all_types_and_complex_and(torch.float16, torch.bfloat16), + dtypesIfCUDA=floating_and_complex_types_and(torch.float16, + 
*[torch.bfloat16] + if SM53OrLater or TEST_WITH_ROCM else []), + assert_autodiffed=True, + assert_jit_shape_analysis=True, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + skips=( + # NVIDIA only assures that bfloat16 is supported by bmm if SM >= 5.3 + DecorateInfo(unittest.skip("Skipped!"), 'TestCommon', 'test_dtypes', device_type='cuda', active_if=not SM53OrLater), + DecorateInfo(toleranceOverride({torch.float32: tol(atol=1e-5, rtol=1e-5)}), + "TestCommon", "test_out") + ), + sample_inputs_func=sample_inputs_bmm), + OpInfo('mv', + dtypes=all_types_and_complex_and(torch.float16, torch.bfloat16), + dtypesIfCUDA=floating_and_complex_types_and(torch.float16, torch.bfloat16), + assert_autodiffed=True, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + sample_inputs_func=sample_inputs_mv), + OpInfo('addr', + dtypes=all_types_and_complex_and(torch.bool, torch.bfloat16, torch.float16), + # Reference: https://github.com/pytorch/pytorch/issues/50747 + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + skips=( + # Reference: https://github.com/pytorch/pytorch/issues/50747 + DecorateInfo(unittest.skip("Skipped!"), 'TestCommon', 'test_variant_consistency_eager', + dtypes=all_types_and_complex_and(torch.bool, torch.bfloat16, torch.float16)), + ), + sample_inputs_func=sample_inputs_addr, + gradcheck_nondet_tol=GRADCHECK_NONDET_TOL), + OpInfo('addcmul', + dtypes=all_types_and_complex_and(torch.float16, torch.bfloat16), + assert_autodiffed=True, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + skips=( + # TODO: update sample inputs with for_inplace_variant kwarg to support this test + DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_variant_consistency_eager'), + ), + sample_inputs_func=sample_inputs_addcmul_addcdiv, + reference_inputs_func=partial( + reference_inputs_elementwise_ternary, sample_inputs_func=reference_inputs_addcmul_addcdiv)), + OpInfo('addcdiv', + dtypes=floating_and_complex_types_and(torch.float16, torch.bfloat16), + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + skips=( + # TODO: update sample inputs with for_inplace_variant kwarg to support this test + DecorateInfo(unittest.expectedFailure, + 'TestCommon', + 'test_variant_consistency_eager'), + ), + sample_inputs_func=sample_inputs_addcmul_addcdiv, + reference_inputs_func=partial( + reference_inputs_elementwise_ternary, sample_inputs_func=reference_inputs_addcmul_addcdiv)), + UnaryUfuncInfo('asin', + aliases=('arcsin', ), + ref=np.arcsin, + domain=(-1, 1), + supports_sparse=True, + supports_sparse_csr=True, + supports_sparse_csc=True, + supports_sparse_bsr=True, + supports_sparse_bsc=True, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + promotes_int_to_float=True, + dtypes=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16), + dtypesIfCUDA=all_types_and_complex_and(torch.chalf, torch.bool, torch.half, torch.bfloat16), + assert_autodiffed=True, + decorators=[ + DecorateInfo( + toleranceOverride({torch.float16: tol(atol=1e-05, rtol=1e-03)}), + 'TestUnaryUfuncs', device_type='cuda'), + precisionOverride({torch.bfloat16: 1e-2}), + ], + skips=( + DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_extremal', + device_type='cpu', dtypes=[torch.cfloat, torch.cdouble]), + DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_large', + device_type='cpu', dtypes=[torch.cfloat, torch.cdouble]), + DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_extremal', + 
device_type='cuda', dtypes=[torch.cdouble], + active_if=IS_WINDOWS), + DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_large', + device_type='cuda', dtypes=[torch.cdouble], + active_if=IS_WINDOWS), + DecorateInfo(unittest.skip("Skipped! sparse backward not supported"), + 'TestSparseUnaryUfuncs', 'test_sparse_fn_grad'), + )), + # NOTE: derivative for inplace asinh is not implemented + UnaryUfuncInfo('asinh', + aliases=('arcsinh', ), + ref=np.arcsinh, + dtypes=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16), + dtypesIfCUDA=all_types_and_complex_and(torch.chalf, torch.bool, torch.half, torch.bfloat16), + decorators=(precisionOverride({torch.bfloat16: 5e-2}),), + supports_inplace_autograd=False, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + supports_sparse=True, + supports_sparse_csr=True, + supports_sparse_csc=True, + supports_sparse_bsr=True, + supports_sparse_bsc=True, + promotes_int_to_float=True, + skips=( + DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_extremal', + device_type='cpu', dtypes=[torch.cfloat, torch.cdouble]), + DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_large', + device_type='cpu', dtypes=[torch.cfloat, torch.cdouble]), + DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_small', + device_type='cpu', dtypes=[torch.cfloat, torch.cdouble]), + DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_normal', + device_type='cpu', dtypes=[torch.cfloat, torch.cdouble]), + DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_extremal', + device_type='cuda', dtypes=[torch.cdouble], + active_if=IS_WINDOWS), + DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_large', + device_type='cuda', dtypes=[torch.cdouble], + active_if=IS_WINDOWS), + DecorateInfo(unittest.skip("Skipped! 
sparse backward not supported"), + 'TestSparseUnaryUfuncs', 'test_sparse_fn_grad'), + )), + UnaryUfuncInfo('atan', + aliases=('arctan', ), + ref=np.arctan, + dtypes=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16), + dtypesIfCUDA=all_types_and_complex_and(torch.chalf, torch.bool, torch.half, torch.bfloat16), + assert_autodiffed=True, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + supports_sparse=True, + supports_sparse_csr=True, + supports_sparse_csc=True, + supports_sparse_bsr=True, + supports_sparse_bsc=True, + promotes_int_to_float=True, + decorators=(precisionOverride({torch.bfloat16: 1e-2}),), + skips=( + DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_small', + active_if=TEST_WITH_ROCM, device_type='cuda', dtypes=[torch.complex64, torch.complex128]), + DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_extremal', + active_if=TEST_WITH_ROCM, device_type='cuda', dtypes=[torch.complex128]), + DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_extremal', + device_type='cpu', dtypes=[torch.cfloat, torch.cdouble]), + DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_large', + device_type='cpu', dtypes=[torch.cfloat, torch.cdouble]), + DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_small', + device_type='cpu', dtypes=[torch.cfloat, torch.cdouble]), + DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_extremal', + device_type='cuda', dtypes=[torch.cfloat, torch.cdouble], + active_if=IS_WINDOWS), + DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_large', + device_type='cuda', dtypes=[torch.cfloat, torch.cdouble], + active_if=IS_WINDOWS), + DecorateInfo(unittest.skip("Skipped! 
sparse backward not supported"), + 'TestSparseUnaryUfuncs', 'test_sparse_fn_grad'), + )), + BinaryUfuncInfo('atan2', + aliases=('arctan2',), + dtypes=all_types_and(torch.bool, torch.bfloat16, torch.half), + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + promotes_int_to_float=True, + supports_rhs_python_scalar=False, + skips=( + # Incorrectly attempts to use a scalar for the second argument + DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_jit_alias_remapping'), + )), + UnaryUfuncInfo('atanh', + aliases=('arctanh', ), + ref=np.arctanh, + domain=(-1, 1), + dtypes=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16), + dtypesIfCUDA=all_types_and_complex_and(torch.chalf, torch.bool, torch.half, torch.bfloat16), + decorators=(precisionOverride({torch.bfloat16: 1e-2}),), + supports_inplace_autograd=False, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + supports_sparse=True, + supports_sparse_csr=True, + supports_sparse_csc=True, + supports_sparse_bsr=True, + supports_sparse_bsc=True, + promotes_int_to_float=True, + skips=( + DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_small', + device_type='cpu', dtypes=[torch.cfloat, torch.cdouble]), + DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_extremal', + device_type='cpu', dtypes=[torch.cfloat, torch.cdouble]), + DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_large', + device_type='cpu', dtypes=[torch.cfloat, torch.cdouble]), + DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_extremal', + device_type='cuda', dtypes=[torch.cfloat, torch.cdouble], + active_if=IS_WINDOWS), + DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_large', + device_type='cuda', dtypes=[torch.cfloat], + active_if=IS_WINDOWS), + DecorateInfo(unittest.skip("Skipped! 
sparse backward not supported"), + 'TestSparseUnaryUfuncs', 'test_sparse_fn_grad'), + DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_extremal', + active_if=TEST_WITH_ROCM, device_type='cuda', dtypes=[torch.complex128]), + )), + OpInfo('allclose', + dtypes=floating_and_complex_types_and(torch.float16, torch.bfloat16), + ref=np.allclose, + supports_autograd=False, + supports_forward_ad=False, + sample_inputs_func=sample_inputs_allclose, + skips=( + DecorateInfo(unittest.skip("Skipped!"), 'TestJit', 'test_variant_consistency_jit'), + DecorateInfo(unittest.skip("Skipped!"), 'TestNNCOpInfo', 'test_nnc_correctness'), + DecorateInfo(unittest.skip("Skipped!"), 'TestCudaFuserOpInfo'), + ), + supports_out=False), + OpInfo('broadcast_to', + ref=np.broadcast_to, + dtypes=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16), + supports_out=False, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + # See https://github.com/pytorch/pytorch/pull/78358 + check_batched_forward_grad=False, + sample_inputs_func=sample_inputs_broadcast_to), + OpInfo('broadcast_shapes', + op=torch.broadcast_shapes, + ref=np.broadcast_shapes if np.lib.NumpyVersion(np.__version__) >= '1.20.0' else None, + dtypes=_dispatch_dtypes((torch.float32,)), + supports_out=False, + supports_gradgrad=False, + assert_autodiffed=False, + supports_autograd=False, + supports_scripting=False, + sample_inputs_func=sample_inputs_broadcast_shapes, + skips=( + # https://github.com/pytorch/pytorch/issues/64997 + DecorateInfo(unittest.expectedFailure, 'TestNormalizeOperators', 'test_normalize_operator_exhaustive'), + # skip dtype tests since broadcast_shape is not device dependent. + # having dtypes limited to torch.float32 would cause test_dtypes to report unexpected success + DecorateInfo(unittest.skip('Skipped!'), 'TestCommon', 'test_dtypes'), + # skip these tests since we have non tensor input + DecorateInfo(unittest.skip('Skipped!'), "TestCommon", "test_noncontiguous_samples"), + DecorateInfo(unittest.skip('Skipped!'), 'TestCommon', 'test_variant_consistency_eager'), + DecorateInfo(unittest.skip('Skipped!'), 'TestJit', 'test_variant_consistency_jit'), + )), + OpInfo('broadcast_tensors', + ref=np.broadcast_arrays, + dtypes=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16), + sample_inputs_func=sample_inputs_broadcast_tensors, + reference_inputs_func=reference_inputs_broadcast_tensors, + supports_out=False, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + # See https://github.com/pytorch/pytorch/pull/78358 + check_batched_forward_grad=False, + skips=( + # https://github.com/pytorch/pytorch/issues/64997 + DecorateInfo(unittest.expectedFailure, 'TestNormalizeOperators', 'test_normalize_operator_exhaustive'), + # JIT does not support variadic tensors. + # RuntimeError: input->type()->kind() == TypeKind::OptionalType + # INTERNAL ASSERT FAILED at "../torch/csrc/jit/passes/utils/check_alias_annotation.cpp":252, + # please report a bug to PyTorch. 
+ DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_variant_consistency_jit', dtypes=[torch.float32]), + )), + OpInfo('block_diag', + dtypes=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16, torch.chalf), + supports_out=False, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + # Default batching rule in core doesn't work for ops with TensorList args + check_batched_forward_grad=False, + skips=( + # https://github.com/pytorch/pytorch/issues/64997 + DecorateInfo(unittest.expectedFailure, 'TestNormalizeOperators', 'test_normalize_operator_exhaustive'), + # JIT does not support variadic tensors. + # RuntimeError: input->type()->kind() == TypeKind::OptionalType + # INTERNAL ASSERT FAILED at "../torch/csrc/jit/passes/utils/check_alias_annotation.cpp":252, + # please report a bug to PyTorch. + DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_variant_consistency_jit', dtypes=[torch.float32]), + ), + sample_inputs_func=sample_inputs_block_diag), + UnaryUfuncInfo('bitwise_not', + ref=np.bitwise_not, + dtypes=integral_types_and(torch.bool), + operator_variant=operator.invert, + supports_autograd=False), + BinaryUfuncInfo('bitwise_left_shift', + op=torch.bitwise_left_shift, + dtypes=integral_types(), + dtypesIfCUDA=integral_types(), + operator_variant=operator.lshift, + inplace_operator_variant=operator.ilshift, + supports_autograd=False, + supports_one_python_scalar=True, + rhs_make_tensor_kwargs=dict(low=0), + skips=( + DecorateInfo(unittest.skip("Skipped!"), 'TestBinaryUfuncs', 'test_type_promotion'), + # https://github.com/pytorch/pytorch/issues/70904 + DecorateInfo(unittest.skip("Some inputs produce undefined outputs"), 'TestCommon', 'test_compare_cpu'), + )), + BinaryUfuncInfo('bitwise_right_shift', + op=torch.bitwise_right_shift, + dtypes=integral_types(), + dtypesIfCUDA=integral_types(), + operator_variant=operator.rshift, + inplace_operator_variant=operator.irshift, + supports_autograd=False, + supports_one_python_scalar=True, + rhs_make_tensor_kwargs=dict(low=0), + skips=( + DecorateInfo(unittest.skip("Skipped!"), 'TestBinaryUfuncs', 'test_type_promotion'), + # https://github.com/pytorch/pytorch/issues/70904 + DecorateInfo(unittest.skip("Some inputs produce undefined outputs"), 'TestCommon', 'test_compare_cpu'), + )), + OpInfo('combinations', + op=torch.combinations, + dtypes=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16), + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + # See https://github.com/pytorch/pytorch/pull/78358 + check_batched_forward_grad=False, + supports_out=False, + sample_inputs_func=sample_inputs_combinations), + OpInfo('cartesian_prod', + op=torch.cartesian_prod, + dtypes=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16), + supports_out=False, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + # See https://github.com/pytorch/pytorch/pull/78358 + check_batched_forward_grad=False, + sample_inputs_func=sample_inputs_cartesian_prod, + skips=( + DecorateInfo(unittest.expectedFailure, 'TestMathBits', 'test_neg_view'), + DecorateInfo(unittest.expectedFailure, 'TestMathBits', 'test_conj_view'), + DecorateInfo(unittest.expectedFailure, 'TestNormalizeOperators', 'test_normalize_operator_exhaustive'), + # RuntimeError: input->type()->kind() == TypeKind::OptionalType + # INTERNAL ASSERT FAILED at "../torch/csrc/jit/passes/utils/check_alias_annotation.cpp":270 + DecorateInfo(unittest.expectedFailure, + 'TestJit', 'test_variant_consistency_jit', dtypes=(torch.float32,)), + )), + 
OpInfo('cdist', + dtypes=floating_types(), + supports_out=False, + supports_gradgrad=False, + assert_autodiffed=False, + sample_inputs_func=sample_inputs_cdist), + UnaryUfuncInfo('ceil', + ref=np.ceil, + dtypes=all_types_and(torch.half, torch.bfloat16), + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + skips=( + DecorateInfo(unittest.expectedFailure, + 'TestNNCOpInfo', + 'test_nnc_correctness', + dtypes=tuple(t for t in integral_types() if t != torch.uint8)), + ), + supports_sparse=True, + supports_sparse_csr=True, + supports_sparse_csc=True, + supports_sparse_bsr=True, + supports_sparse_bsc=True, + assert_autodiffed=True), + OpInfo('cholesky', + dtypes=floating_and_complex_types(), + sample_inputs_func=sample_inputs_linalg_cholesky, + gradcheck_wrapper=gradcheck_wrapper_hermitian_input, + decorators=[skipCUDAIfNoMagma, skipCPUIfNoLapack],), + OpInfo('cholesky_inverse', + dtypes=floating_and_complex_types(), + backward_dtypes=floating_and_complex_types(), + # https://github.com/pytorch/pytorch/issues/80411 + gradcheck_fast_mode=True, + supports_fwgrad_bwgrad=True, + supports_forward_ad=True, + check_batched_gradgrad=True, + sample_inputs_func=sample_inputs_linalg_cholesky_inverse, + gradcheck_wrapper=gradcheck_wrapper_triangular_input_real_positive_diagonal, + decorators=[skipCUDAIfNoMagma, skipCPUIfNoLapack], + skips=( + # Strides are not the same! Original strides were ((4, 2, 1),) and strides are now ((4, 1, 2),) + DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_out'),)), + OpInfo('cholesky_solve', + op=torch.cholesky_solve, + dtypes=floating_and_complex_types(), + sample_inputs_func=sample_inputs_cholesky_solve, + check_batched_gradgrad=False, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + gradcheck_wrapper=lambda *args, **kwargs: gradcheck_wrapper_triangular_input(*args, idx=1, **kwargs), + decorators=[skipCUDAIfNoMagma, skipCPUIfNoLapack]), + OpInfo('chunk', + dtypes=all_types_and_complex_and(torch.bool, torch.bfloat16, torch.float16, torch.chalf), + sample_inputs_func=sample_inputs_chunk, + reference_inputs_func=reference_inputs_chunk, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + supports_out=False), + OpInfo('unsafe_chunk', + dtypes=all_types_and_complex_and(torch.bool, torch.bfloat16, torch.float16, torch.chalf), + sample_inputs_func=sample_inputs_chunk, + check_batched_forward_grad=False, + reference_inputs_func=reference_inputs_chunk, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + supports_out=False), + OpInfo('clone', + ref=np.copy, + dtypes=all_types_and_complex_and(torch.bool, torch.bfloat16, torch.float16, torch.chalf), + sample_inputs_func=sample_inputs_clone_contiguous, + reference_inputs_func=reference_inputs_clone_contiguous, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + supports_out=False, + skips=( + # TypeError: _copy_dispatcher() got an unexpected keyword argument 'memory_format' + # (NumPy reference needs to be extended with memory_format) + DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_numpy_ref'), + DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_numpy_ref_mps'), + ),), + OpInfo('contiguous', + op=lambda x, *args, **kwargs: x.contiguous(*args, **kwargs), + dtypes=all_types_and_complex_and(torch.bool, torch.bfloat16, torch.float16, torch.chalf), + sample_inputs_func=sample_inputs_clone_contiguous, + reference_inputs_func=reference_inputs_clone_contiguous, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + autodiff_fusible_nodes=['aten::contiguous'], + 
assert_jit_shape_analysis=True, + supports_out=False, + skips=( + DecorateInfo(unittest.expectedFailure, 'TestNormalizeOperators', 'test_normalize_operator_exhaustive'), + )), + OpInfo('sum_to_size', + op=lambda x, *args, **kwargs: x.sum_to_size(*args, **kwargs), + dtypes=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16), + sample_inputs_func=sample_inputs_sum_to_size, + error_inputs_func=error_inputs_sum_to_size, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + supports_out=False, + skips=( + # lambda impl + DecorateInfo(unittest.expectedFailure, "TestNormalizeOperators", "test_normalize_operator_exhaustive"), + DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_variant_consistency_jit', dtypes=(torch.float,)), + )), + OpInfo('clamp', + aliases=('clip',), + ref=_clamp_numpy, + dtypes=all_types_and(torch.bfloat16, torch.half), + sample_inputs_func=sample_inputs_clamp, + reference_inputs_func=partial(reference_inputs_elementwise_ternary, sample_inputs_func=sample_inputs_clamp), + assert_autodiffed=True, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + skips=( + # NNC appear to not handle boolean clamp + DecorateInfo(unittest.expectedFailure, + 'TestNNCOpInfo', + 'test_nnc_correctness', + dtypes=(torch.bool,)), + )), + UnaryUfuncInfo('positive', + ref=np.positive, + dtypes=all_types_and_complex_and(torch.half, torch.bfloat16, torch.chalf), + supports_out=False, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + supports_sparse=True, + supports_sparse_csr=True, + supports_sparse_csc=True, + supports_sparse_bsr=True, + supports_sparse_bsc=True, + ), + UnaryUfuncInfo('conj', + ref=np.conj, + dtypes=all_types_and_complex_and(torch.bool, torch.bfloat16, + torch.half, torch.chalf), + supports_sparse=True, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + # See https://github.com/pytorch/pytorch/pull/78358 + check_batched_forward_grad=False, + supports_out=False), + UnaryUfuncInfo('conj_physical', + decomp_aten_name='_conj_physical', + ref=np.conj, + dtypes=all_types_and_complex_and(torch.bool, torch.bfloat16, + torch.half, torch.chalf), + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + supports_sparse=True, + supports_sparse_csr=True, + supports_sparse_csc=True, + supports_sparse_bsr=True, + supports_sparse_bsc=True, + skips=( + # RuntimeError: inputSet && outputSet + # INTERNAL ASSERT FAILED at "../torch/csrc/jit/passes/utils/check_alias_annotation.cpp":118, + # please report a bug to PyTorch. + DecorateInfo(unittest.skip("Skipped!"), 'TestJit', 'test_variant_consistency_jit', dtypes=(torch.float32, )), + DecorateInfo(unittest.skip("Skipped! 
conj_physical_ not implemented for sparse"), + 'TestSparseUnaryUfuncs', 'test_inplace'), + )), + OpInfo('resolve_conj', + dtypes=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16), + sample_inputs_func=sample_inputs_view_as_real, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + supports_out=False, + ), + OpInfo('resolve_neg', + dtypes=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16, torch.chalf), + sample_inputs_func=sample_inputs_view_as_real, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + supports_out=False, + ), + OpInfo('view_as_real', + dtypes=complex_types(), + supports_forward_ad=True, + supports_out=False, + supports_fwgrad_bwgrad=True, + sample_inputs_func=sample_inputs_view_as_real, + test_conjugated_samples=False, + ), + OpInfo('view_as_complex', + dtypes=floating_types_and(torch.half), + supports_out=False, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + test_neg_view=False, + sample_inputs_func=sample_inputs_view_as_complex, + skips=( + # RuntimeError: Tensor must have a last dimension with stride 1 + DecorateInfo(unittest.expectedFailure, "TestCommon", "test_noncontiguous_samples"), + # RuntimeError: "eq_cpu" not implemented for 'ComplexHalf' + DecorateInfo(unittest.skip("Skipped!"), 'TestNNCOpInfo', 'test_nnc_correctness', dtypes=(torch.half,)), + # RuntimeError: view size is not compatible with input tensor's size and stride + DecorateInfo(unittest.expectedFailure, "TestMeta", "test_dispatch_symbolic_meta_outplace_all_strides"), + )), + BinaryUfuncInfo('complex', + dtypes=floating_types_and(torch.half), + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + supports_rhs_python_scalar=False, + error_inputs_func=error_inputs_complex, + skips=( + # Tests don't account for complex's type promotion semantics + DecorateInfo(unittest.expectedFailure, 'TestBinaryUfuncs', 'test_type_promotion'), + DecorateInfo(unittest.skip("Skipped!"), 'TestCommon', 'test_out', device_type='mps'), + DecorateInfo(unittest.expectedFailure, 'TestMeta', 'test_binary_ufuncs_mixed_dtype'),)), + BinaryUfuncInfo('copysign', + dtypes=all_types_and(torch.bool, torch.half, torch.bfloat16), + promotes_int_to_float=True, + # https://github.com/pytorch/pytorch/issues/80411 + gradcheck_fast_mode=True, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True), + OpInfo('corrcoef', + dtypes=all_types_and_complex_and(torch.half, torch.bfloat16), + sample_inputs_func=sample_inputs_corrcoef, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + # See https://github.com/pytorch/pytorch/pull/78358 + check_batched_forward_grad=False, + skips=( + # Issue with conj and torch dispatch, see https://github.com/pytorch/pytorch/issues/82479 + DecorateInfo( + unittest.skip("Skipped!"), + 'TestSchemaCheckModeOpInfo', + 'test_schema_correctness', + dtypes=(torch.complex64, torch.complex128)), + ), + supports_out=False), + UnaryUfuncInfo('cos', + ref=np.cos, + dtypes=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16), + dtypesIfCUDA=all_types_and_complex_and(torch.chalf, torch.bool, torch.half, torch.bfloat16), + assert_autodiffed=True, + handles_large_floats=False, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + promotes_int_to_float=True, + decorators=(precisionOverride({torch.bfloat16: 1e-2}),), + skips=( + DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_large', + dtypes=(torch.cfloat, torch.cdouble,), device_type='cpu', active_if=IS_WINDOWS), + # This fails on CUDA but passes on 
ROCm + DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_large', + dtypes=(torch.cdouble,), device_type='cuda'), + DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_extremal', + dtypes=[torch.cfloat, torch.cdouble], active_if=IS_WINDOWS), + DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_extremal', + device_type='cpu', + dtypes=[torch.cfloat, torch.cdouble], active_if=IS_MACOS), + # AssertionError: Tensor-likes are not close! + # Greatest absolute difference: nan at index (700,) (up to 1e-05 allowed) + # Greatest relative difference: nan at index (700,) (up to 0.001 allowed) + DecorateInfo(unittest.expectedFailure, 'TestUnaryUfuncs', 'test_reference_numerics_large', + device_type='cuda', + dtypes=(torch.chalf,), active_if=IS_WINDOWS), + )), + UnaryUfuncInfo('cosh', + ref=np_unary_ufunc_integer_promotion_wrapper(np.cosh), + dtypes=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16), + dtypesIfCUDA=all_types_and_complex_and(torch.chalf, torch.bool, torch.half, torch.bfloat16), + assert_autodiffed=True, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + promotes_int_to_float=True, + skips=( + # Reference: https://github.com/pytorch/pytorch/issues/48641 + DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_large', + device_type='cpu', dtypes=[torch.int8]), + DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_large', + dtypes=[torch.cdouble]), + DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_extremal', + dtypes=[torch.cfloat, torch.cdouble], active_if=IS_WINDOWS), + DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_large', + dtypes=[torch.cfloat, torch.cdouble], active_if=IS_WINDOWS), + DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_extremal', + device_type='cpu', + dtypes=[torch.cfloat, torch.cdouble], active_if=IS_MACOS), + DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_large', + device_type='cpu', + dtypes=[torch.cfloat, torch.cdouble], active_if=IS_MACOS), + # AssertionError: Tensor-likes are not close! 
+ # Greatest absolute difference: nan at index (6000,) (up to 1e-05 allowed) + # Greatest relative difference: nan at index (6000,) (up to 0.001 allowed) + DecorateInfo(unittest.expectedFailure, 'TestUnaryUfuncs', 'test_reference_numerics_large', + device_type='cuda', + dtypes=(torch.chalf,), active_if=IS_WINDOWS), + )), + OpInfo('cov', + dtypes=all_types_and_complex_and(torch.half, torch.bfloat16), + sample_inputs_func=sample_inputs_cov, + error_inputs_func=error_inputs_cov, + supports_out=False, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + skips=( + # Issue with conj and torch dispatch, see https://github.com/pytorch/pytorch/issues/82479 + DecorateInfo( + unittest.skip("Skipped!"), + 'TestSchemaCheckModeOpInfo', + 'test_schema_correctness', + dtypes=(torch.complex64, torch.complex128)), + # Float did not match double + DecorateInfo(unittest.expectedFailure, 'TestBwdGradients', 'test_fn_grad'), + # Jacobian mismatch + DecorateInfo(unittest.expectedFailure, 'TestBwdGradients', 'test_fn_gradgrad'), + DecorateInfo(unittest.expectedFailure, 'TestFwdGradients', 'test_forward_mode_AD'), + DecorateInfo(unittest.skip("Barely fails"), 'TestFwdGradients', 'test_fn_fwgrad_bwgrad'), + # JIT test not working for tensor kwargs (https://github.com/pytorch/pytorch/issues/58507) + # RuntimeError: + # undefined value tensor: + # File "", line 3 + # def the_method(i0): + # return torch.cov(i0, correction=0, fweights=None, aweights=tensor([0.0518, 0.4681], dtype=torch.float32, requires_grad=True)) # noqa: B950 + # ~~~~~~ <--- HERE + DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_variant_consistency_jit'), + )), + OpInfo('cross', + dtypes=all_types_and_complex_and(torch.half, torch.bfloat16), + sample_inputs_func=sample_inputs_cross, + supports_fwgrad_bwgrad=True, + supports_out=True, + supports_forward_ad=True), + OpInfo('cumsum', + dtypes=all_types_and_complex_and(torch.half, torch.bfloat16), + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + skips=( + # cumsum does not handle correctly out= dtypes + DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_out'), + ), + sample_inputs_func=sample_inputs_cumulative_ops), + OpInfo('cumprod', + dtypes=all_types_and_complex_and(torch.float16, torch.bfloat16), + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + skips=( + # cumprod does not handle correctly out= dtypes + DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_out'), + ), + # gradgradcheck fails in fast_mode=True: #56275 + sample_inputs_func=sample_inputs_cumprod, + gradcheck_fast_mode=False), + OpInfo('cummax', + dtypes=all_types_and(torch.bool, torch.half, torch.bfloat16), + sample_inputs_func=partial(sample_inputs_cumulative_ops, supports_dtype_kwargs=False), + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + skips=( + ), + gradcheck_nondet_tol=GRADCHECK_NONDET_TOL), + OpInfo('cummin', + dtypes=all_types_and(torch.bool, torch.half, torch.bfloat16), + sample_inputs_func=partial(sample_inputs_cumulative_ops, supports_dtype_kwargs=False), + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + skips=( + ), + gradcheck_nondet_tol=GRADCHECK_NONDET_TOL), + UnaryUfuncInfo('deg2rad', + ref=np.radians, + decorators=(precisionOverride({torch.bfloat16: 7e-1, + torch.float16: 7e-1}),), + dtypes=all_types_and(torch.bool, torch.half, torch.bfloat16), + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + supports_sparse=True, + supports_sparse_csr=True, + supports_sparse_csc=True, + supports_sparse_bsr=True, + supports_sparse_bsc=True, + 
promotes_int_to_float=True), + OpInfo('diff', + op=torch.diff, + # np.diff has np._NoValue as default values for prepend and append, compare_with_reference breaks if prepend/append + # are set as None when converting to numpy + ref=lambda input, n=1, dim=-1, prepend=np._NoValue, append=np._NoValue: ( + np.diff(input, n, dim, np._NoValue if prepend is None else prepend, np._NoValue if append is None else append) + ), + dtypes=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16), + # Runs very slowly on slow gradcheck - alternatively reduce input sizes + gradcheck_fast_mode=True, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + sample_inputs_func=sample_inputs_diff, + error_inputs_func=error_inputs_diff, + # See https://github.com/pytorch/pytorch/pull/78358 + check_batched_forward_grad=False, + skips=( + )), + BinaryUfuncInfo('div', + aliases=('divide',), + variant_test_name='no_rounding_mode', + dtypes=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16), + dtypesIfCUDA=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16, torch.chalf), + # Runs very slowly on slow gradcheck - alternatively reduce input sizes + gradcheck_fast_mode=True, + supports_forward_ad=True, + promotes_int_to_float=True, + supports_fwgrad_bwgrad=True, + supports_two_python_scalars=True, + assert_autodiffed=True, + rhs_make_tensor_kwargs=dict(exclude_zero=True),), + BinaryUfuncInfo('div', + aliases=('divide',), + variant_test_name='trunc_rounding', + dtypes=all_types_and(torch.half, torch.bfloat16), + sample_inputs_func=partial(sample_inputs_elementwise_binary, sample_kwargs=dict(rounding_mode="trunc")), + # https://github.com/pytorch/pytorch/issues/80411 + gradcheck_fast_mode=True, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + supports_two_python_scalars=True, + assert_autodiffed=True, + rhs_make_tensor_kwargs=dict(exclude_zero=True), + decorators=( + # See https://github.com/pytorch/pytorch/issues/111126 + DecorateInfo(unittest.expectedFailure, 'TestBinaryUfuncs', 'test_type_promotion'), + ), + skips=( + # RuntimeError: MALFORMED INPUT: Unhandled node kind (in computeValue): aten::div + DecorateInfo(unittest.expectedFailure, 'TestNNCOpInfo', 'test_working'), + )), + BinaryUfuncInfo('div', + aliases=('divide',), + variant_test_name='floor_rounding', + dtypes=all_types_and(torch.half, torch.bfloat16), + sample_inputs_func=partial(sample_inputs_elementwise_binary, sample_kwargs=dict(rounding_mode="floor")), + # https://github.com/pytorch/pytorch/issues/80411 + gradcheck_fast_mode=True, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + supports_two_python_scalars=True, + assert_autodiffed=True, + rhs_make_tensor_kwargs=dict(exclude_zero=True), + decorators=( + # See https://github.com/pytorch/pytorch/issues/111126 + DecorateInfo(unittest.expectedFailure, 'TestBinaryUfuncs', 'test_type_promotion'), + ), + skips=( + # RuntimeError: MALFORMED INPUT: Unhandled node kind (in computeValue): aten::div + DecorateInfo(unittest.expectedFailure, 'TestNNCOpInfo', 'test_working'), + )), + BinaryUfuncInfo('true_divide', + dtypes=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16), + dtypesIfCUDA=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16, torch.chalf), + supports_forward_ad=True, + promotes_int_to_float=True, + supports_fwgrad_bwgrad=True, + supports_two_python_scalars=True, + rhs_make_tensor_kwargs=dict(exclude_zero=True)), + OpInfo('equal', + dtypes=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16), + 
ref=lambda input, other: (input == other).all(), + sample_inputs_func=sample_inputs_equal, + supports_autograd=False, + supports_tracing=False, + skips=( + )), + UnaryUfuncInfo('exp', + ref=np_unary_ufunc_integer_promotion_wrapper(np.exp), + dtypes=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16), + dtypesIfCUDA=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16, torch.chalf), + skips=( + # Reference: https://github.com/pytorch/pytorch/issues/48010 + DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_extremal', + device_type='cpu', dtypes=[torch.cfloat, torch.cdouble]), + DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_large', + device_type='cpu', dtypes=[torch.cfloat, torch.cdouble]), + ), + assert_autodiffed=True, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + promotes_int_to_float=True), + OpInfo('expand', + op=lambda self, shape: self.expand(shape), + dtypes=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16), + sample_inputs_func=sample_inputs_expand, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + assert_jit_shape_analysis=True, + supports_out=False, + skips=( + DecorateInfo(unittest.expectedFailure, 'TestNormalizeOperators', 'test_normalize_operator_exhaustive'), + )), + OpInfo('expand_as', + op=lambda self, other: self.expand_as(other), + dtypes=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16), + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + sample_inputs_func=sample_inputs_expand_as, + supports_out=False, + skips=( + DecorateInfo(unittest.expectedFailure, 'TestNormalizeOperators', 'test_normalize_operator_exhaustive'),), + ), + OpInfo('diag', + ref=np.diag, + dtypes=all_types_and_complex_and(torch.bool, torch.bfloat16, torch.float16), + dtypesIfCUDA=all_types_and_complex_and(torch.chalf, torch.bool, torch.half, torch.bfloat16), + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + check_batched_forward_grad=False, + sample_inputs_func=sample_inputs_diag, + error_inputs_func=error_inputs_diag), + OpInfo('diag_embed', + dtypes=all_types_and_complex_and(torch.bool, torch.bfloat16, torch.float16, torch.chalf), + supports_out=False, + # Runs very slowly on slow gradcheck - alternatively reduce input sizes + gradcheck_fast_mode=True, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + sample_inputs_func=sample_inputs_diagonal_diag_embed, + reference_inputs_func=reference_inputs_diagonal_diag_embed, + error_inputs_func=error_inputs_diagonal_diag_embed), + OpInfo('diagonal', + aten_backward_name='diagonal_backward', + dtypes=all_types_and_complex_and(torch.bool, torch.bfloat16, torch.float16, torch.chalf), + supports_out=False, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + sample_inputs_func=sample_inputs_diagonal_diag_embed, + reference_inputs_func=reference_inputs_diagonal_diag_embed, + error_inputs_func=error_inputs_diagonal_diag_embed), + OpInfo('diagonal_copy', + dtypes=all_types_and_complex_and(torch.bool, torch.bfloat16, torch.float16, torch.chalf), + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + sample_inputs_func=sample_inputs_diagonal_diag_embed, + reference_inputs_func=reference_inputs_diagonal_diag_embed, + error_inputs_func=error_inputs_diagonal_diag_embed), + OpInfo('diagonal_scatter', + dtypes=all_types_and_complex_and(torch.bool, torch.bfloat16, torch.float16), + supports_out=False, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + 
sample_inputs_func=sample_inputs_diagonal_scatter), + BinaryUfuncInfo('eq', + ref=np.equal, + dtypes=all_types_and_complex_and(torch.bool, torch.bfloat16, torch.float16, torch.chalf), + always_returns_bool=True, + supports_autograd=False, + sample_inputs_func=sample_inputs_comparison_ops, + skips=( + )), + BinaryUfuncInfo('fmax', + op=torch.fmax, + dtypes=all_types_and(torch.float16, torch.bfloat16, torch.bool), + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + supports_rhs_python_scalar=False, + skips=( + # RuntimeError: "max_elementwise_cuda" not implemented for 'ComplexFloat' + DecorateInfo(unittest.skip("Skipped!"), 'TestBinaryUfuncs', 'test_type_promotion'), + )), + BinaryUfuncInfo('fmin', + op=torch.fmin, + dtypes=all_types_and(torch.float16, torch.bfloat16, torch.bool), + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + supports_rhs_python_scalar=False, + skips=( + # RuntimeError: "min_elementwise_cuda" not implemented for 'ComplexFloat' + DecorateInfo(unittest.skip("Skipped!"), 'TestBinaryUfuncs', 'test_type_promotion'), + )), + BinaryUfuncInfo('fmod', + ref=np.fmod, + dtypes=all_types_and(torch.float16, torch.bfloat16), + dtypesIfCUDA=all_types_and(torch.float16, torch.bfloat16), + # https://github.com/pytorch/pytorch/issues/80411 + gradcheck_fast_mode=True, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + assert_autodiffed=None, + rhs_make_tensor_kwargs={'exclude_zero': True}, + decorators=( + DecorateInfo(unittest.skip("Skipped!"), 'TestBinaryUfuncs', + 'test_contig_vs_every_other', + dtypes=(torch.bfloat16,)), + DecorateInfo(unittest.skip("Skipped!"), 'TestBinaryUfuncs', + 'test_non_contig', + dtypes=(torch.bfloat16,)), + DecorateInfo(unittest.skip("Skipped!"), 'TestBinaryUfuncs', + 'test_reference_numerics', + dtypes=(torch.bfloat16,)), + DecorateInfo(unittest.skip("Skipped!"), 'TestBinaryUfuncs', + 'test_reference_numerics_small_values', + dtypes=(torch.uint8,)), + )), + BinaryUfuncInfo('remainder', + ref=np.remainder, + dtypes=all_types_and(torch.float16, torch.bfloat16), + dtypesIfCUDA=all_types_and(torch.float16, torch.bfloat16), + # https://github.com/pytorch/pytorch/issues/80411 + gradcheck_fast_mode=True, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + assert_autodiffed=None, + operator_variant=operator.mod, + inplace_operator_variant=operator.imod, + supports_one_python_scalar=True, + rhs_make_tensor_kwargs={'exclude_zero': True}, + decorators=( + DecorateInfo(unittest.skip("Skipped!"), 'TestBinaryUfuncs', + 'test_contig_vs_every_other', + dtypes=(torch.bfloat16,)), + DecorateInfo(unittest.skip("Skipped!"), 'TestBinaryUfuncs', + 'test_non_contig', + dtypes=(torch.bfloat16,)), + DecorateInfo(unittest.skip("Skipped!"), 'TestBinaryUfuncs', + 'test_reference_numerics', + dtypes=(torch.bfloat16,)), + DecorateInfo(unittest.skip("Skipped!"), 'TestBinaryUfuncs', + 'test_reference_numerics_small_values', + dtypes=(torch.uint8,)), + DecorateInfo(unittest.skip("Skipped!"), 'TestNNCOpInfo', + 'test_nnc_correctness', + dtypes=(torch.bfloat16,)), + # Fails on XLA + # False is not true : Tensors failed to compare as equal! 
+ # Attempted to compare equality of tensors with different dtypes + DecorateInfo(unittest.skip("Skipped!"), 'TestOpInfo', device_type='xla', dtypes=(torch.long,)), + )), + UnaryUfuncInfo('frac', + ref=lambda x: np.modf(x)[0], + dtypes=floating_types_and(torch.bfloat16, torch.float16), + dtypesIfCUDA=floating_types_and(torch.float16, torch.bfloat16), + assert_autodiffed=True, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + supports_sparse=True, + supports_sparse_csr=True, + supports_sparse_csc=True, + supports_sparse_bsr=True, + supports_sparse_bsc=True, + skips=( + DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_extremal', + dtypes=(torch.bfloat16, torch.float16, torch.float32, torch.float64)), + # 76047 + DecorateInfo(unittest.expectedFailure, 'TestNNCOpInfo', 'test_nnc_correctness', + dtypes=(torch.bfloat16, torch.float32, torch.float64)), + )), + OpInfo('stft', + decorators=[ + skipCPUIfNoFFT, + DecorateInfo(unittest.skip("Skipped! stft does not match the native function"), + 'TestJit', 'test_variant_consistency_jit'), + ], + dtypes=floating_and_complex_types(), + sample_inputs_func=sample_inputs_stft, + # Runs very slowly on slow gradcheck - alternatively reduce input sizes + gradcheck_fast_mode=True, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + check_batched_forward_grad=False, + check_batched_grad=False, + check_batched_gradgrad=False, + supports_out=False, + gradcheck_nondet_tol=GRADCHECK_NONDET_TOL, + ), + OpInfo('istft', + dtypes=complex_types(), + sample_inputs_func=sample_inputs_istft, + # Runs very slowly on slow gradcheck - alternatively reduce input sizes + gradcheck_fast_mode=True, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + check_batched_forward_grad=False, + check_batched_grad=False, + check_batched_gradgrad=False, + supports_out=False, + decorators=( + DecorateInfo(unittest.skip("Skipped! 
istft does not match the native function"), + 'TestJit', 'test_variant_consistency_jit'), + ), + skips=( + skipCPUIfNoFFT, + # gradcheck fails on ROCm (gh-68429) + # grad is computed improperly (probably for weights tensor) + DecorateInfo(unittest.expectedFailure, 'TestBwdGradients', 'test_fn_grad'), + # Pre-existing condition (calls .item); needs to be fixed + DecorateInfo(unittest.expectedFailure, 'TestCompositeCompliance', 'test_backward'), + )), + UnaryUfuncInfo('floor', + ref=np.floor, + dtypes=all_types_and(torch.half, torch.bfloat16), + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + skips=( + DecorateInfo(unittest.expectedFailure, + 'TestNNCOpInfo', + 'test_nnc_correctness', + dtypes=tuple(t for t in integral_types() if t != torch.uint8)), + ), + supports_sparse=True, + supports_sparse_csr=True, + supports_sparse_csc=True, + supports_sparse_bsr=True, + supports_sparse_bsc=True, + assert_autodiffed=True), + OpInfo('flip', + op=torch.flip, + dtypes=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16), + sample_inputs_func=sample_inputs_flip, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + supports_out=False), + OpInfo('fliplr', + op=torch.fliplr, + dtypes=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16), + sample_inputs_func=sample_inputs_fliplr_flipud, + error_inputs_func=error_inputs_fliplr, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + supports_out=False), + OpInfo('flipud', + op=torch.flipud, + dtypes=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16), + sample_inputs_func=sample_inputs_fliplr_flipud, + error_inputs_func=error_inputs_flipud, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + supports_out=False), + OpInfo('sparse.sampled_addmm', + dtypes=floating_and_complex_types(), + supports_autograd=True, + sample_inputs_func=sample_inputs_sparse_sampled_addmm, + decorators=[ + skipCUDAIf(not ((_get_torch_cuda_version() >= (11, 3)) + or (_get_torch_rocm_version() >= (5, 2))), + "cusparseSDDMM was added in 11.2.1"), + skipCPUIfNoMklSparse, ], + skips=( + # NotImplementedError: Tensors of type SparseCsrTensorImpl do not have is_contiguous + DecorateInfo(unittest.skip("Skipped!"), 'TestCommon', 'test_noncontiguous_samples'), + # RuntimeError: Sparse CSR tensors do not have strides. 
+ DecorateInfo(unittest.skip("Skipped!"), 'TestCommon', 'test_out'), + DecorateInfo(unittest.skip("Skipped!"), 'TestTags', 'test_tags'), + # RuntimeError: sampled_addmm: Expected result to have sparse csr layout, but got Strided + DecorateInfo(unittest.skip("Skipped!"), 'TestCommon', 'test_out_warning'), + # RuntimeError: Sparse CSR tensors do not have strides + DecorateInfo(unittest.skip("Skipped!"), 'TestCommon', 'test_variant_consistency_eager'), + # RuntimeError: Sparse CSR tensors do not have strides + DecorateInfo(unittest.skip("Skipped!"), 'TestCompositeCompliance', 'test_operator'), + # RuntimeError: Sparse CSR tensors do not have strides + DecorateInfo(unittest.skip("Skipped!"), 'TestCompositeCompliance', 'test_backward'), + # RuntimeError: Sparse CSR tensors do not have strides + DecorateInfo(unittest.skip("Skipped!"), 'TestMathBits', 'test_conj_view'), + # RuntimeError: Sparse CSR tensors do not have strides + DecorateInfo(unittest.skip("Skipped!"), 'TestMathBits', 'test_neg_conj_view'), + # RuntimeError: Sparse CSR tensors do not have strides + DecorateInfo(unittest.skip("Skipped!"), 'TestMathBits', 'test_neg_view'), + # RuntimeError: Sparse CSR tensors do not have strides + DecorateInfo(unittest.skip("Skipped!"), 'TestJit', 'test_variant_consistency_jit'), + # RuntimeError: unsupported memory format option Preserve + DecorateInfo(unittest.skip("Skipped!"), 'TestJit', 'test_variant_consistency_jit'), + # RuntimeError: sparse_mask does not support automatic differentiation for outputs with complex dtype + # RuntimeError: Sparse CSR tensors do not have strides + DecorateInfo(unittest.skip("Skipped!"), 'TestFwdGradients', 'test_fn_fwgrad_bwgrad'), + # ValueError: Sparse output is not supported at gradcheck yet. Please call to_dense(masked_grad=...) ... + DecorateInfo(unittest.skip("Skipped!"), 'TestBwdGradients', 'test_fn_grad'), + # RuntimeError: sparse_mask does not support automatic differentiation for outputs with complex dtype. + # RuntimeError: Sparse CSR tensors do not have is_contiguous + DecorateInfo(unittest.skip("Skipped!"), 'TestBwdGradients', 'test_fn_gradgrad'), + # ValueError: Sparse output is not supported at gradcheck yet. Please call to_dense(masked_grad=...) ... + DecorateInfo(unittest.skip("Skipped!"), 'TestFwdGradients', 'test_forward_mode_AD'), + )), + OpInfo('sparse.mm', + dtypes=floating_types_and(torch.bfloat16), + variant_test_name='reduce', + supports_autograd=True, + supports_out=False, + supports_gradgrad=False, + supports_forward_ad=False, + sample_inputs_func=sample_inputs_sparse_mm_reduce, + decorators=[onlyCPU], + skips=( + # NotImplementedError: Tensors of type SparseCsrTensorImpl do not have is_contiguous + DecorateInfo(unittest.skip("Skipped!"), 'TestCommon', 'test_noncontiguous_samples'), + # RuntimeError: Sparse CSR tensors do not have strides. 
+ DecorateInfo(unittest.skip("Skipped!"), 'TestTags', 'test_tags'), + # RuntimeError: Sparse CSR tensors do not have strides + DecorateInfo(unittest.skip("Skipped!"), 'TestCommon', 'test_variant_consistency_eager'), + # RuntimeError: Sparse CSR tensors do not have strides + DecorateInfo(unittest.skip("Skipped!"), 'TestCompositeCompliance', 'test_operator'), + # RuntimeError: Sparse CSR tensors do not have strides + DecorateInfo(unittest.skip("Skipped!"), 'TestCompositeCompliance', 'test_backward'), + # RuntimeError: Sparse CSR tensors do not have strides + DecorateInfo(unittest.skip("Skipped!"), 'TestMathBits', 'test_conj_view'), + # RuntimeError: Sparse CSR tensors do not have strides + DecorateInfo(unittest.skip("Skipped!"), 'TestMathBits', 'test_neg_conj_view'), + # RuntimeError: Sparse CSR tensors do not have strides + DecorateInfo(unittest.skip("Skipped!"), 'TestMathBits', 'test_neg_view'), + # RuntimeError: Sparse CSR tensors do not have strides + DecorateInfo(unittest.skip("Skipped!"), 'TestJit', 'test_variant_consistency_jit'), + # RuntimeError: unsupported memory format option Preserve + DecorateInfo(unittest.skip("Skipped!"), 'TestJit', 'test_variant_consistency_jit'), + # ValueError: Sparse output is not supported at gradcheck yet. Please call to_dense(masked_grad=...) ... + DecorateInfo(unittest.skip("Skipped!"), 'TestFwdGradients', 'test_fn_fwgrad_bwgrad'), + # RuntimeError: Sparse CSR tensors do not have is_contiguous + DecorateInfo(unittest.skip("Skipped!"), 'TestBwdGradients', 'test_fn_grad'), + # ValueError: Sparse output is not supported at gradcheck yet. Please call to_dense(masked_grad=...) ... + DecorateInfo(unittest.skip("Skipped!"), 'TestBwdGradients', 'test_fn_gradgrad'), + # RuntimeError: Sparse CSR tensors do not have strides + DecorateInfo(unittest.skip("Skipped!"), 'TestFwdGradients', 'test_forward_mode_AD'), + # ValueError: Sparse output is not supported at gradcheck yet. Please call to_dense(masked_grad=...) ... + DecorateInfo(unittest.skip("Skipped!"), 'TestBwdGradients', 'test_fn_fail_gradgrad'), + )), + UnaryUfuncInfo('i0', + ref=np_unary_ufunc_integer_promotion_wrapper( + scipy.special.i0) if TEST_SCIPY else None, + aliases=('special.i0',), + decorators=(precisionOverride({torch.bfloat16: 3e-1, + torch.float16: 5e-1}),), + dtypes=all_types_and(torch.bool, torch.half, torch.bfloat16), + backward_dtypes=floating_types(), + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + promotes_int_to_float=True, + sample_inputs_func=sample_inputs_i0_i1, + skips=( + DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_large', + dtypes=(torch.int8,)), + )), + BinaryUfuncInfo('floor_divide', + ref=_floor_divide_np, + dtypes=all_types_and(torch.half, torch.bfloat16), + supports_autograd=False, + rhs_make_tensor_kwargs=dict(exclude_zero=True), + supports_two_python_scalars=True, + skips=( + # AssertionError: Results of original model and exported/imported version of model differed + DecorateInfo(unittest.skip('Skipped!'), 'TestJit', 'test_variant_consistency_jit'), + # bfloat16 floor_divide compared with a float32 reference works inconsistently + DecorateInfo(unittest.skip('Skipped!'), 'TestBinaryUfuncs', + dtypes=(torch.bfloat16,)), + # int8 floor divide has different results for -128 // -1 vs. 
NumPy + DecorateInfo(unittest.skip('Skipped!'), 'TestBinaryUfuncs', 'test_reference_numerics_small_values', + dtypes=(torch.int8,)), + # The following tests fail on some jobs + DecorateInfo(unittest.skip('Skipped!'), 'TestBinaryUfuncs', 'test_reference_numerics_extremal_values', + dtypes=(torch.float16,)), + DecorateInfo(toleranceOverride({torch.float16: tol(atol=1e-3, rtol=5e-3)}), + 'TestBinaryUfuncs', 'test_reference_numerics'), + )), + UnaryUfuncInfo('frexp', + op=torch.frexp, + ref=np.frexp, + dtypes=floating_types_and(torch.half, torch.bfloat16), + dtypesIfCUDA=floating_types_and(torch.half), + # skip testing torch.frexp as it is not supported by ROCm platform yet + decorators=[], + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + skips=( + # skips the tests below as torch.frexp returns tuple-like (mantissa, exponent) outputs, + # while these tests currently require the output to be a single tensor. + DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_batch_vs_slicing'), + DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_contig_vs_every_other'), + DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_contig_vs_transposed'), + DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_non_contig_expand'), + DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_variant_consistency'), + DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_out_arg_all_dtypes'), + + # skips test_reference_numerics due to an error in Windows CI. + # np.frexp returns the exponent as np.intc dtype on the Windows platform, + # and np.intc does not have a corresponding torch dtype + DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_small', + active_if=IS_WINDOWS), + DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_large', + active_if=IS_WINDOWS), + DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_extremal', + active_if=IS_WINDOWS), + )), + UnaryUfuncInfo('log1p', + ref=np.log1p, + aliases=('special.log1p',), + domain=(-1, None), + dtypes=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16), + decorators=(precisionOverride({torch.bfloat16: 1e-1}),), + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + supports_sparse=True, + supports_sparse_csr=True, + supports_sparse_csc=True, + supports_sparse_bsr=True, + supports_sparse_bsc=True, + assert_autodiffed=True, + promotes_int_to_float=True), + BinaryUfuncInfo('ge', + ref=np.greater_equal, + aliases=('greater_equal',), + dtypes=all_types_and(torch.bool, torch.bfloat16, torch.float16), + always_returns_bool=True, + supports_autograd=False, + skips=( + )), + OpInfo('geqrf', + dtypes=floating_and_complex_types(), + sample_inputs_func=sample_inputs_linalg_qr_geqrf, + decorators=[skipCUDAIfNoMagmaAndNoCusolver, skipCPUIfNoLapack], + supports_autograd=False, + skips=( + # FIXME: geqrf can't forward with complex inputs that require grad + DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_dtypes'), + # Strides are not the same! 
+ DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_out'), + )), + BinaryUfuncInfo('gt', + ref=np.greater, + aliases=('greater',), + dtypes=all_types_and(torch.bool, torch.bfloat16, torch.float16), + always_returns_bool=True, + supports_autograd=False, + skips=( + )), + UnaryUfuncInfo('imag', + ref=np.imag, + dtypes=complex_types_and(torch.chalf), + supports_out=False, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + # See https://github.com/pytorch/pytorch/issues/66357 + # RuntimeError: view_as_real doesn't work on unresolved conjugated tensors. + check_batched_forward_grad=False, + skips=( + # Skip since real and imag don't have out variants. + DecorateInfo(unittest.expectedFailure, 'TestUnaryUfuncs', 'test_out_arg_all_dtypes'), + )), + OpInfo('gradient', + dtypes=floating_and_complex_types_and(torch.int8, torch.int16, + torch.int32, torch.int64, + torch.bfloat16, torch.half), + supports_out=False, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + # See https://github.com/pytorch/pytorch/pull/78358 + check_batched_forward_grad=False, + skips=( + DecorateInfo(unittest.expectedFailure, 'TestNormalizeOperators', 'test_normalize_operator_exhaustive'), + # following tests give a runtime error with undefined value tensor + # see discussion : https://github.com/pytorch/pytorch/issues/56660 + # RuntimeError: + # Arguments for call are not valid. + DecorateInfo(unittest.skip("Skipped!"), 'TestJit', 'test_variant_consistency_jit', dtypes=(torch.float32, torch.complex64)), # noqa: B950 + DecorateInfo(unittest.skip("Skipped!"), 'TestNNCOpInfo', 'test_nnc_correctness'), + DecorateInfo(unittest.skip("Skipped!"), 'TestCudaFuserOpInfo'), + ), + supports_inplace_autograd=False, + sample_inputs_func=sample_inputs_gradient, + error_inputs_func=error_inputs_gradient), + OpInfo('isin', + dtypes=all_types(), + dtypesIfCUDA=all_types_and(torch.half), + supports_autograd=False, + sample_inputs_func=sample_inputs_isin), + OpInfo('kthvalue', + dtypes=all_types_and(torch.bfloat16, torch.float16), + dtypesIfCUDA=all_types_and(torch.float16), + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + sample_inputs_func=sample_inputs_kthvalue, + error_inputs_func=error_inputs_kthvalue), + BinaryUfuncInfo('le', + ref=np.less_equal, + aliases=('less_equal',), + dtypes=all_types_and(torch.bool, torch.bfloat16, torch.float16), + always_returns_bool=True, + supports_autograd=False, + skips=( + )), + OpInfo('linspace', + dtypes=all_types_and_complex_and(torch.bfloat16, torch.float16), + is_factory_function=True, + supports_out=True, + supports_autograd=False, + error_inputs_func=error_inputs_linspace, + sample_inputs_func=sample_inputs_linspace, + skips=( + # FX failed to normalize op - add the op to the op_skip list. + DecorateInfo(unittest.expectedFailure, 'TestNormalizeOperators', 'test_normalize_operator_exhaustive'), + # Tests that assume input is a tensor or sequence of tensors + DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_variant_consistency_eager'), + DecorateInfo(unittest.expectedFailure, 'TestMathBits', 'test_neg_view'), + DecorateInfo(unittest.expectedFailure, 'TestMathBits', 'test_conj_view'), + DecorateInfo(unittest.expectedFailure, 'TestMathBits', 'test_neg_conj_view'), + + # Same failure as arange: cannot find linspace in captured graph + DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_variant_consistency_jit', dtypes=(torch.float32,)), + + # UserWarning not triggered : Resized a non-empty tensor but did not warn about it. 
+ DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_out_warning'), + # UserWarning: CUDA caching allocator reports a memory leak not verified by the driver API + # in __main__.TestJitCUDA.test_variant_consistency_jit_logspace_cuda_complex64! + # Caching allocator allocated memory was 0 and is now reported as 307200 on device 0. + # CUDA driver allocated memory was 1254555648 and is now 1242955776. + DecorateInfo(unittest.skip("Skipped!"), 'TestJit', 'test_variant_consistency_jit', + dtypes=(torch.cfloat,), device_type="cuda"), + )), + OpInfo('linspace', + dtypes=all_types_and_complex_and(torch.bfloat16, torch.float16), + is_factory_function=True, + supports_out=True, + supports_autograd=False, + error_inputs_func=error_inputs_linspace, + sample_inputs_func=sample_inputs_linspace_tensor_overload, + variant_test_name="tensor_overload", + skips=( + # FX failed to normalize op - add the op to the op_skip list. + DecorateInfo(unittest.expectedFailure, 'TestNormalizeOperators', 'test_normalize_operator_exhaustive'), + # TypeError: 'int' object is not subscriptable + DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_variant_consistency_eager'), + DecorateInfo(unittest.expectedFailure, 'TestMathBits', 'test_neg_view'), + DecorateInfo(unittest.expectedFailure, 'TestMathBits', 'test_conj_view'), + + # Same failure as arange: cannot find linspace in captured graph + DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_variant_consistency_jit', dtypes=(torch.float32,)), + + # UserWarning not triggered : Resized a non-empty tensor but did not warn about it. + DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_out_warning'), + # UserWarning: CUDA caching allocator reports a memory leak not verified by the driver API + # in __main__.TestJitCUDA.test_variant_consistency_jit_logspace_cuda_complex64! + # Caching allocator allocated memory was 0 and is now reported as 307200 on device 0. + # CUDA driver allocated memory was 1254555648 and is now 1242955776. + DecorateInfo(unittest.skip("Skipped!"), 'TestJit', 'test_variant_consistency_jit', + dtypes=(torch.cfloat,), device_type="cuda"), + )), + OpInfo('logspace', + dtypes=all_types_and_complex_and(torch.half, torch.bfloat16), + is_factory_function=True, + supports_out=True, + supports_autograd=False, + error_inputs_func=error_inputs_linspace, + sample_inputs_func=sample_inputs_logspace, + skips=( + # FX failed to normalize op - add the op to the op_skip list. + DecorateInfo(unittest.expectedFailure, 'TestNormalizeOperators', 'test_normalize_operator_exhaustive'), + # Tests that assume input is a tensor or sequence of tensors + DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_variant_consistency_eager'), + DecorateInfo(unittest.expectedFailure, 'TestMathBits', 'test_neg_view'), + DecorateInfo(unittest.expectedFailure, 'TestMathBits', 'test_conj_view'), + DecorateInfo(unittest.expectedFailure, 'TestMathBits', 'test_neg_conj_view'), + # Same failure as arange: cannot find linspace in captured graph + DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_variant_consistency_jit', dtypes=(torch.float32,)), + + # UserWarning not triggered : Resized a non-empty tensor but did not warn about it. 
+ DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_out_warning'), + + # Off-by-one issue when casting floats to ints + DecorateInfo(unittest.expectedFailure, 'TestDecomp', 'test_quick', + dtypes=(torch.int16, torch.int32, torch.int64), device_type="cuda"), + DecorateInfo(unittest.expectedFailure, 'TestDecomp', 'test_comprehensive', + dtypes=(torch.int16, torch.int32, torch.int64), device_type="cuda"), + # UserWarning: CUDA caching allocator reports a memory leak not verified by the driver API + # in __main__.TestJitCUDA.test_variant_consistency_jit_logspace_cuda_complex64! + # Caching allocator allocated memory was 0 and is now reported as 307200 on device 0. + # CUDA driver allocated memory was 1254555648 and is now 1242955776. + DecorateInfo(unittest.skip("Skipped!"), 'TestJit', 'test_variant_consistency_jit', + dtypes=(torch.cfloat,), device_type="cuda"), + )), + OpInfo('logspace', + dtypes=all_types_and_complex_and(torch.half, torch.bfloat16), + is_factory_function=True, + supports_out=True, + supports_autograd=False, + error_inputs_func=error_inputs_linspace, + sample_inputs_func=sample_inputs_logspace_tensor_overload, + variant_test_name="tensor_overload", + skips=( + # FX failed to normalize op - add the op to the op_skip list. + DecorateInfo(unittest.expectedFailure, 'TestNormalizeOperators', 'test_normalize_operator_exhaustive'), + # TypeError: 'int' object is not subscriptable + DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_variant_consistency_eager'), + DecorateInfo(unittest.expectedFailure, 'TestMathBits', 'test_neg_view'), + DecorateInfo(unittest.expectedFailure, 'TestMathBits', 'test_conj_view'), + # Same failure as arange: cannot find linspace in captured graph + DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_variant_consistency_jit', dtypes=(torch.float32,)), + + # UserWarning not triggered : Resized a non-empty tensor but did not warn about it. + DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_out_warning'), + + # Off-by-one issue when casting floats to ints + DecorateInfo(unittest.expectedFailure, 'TestDecomp', 'test_quick', + dtypes=(torch.int16, torch.int32, torch.int64), device_type="cuda"), + DecorateInfo(unittest.expectedFailure, 'TestDecomp', 'test_comprehensive', + dtypes=(torch.int16, torch.int32, torch.int64), device_type="cuda"), + # UserWarning: CUDA caching allocator reports a memory leak not verified by the driver API + # in __main__.TestJitCUDA.test_variant_consistency_jit_logspace_cuda_complex64! + # Caching allocator allocated memory was 0 and is now reported as 307200 on device 0. + # CUDA driver allocated memory was 1254555648 and is now 1242955776. 
+ DecorateInfo(unittest.skip("Skipped!"), 'TestJit', 'test_variant_consistency_jit', + dtypes=(torch.cfloat,), device_type="cuda"), + )), + UnaryUfuncInfo('log', + ref=np.log, + domain=(0, None), + dtypes=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16), + dtypesIfCUDA=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16, torch.chalf), + backward_dtypesIfCUDA=floating_and_complex_types_and(torch.half, torch.bfloat16, torch.chalf), + assert_autodiffed=True, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + promotes_int_to_float=True, + decorators=(precisionOverride({torch.bfloat16: 5e-2}),), + skips=( + DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_extremal', + device_type='cpu', dtypes=[torch.cfloat, torch.cdouble], + active_if=IS_WINDOWS), + ), + # log(z)->-inf for |z|->0 + reference_numerics_filter=NumericsFilter(condition=lambda x: torch.abs(x) < 0.1, safe_val=1)), + UnaryUfuncInfo('log10', + ref=np.log10, + domain=(0, None), + decorators=(precisionOverride({torch.bfloat16: 5e-2}),), + dtypes=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16), + assert_autodiffed=True, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + promotes_int_to_float=True, + skips=( + DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_extremal', + device_type='cpu', dtypes=[torch.cfloat, torch.cdouble], + active_if=IS_WINDOWS), + ), + # log10(z)->-inf for |z|->0 + reference_numerics_filter=NumericsFilter(condition=lambda x: torch.abs(x) < 0.1, safe_val=1)), + UnaryUfuncInfo('log2', + ref=np.log2, + domain=(0, None), + dtypes=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16), + assert_autodiffed=True, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + promotes_int_to_float=True, + decorators=(precisionOverride({torch.bfloat16: 1e-1}),), + skips=( + DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_extremal', + dtypes=[torch.cfloat, torch.cdouble]), + ), + # log2(z)->-inf for |z|->0 + reference_numerics_filter=NumericsFilter(condition=lambda x: torch.abs(x) < 0.1, safe_val=1)), + BinaryUfuncInfo('ldexp', + dtypes=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16), + # Runs very slowly on slow gradcheck - alternatively reduce input sizes + gradcheck_fast_mode=True, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + supports_inplace_autograd=False, + promotes_int_to_float=True, + supports_out=True, + supports_rhs_python_scalar=False, + skips=( + # RuntimeError: mul(): functions with out=... 
arguments don't support + # automatic differentiation, but one of the arguments requires grad + # https://github.com/pytorch/pytorch/issues/68966 + DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_variant_consistency_eager'), + DecorateInfo(unittest.expectedFailure, 'TestMathBits', 'test_conj_view'), + DecorateInfo(unittest.expectedFailure, 'TestMathBits', 'test_neg_view'), + DecorateInfo(unittest.expectedFailure, 'TestMathBits', 'test_neg_conj_view'), + ), + decorators=[ + DecorateInfo( + toleranceOverride({ + torch.complex64: tol(atol=1e-05, rtol=1e-05) + }), + 'TestCommon', device_type='cpu', + ), + ], ), + BinaryUfuncInfo('logaddexp', + dtypes=floating_and_complex_types_and(torch.bfloat16, torch.float16), + dtypesIfCUDA=floating_types_and(torch.bfloat16, torch.float16), + dtypesIfROCM=floating_types_and(torch.bfloat16, torch.float16), + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + supports_rhs_python_scalar=False, + skips=( + # TODO: FIXME: RuntimeError: not implemented for 'ComplexFloat' + DecorateInfo(unittest.expectedFailure, 'TestBinaryUfuncs', 'test_type_promotion', device_type='cuda'), + )), + OpInfo('logaddexp2', + dtypes=floating_types_and(torch.bfloat16, torch.half), + dtypesIfCUDA=floating_types_and(torch.bfloat16), + dtypesIfROCM=floating_types_and(torch.bfloat16), + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + sample_inputs_func=sample_inputs_logaddexp), + UnaryUfuncInfo('logical_not', + ref=np.logical_not, + decorators=(precisionOverride({torch.bfloat16: 7e-1, + torch.float16: 5e-1}),), + dtypes=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16), + supports_autograd=False, + skips=( + # The function variant always returns BoolTensor + # while the inplace variant preserves the input dtype. 
+ # >>> t = torch.randn(3) + # >>> torch.logical_not(t) + # tensor([False, False, False]) + # >>> torch.logical_not(t).dtype + # torch.bool + # >>> t.logical_not_().dtype + # torch.float32 + DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_variant_consistency', + dtypes=all_types_and_complex_and(torch.half, torch.bfloat16)), + DecorateInfo(unittest.skip("Skipped!"), 'TestCommon', 'test_variant_consistency_eager', + dtypes=all_types_and_complex_and(torch.half, torch.bfloat16)), + )), + BinaryUfuncInfo('lt', + ref=np.less, + aliases=('less',), + dtypes=all_types_and(torch.bool, torch.bfloat16, torch.float16), + always_returns_bool=True, + supports_autograd=False, + skips=( + )), + OpInfo('lu_unpack', + op=torch.lu_unpack, + dtypes=floating_and_complex_types(), + # Runs very slowly on slow gradcheck - alternatively reduce input sizes + gradcheck_fast_mode=True, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + skips=(skipCPUIfNoLapack,), + sample_inputs_func=sample_inputs_lu_unpack), + OpInfo('lu', + op=torch.lu, + dtypes=floating_and_complex_types(), + # Runs very slowly on slow gradcheck - alternatively reduce input sizes + gradcheck_fast_mode=True, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + # https://github.com/pytorch/pytorch/issues/66357 + check_batched_forward_grad=False, + sample_inputs_func=sample_inputs_lu, + decorators=[skipCUDAIfNoMagmaAndNoCusolver, skipCPUIfNoLapack], + skips=( + # we skip jit tests because `lu` is a torch function + # RuntimeError: + # 'Tensor (inferred)' object has no attribute or method 'lu'.: + # File "", line 3 + # def the_method(i0): + # return i0.lu(True, True) + # ~~~~~ <--- HERE + DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_variant_consistency_jit'), + # RuntimeError not raised: Expected RuntimeError when calling with input.device=cpu and out.device=cuda + DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_out'), + # UserWarning not triggered : Resized a non-empty tensor but did not warn about it. 
+ DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_out_warning'), + )), + OpInfo('lu_solve', + op=torch.lu_solve, + dtypes=floating_and_complex_types(), + supports_forward_ad=True, + # See https://github.com/pytorch/pytorch/issues/66357 + check_batched_forward_grad=False, + supports_fwgrad_bwgrad=True, + sample_inputs_func=sample_inputs_lu_solve, + skips=( + DecorateInfo(unittest.skip("Skipped!"), 'TestCommon', 'test_out', + device_type='mps', dtypes=[torch.float32]), + DecorateInfo(unittest.skip("Skipped!"), 'TestCommon', 'test_variant_consistency_eager', + device_type='mps', dtypes=[torch.float32]), + DecorateInfo(unittest.skip("Skipped!"), 'TestJit', 'test_variant_consistency_jit', + device_type='mps', dtypes=[torch.float32]), + DecorateInfo(unittest.skip("Tests different backward paths"), + "TestCommon", "test_floating_inputs_are_differentiable"),), + decorators=[skipCPUIfNoLapack, skipCUDAIfNoMagmaAndNoCusolver]), + OpInfo('masked_fill', + dtypes=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16, torch.chalf), + sample_inputs_func=sample_inputs_masked_fill, + error_inputs_func=error_inputs_masked_fill, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + check_batched_forward_grad=False, + supports_out=False), + OpInfo('masked_scatter', + dtypes=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16), + sample_inputs_func=sample_inputs_masked_scatter, + error_inputs_func=error_inputs_masked_scatter, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + # https://github.com/pytorch/pytorch/issues/66357 + check_batched_forward_grad=False, + supports_out=False, + skips=( + )), + OpInfo('masked_select', + dtypes=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16), + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + sample_inputs_func=sample_inputs_masked_select, + error_inputs_func=error_inputs_masked_select, + skips=( + # Compiler issue on ROCm. 
Might need to skip until ROCm5.5 + DecorateInfo(unittest.skip('Skipped!'), 'TestCommon', 'test_non_standard_bool_values', + dtypes=[torch.bool], active_if=TEST_WITH_ROCM), + )), + OpInfo('matrix_exp', + dtypes=floating_and_complex_types_and(torch.float16, torch.bfloat16), + aliases=('linalg.matrix_exp',), + sample_inputs_func=sample_inputs_matrix_exp, + # Needs to construct a 2nx2n matrix by copy_ ing into it + check_batched_grad=False, + check_batched_gradgrad=False, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + # https://github.com/pytorch/pytorch/issues/66357 + check_batched_forward_grad=False, + skips=( + # mexp does not support bf16 and fp16 + DecorateInfo(unittest.skip('Skipped!'), 'TestInductorOpInfo', 'test_comprehensive', + dtypes=[torch.half], device_type="cpu"), + ), + supports_out=False, + ), + OpInfo('matmul', + aliases=('linalg.matmul',), + dtypes=all_types_and_complex_and(torch.float16, torch.bfloat16), + dtypesIfCUDA=floating_and_complex_types_and(torch.float16, + *[torch.bfloat16] + if SM53OrLater or TEST_WITH_ROCM else []), + assert_autodiffed=True, + assert_jit_shape_analysis=True, + # Runs very slowly on slow gradcheck - alternatively reduce input sizes + gradcheck_fast_mode=True, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + check_batched_forward_grad=False, + sample_inputs_func=partial(sample_inputs_matmul, is_rmatmul=False), + decorators=[ + # NVIDIA only assures that bfloat16 is supported by bmm if SM >= 5.3 + DecorateInfo(unittest.skip("Skipped!"), 'TestCommon', 'test_dtypes', device_type='cuda', active_if=not SM53OrLater), + # ROCm intermittently fails the test with standard atol/rtol + DecorateInfo(toleranceOverride({torch.float32: tol(atol=1e-4, rtol=0)}), + 'TestCommon', 'test_noncontiguous_samples', device_type='cuda', + active_if=TEST_WITH_ROCM), + DecorateInfo(toleranceOverride({torch.float32: tol(atol=1e-4, rtol=0)}), + 'TestCommon', 'test_out', device_type='cuda', + active_if=TEST_WITH_ROCM), + # mv for the sample with shapes (S, S, M, M), (M,) has some variance in the + # backward on CPU + DecorateInfo(toleranceOverride({torch.float32: tol(atol=0, rtol=1e-5)}), + 'TestCommon', 'test_noncontiguous_samples', + device_type='cpu'), + DecorateInfo( + toleranceOverride({ + torch.float32: tol(atol=1e-5, rtol=1e-5), + torch.complex64: tol(atol=1e-5, rtol=1e-5), + }), + "TestDecomp", "test_comprehensive", device_type="cuda", + ), + ], + skips=( + # Strides are not the same! + DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_out'), + # https://github.com/pytorch/pytorch/issues/67470 + DecorateInfo(unittest.skip("67470!"), + 'TestCommon', 'test_noncontiguous_samples', + device_type='cpu', dtypes=(torch.long,)), + # AssertionError: False is not true : Tensors failed to compare as equal! 
+ DecorateInfo(unittest.skip("Skipped!"), 'TestOpInfo', + device_type='xla', dtypes=(torch.long,)), + # https://github.com/pytorch/pytorch/issues/71774 + DecorateInfo(unittest.skip('Skipped!'), 'TestNNCOpInfo', 'test_nnc_correctness', + device_type='cpu', dtypes=(torch.long,)), + )), + OpInfo('max', + variant_test_name='reduction_with_dim', + dtypes=all_types_and(torch.float16, torch.bfloat16, torch.bool), + sample_inputs_func=sample_inputs_max_min_reduction_with_dim, + supports_fwgrad_bwgrad=True, + skips=( + ), + supports_forward_ad=True), + OpInfo('max', + variant_test_name='reduction_no_dim', + dtypes=all_types_and(torch.float16, torch.bfloat16, torch.bool), + supports_out=True, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + sample_inputs_func=sample_inputs_max_min_reduction_no_dim, + skips=( + )), + OpInfo('median', + dtypes=all_types_and(torch.bfloat16, torch.float16), + dtypesIfCUDA=all_types_and(torch.float16), + # TODO: some signatures of median do support out + supports_out=False, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + error_inputs_func=error_inputs_median, + sample_inputs_func=partial(sample_inputs_reduction, supports_multiple_dims=False)), + OpInfo('nanmedian', + dtypes=all_types_and(torch.bfloat16, torch.float16), + dtypesIfCUDA=all_types_and(torch.float16), + # TODO: some signatures of nanmedian do support out + supports_out=False, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + sample_inputs_func=partial(sample_inputs_reduction, supports_multiple_dims=False)), + OpInfo('var_mean', + dtypes=floating_and_complex_types_and(torch.half, torch.bfloat16), + sample_inputs_func=sample_inputs_std_var, + # TODO: some signatures of var_mean do support out + supports_out=False, + supports_forward_ad=True, + check_batched_forward_grad=False, + supports_fwgrad_bwgrad=True, + decorators=( + DecorateInfo(toleranceOverride({torch.float64: tol(atol=2e-7, rtol=2e-7)}), + "TestDecomp", "test_comprehensive", device_type="cuda"), + )), + OpInfo('var_mean', + variant_test_name='unbiased', + dtypes=floating_and_complex_types_and(torch.half, torch.bfloat16), + sample_inputs_func=sample_inputs_std_var_unbiased, + # TODO: some signatures of var_mean do support out + supports_out=False, + supports_forward_ad=True, + check_batched_forward_grad=False, + supports_fwgrad_bwgrad=True, + decorators=( + DecorateInfo(toleranceOverride({torch.float64: tol(atol=2e-7, rtol=2e-7)}), + "TestDecomp", "test_comprehensive", device_type="cuda"), + )), + OpInfo('std_mean', + dtypes=floating_and_complex_types_and(torch.half, torch.bfloat16), + sample_inputs_func=sample_inputs_std_var, + # TODO: some signatures of std_mean do support out + supports_out=False, + supports_forward_ad=True, + check_batched_forward_grad=False, + supports_fwgrad_bwgrad=True, + decorators=( + DecorateInfo(toleranceOverride({torch.float64: tol(atol=2e-7, rtol=2e-7)}), + "TestDecomp", "test_comprehensive", device_type="cuda"), + )), + OpInfo('std_mean', + variant_test_name='unbiased', + dtypes=floating_and_complex_types_and(torch.half, torch.bfloat16), + sample_inputs_func=sample_inputs_std_var_unbiased, + # TODO: some signatures of var_mean do support out + supports_out=False, + supports_forward_ad=True, + check_batched_forward_grad=False, + supports_fwgrad_bwgrad=True, + decorators=( + DecorateInfo(toleranceOverride({torch.float64: tol(atol=2e-7, rtol=2e-7)}), + "TestDecomp", "test_comprehensive", device_type="cuda"), + )), + OpInfo('meshgrid', + variant_test_name='variadic_tensors', + 
ref=np.meshgrid, + dtypes=all_types_and_complex_and(torch.bfloat16, torch.bool, torch.float16), + sample_inputs_func=partial(sample_inputs_meshgrid, variant='variadic'), + skips=[ + # JIT does not support variadic tensors. + # RuntimeError: input->type()->kind() == TypeKind::OptionalType + # INTERNAL ASSERT FAILED at "../torch/csrc/jit/passes/utils/check_alias_annotation.cpp":252, + # please report a bug to PyTorch. + DecorateInfo(unittest.skip("Skipped!"), 'TestJit', 'test_variant_consistency_jit'), + # meshgrid is defined in torch.functional to take a + # variadic list of tensors. Variadic parameters are not + # compatible with the normalize operator tests. + DecorateInfo(unittest.skip("Skipped!"), 'TestNormalizeOperators', 'test_normalize_operator_exhaustive'), + # Skip operator schema test because this is a functional and not an operator + DecorateInfo(unittest.skip("Skipped!"), 'TestOperatorSignatures', 'test_get_torch_func_signature_exhaustive'), + ], + supports_out=False, + supports_fwgrad_bwgrad=True, + supports_forward_ad=True, + # See https://github.com/pytorch/pytorch/pull/78358 + check_batched_forward_grad=False,), + OpInfo('meshgrid', + variant_test_name='list_of_tensors', + # Unlike the variant above, we do not use np.meshgrid as a + # ref since it does not officially support list of numpy + # arrays. + dtypes=all_types_and_complex_and(torch.bfloat16, torch.bool, torch.float16), + sample_inputs_func=partial(sample_inputs_meshgrid, variant='list'), + skips=[ + # meshgrid is defined in torch.functional to take a + # variadic list of tensors. Variadic parameters are not + # compatible with the normalize operator tests. + DecorateInfo(unittest.expectedFailure, 'TestNormalizeOperators', 'test_normalize_operator_exhaustive'), + ], + assert_autodiffed=True, + supports_out=False, + autodiff_nonfusible_nodes=[], + supports_fwgrad_bwgrad=True, + supports_forward_ad=True, + # See https://github.com/pytorch/pytorch/pull/78358 + check_batched_forward_grad=False,), + OpInfo('min', + variant_test_name='reduction_with_dim', + dtypes=all_types_and(torch.float16, torch.bfloat16, torch.bool), + sample_inputs_func=sample_inputs_max_min_reduction_with_dim, + supports_fwgrad_bwgrad=True, + supports_forward_ad=True, + skips=( + )), + OpInfo('min', + variant_test_name='reduction_no_dim', + dtypes=all_types_and(torch.float16, torch.bfloat16, torch.bool), + supports_out=True, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + sample_inputs_func=sample_inputs_max_min_reduction_no_dim, + skips=( + )), + OpInfo('quantile', + dtypes=floating_types(), + sample_inputs_func=sample_inputs_reduction_quantile, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + # See https://github.com/pytorch/pytorch/issues/66357 + # Relies on copy_ to broadcast, but the forward AD path calls broadcast_to which + # does not have a batching rule in core + check_batched_forward_grad=False), + OpInfo('nanquantile', + dtypes=floating_types(), + sample_inputs_func=sample_inputs_reduction_quantile, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + # See https://github.com/pytorch/pytorch/issues/66357 + # Relies on copy_ to broadcast, but the forward AD path calls broadcast_to which + # does not have a batching rule in core + check_batched_forward_grad=False), + BinaryUfuncInfo( + 'max', + aliases=('maximum',), + variant_test_name='binary', + dtypes=all_types_and(torch.float16, torch.bfloat16, torch.bool), + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + assert_autodiffed=True, + 
ref=np.maximum, + supports_rhs_python_scalar=False, + skips=( + # Incorrectly attempts to use a scalar for the second argument + DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_jit_alias_remapping'), + # TODO: FIXME: RuntimeError: "max_elementwise_cuda" not implemented for 'ComplexFloat' + DecorateInfo(unittest.expectedFailure, 'TestBinaryUfuncs', 'test_type_promotion', device_type='cuda'), + )), + BinaryUfuncInfo( + 'maximum', + dtypes=all_types_and(torch.float16, torch.bfloat16, torch.bool), + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + ref=np.maximum, + supports_rhs_python_scalar=False, + skips=( + # TODO: FIXME: RuntimeError: "max_elementwise_cuda" not implemented for 'ComplexFloat' + DecorateInfo(unittest.expectedFailure, 'TestBinaryUfuncs', 'test_type_promotion', device_type='cuda'), + )), + BinaryUfuncInfo( + 'min', + aliases=('minimum',), + variant_test_name='binary', + dtypes=all_types_and(torch.float16, torch.bfloat16, torch.bool), + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + assert_autodiffed=True, + ref=np.minimum, + supports_rhs_python_scalar=False, + skips=( + # Incorrectly attempts to use a scalar for the second argument + DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_jit_alias_remapping'), + # TODO: FIXME: RuntimeError: "min_elementwise_cuda" not implemented for 'ComplexFloat' + DecorateInfo(unittest.expectedFailure, + 'TestBinaryUfuncs', + 'test_type_promotion', + device_type='cuda'), + )), + BinaryUfuncInfo( + 'minimum', + dtypes=all_types_and(torch.float16, torch.bfloat16, torch.bool), + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + ref=np.minimum, + supports_rhs_python_scalar=False, + skips=( + # TODO: FIXME: RuntimeError: "min_elementwise_cuda" not implemented for 'ComplexFloat' + DecorateInfo(unittest.expectedFailure, + 'TestBinaryUfuncs', + 'test_type_promotion', + device_type='cuda'), + ), + ), + BinaryUfuncInfo('logical_and', + ref=np.logical_and, + dtypes=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16), + supports_autograd=False, + always_returns_bool=True, + supports_rhs_python_scalar=False), + BinaryUfuncInfo('logical_or', + ref=np.logical_or, + dtypes=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16), + supports_autograd=False, + always_returns_bool=True, + supports_rhs_python_scalar=False), + BinaryUfuncInfo('logical_xor', + ref=np.logical_xor, + dtypes=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16), + supports_autograd=False, + always_returns_bool=True, + supports_rhs_python_scalar=False, + skips=( + )), + BinaryUfuncInfo('bitwise_and', + ref=np.bitwise_and, + dtypes=integral_types_and(torch.bool), + operator_variant=operator.and_, + inplace_operator_variant=operator.iand, + supports_autograd=False, + supports_one_python_scalar=True, + skips=( + # RuntimeError: "bitwise_and_cuda" not implemented for 'Half' + DecorateInfo(unittest.expectedFailure, 'TestBinaryUfuncs', + 'test_type_promotion', device_type='cuda'), + )), + BinaryUfuncInfo('bitwise_or', + ref=np.bitwise_or, + dtypes=integral_types_and(torch.bool), + operator_variant=operator.or_, + inplace_operator_variant=operator.ior, + supports_autograd=False, + supports_one_python_scalar=True, + skips=( + # TODO: FIXME: RuntimeError: "bitwise_or_cuda" not implemented for 'Half' + DecorateInfo(unittest.expectedFailure, + 'TestBinaryUfuncs', + 'test_type_promotion', + device_type='cuda'), + )), + BinaryUfuncInfo('bitwise_xor', + ref=np.bitwise_xor, + dtypes=integral_types_and(torch.bool), + 
operator_variant=operator.xor, + inplace_operator_variant=operator.ixor, + supports_autograd=False, + supports_one_python_scalar=True, + skips=( + # TODO: FIXME: RuntimeError: "bitwise_xor_cuda" not implemented for 'Half' + DecorateInfo(unittest.expectedFailure, + 'TestBinaryUfuncs', + 'test_type_promotion', + device_type='cuda'), + )), + BinaryUfuncInfo('heaviside', + ref=lambda a, b: ( + # necessary because np.heaviside incorrectly returns float64 when passed args of dtype int64 + np.int64(np.heaviside(a, b)) if a.dtype == np.int64 and b.dtype == np.int64 else np.heaviside(a, b) + ), + dtypes=all_types_and(torch.bool, torch.float16, torch.bfloat16), + supports_autograd=False, + supports_rhs_python_scalar=False, + skips=( + # RuntimeError: heaviside is not yet implemented for tensors with different dtypes. + DecorateInfo(unittest.expectedFailure, + 'TestBinaryUfuncs', + 'test_type_promotion'), + DecorateInfo(unittest.expectedFailure, 'TestMeta', 'test_binary_ufuncs_mixed_dtype'), + # PyTorch's heaviside does not appear to propagate NaNs + DecorateInfo(unittest.skip("Skipped!"), + 'TestBinaryUfuncs', + 'test_reference_numerics_extremal_values'), + )), + BinaryUfuncInfo('lcm', + ref=np.lcm, + dtypes=integral_types_and(), + supports_autograd=False, + supports_rhs_python_scalar=False), + BinaryUfuncInfo('gcd', + ref=np.gcd, + dtypes=integral_types_and(), + supports_autograd=False, + supports_rhs_python_scalar=False, + skips=( + DecorateInfo(unittest.expectedFailure, + 'TestBinaryUfuncs', + 'test_reference_numerics_small_values', + dtypes=(torch.int8,)),)), + BinaryUfuncInfo('isclose', + ref=np.isclose, + dtypes=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16), + sample_inputs_func=sample_inputs_isclose, + error_inputs_func=error_inputs_isclose, + supports_autograd=False, + supports_out=False, + supports_rhs_python_scalar=False, + skips=( + DecorateInfo(unittest.expectedFailure, + 'TestCommon', + 'test_numpy_refs', dtypes=(torch.complex128,)), + # RuntimeError: Short did not match Int + DecorateInfo(unittest.expectedFailure, + 'TestBinaryUfuncs', + 'test_type_promotion'), + DecorateInfo(unittest.expectedFailure, 'TestMeta', 'test_binary_ufuncs_mixed_dtype'), + DecorateInfo(unittest.skip("Skipped!"), + 'TestBinaryUfuncs', + 'test_reference_numerics_extremal_values'), + )), + # `softmax` supports different dtypes based on whether `dtype` argument, + # is passed or not. Hence two OpInfo entries, one with dtype and other without. 
+ # https://github.com/pytorch/pytorch/issues/68752 + OpInfo('softmax', + aliases=('special.softmax', 'nn.functional.softmax',), + aten_name='softmax', + aten_backward_name='_softmax_backward_data', + dtypes=floating_types_and(torch.half, torch.bfloat16), + sample_inputs_func=sample_inputs_softmax_variant, + assert_jit_shape_analysis=True, + assert_autodiffed=True, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + supports_out=True), + OpInfo('softmax', + aliases=('special.softmax', 'nn.functional.softmax',), + variant_test_name="with_dtype", + aten_name='softmax', + dtypes=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16), + sample_inputs_func=partial(sample_inputs_softmax_variant, with_dtype=True), + assert_autodiffed=True, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + supports_out=True), + OpInfo( + '_softmax_backward_data', + op=torch.ops.aten._softmax_backward_data, + aten_name='_softmax_backward_data', + dtypes=floating_types_and(torch.bfloat16, torch.float16), + sample_inputs_func=sample_inputs_softmax_backward_data, + assert_autodiffed=True, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + supports_out=False, + skips=( + DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_noncontiguous_samples', device_type='cpu'), + DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_variant_consistency_jit', dtypes=(torch.float32,)), + ), + ), + # `softmin` supports different dtypes based on whether `dtype` argument, + # is passed or not. Hence two OpInfo entries, one with dtype and other without. + # https://github.com/pytorch/pytorch/issues/68752 + OpInfo('nn.functional.softmin', + aten_name='softmin', + dtypes=floating_types_and(torch.half, torch.bfloat16), + sample_inputs_func=sample_inputs_softmax_variant, + assert_jit_shape_analysis=False, + assert_autodiffed=False, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + supports_out=False), + OpInfo('nn.functional.softmin', + variant_test_name="with_dtype", + aten_name='softmin', + dtypes=all_types_and_complex_and(torch.float16, torch.bfloat16), + sample_inputs_func=partial(sample_inputs_softmax_variant, with_dtype=True), + assert_autodiffed=False, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + supports_out=False), + OpInfo( + "nn.functional.cross_entropy", + dtypes=floating_types_and(torch.bfloat16), + dtypesIfCUDA=floating_types_and(torch.float16, torch.bfloat16), + sample_inputs_func=sample_inputs_cross_entropy, + supports_out=False, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + decorators=( + DecorateInfo( + toleranceOverride({torch.float32: tol(atol=1e-5, rtol=1e-3)}), + "TestJit", + "test_variant_consistency_jit", + device_type="cpu", + ), + ), + skips=( + # AssertionError: False is not true : Scalars failed to compare as equal! 
0 != 1536 + # test_ops.TestJitCUDA.test_variant_consistency_jit_nn_functional_cross_entropy_cuda_float32 leaked + # 1536 bytes CUDA memory on device 0 + DecorateInfo( + unittest.expectedFailure, + "TestJit", + "test_variant_consistency_jit", + device_type="cuda", + ), + ) + ), + OpInfo('nn.functional.normalize', + dtypes=floating_and_complex_types_and(torch.half, torch.bfloat16), + sample_inputs_func=sample_inputs_normalize, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True), + OpInfo('aminmax', + ref=lambda x, dim=None, keepdim=False: (np.amin(x, axis=dim, keepdims=keepdim), np.amax(x, axis=dim, keepdims=keepdim)), + dtypes=all_types_and(torch.bool, torch.float16, torch.bfloat16), + decorators=(onlyNativeDeviceTypes,), + supports_autograd=False, + sample_inputs_func=sample_inputs_aminmax, + error_inputs_func=error_inputs_aminmax_amax_amin), + OpInfo('as_strided', + dtypes=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16, torch.chalf), + supports_out=False, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + # vmap does not support inplace views + check_inplace_batched_forward_grad=False, + sample_inputs_func=sample_inputs_as_strided, + skips=( + # Note: This xfail is fine -- it's inherent to how as_strided works + DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_noncontiguous_samples'), + # AssertionError: False is not true : Scalars failed to compare as equal! + DecorateInfo(unittest.skip("Errors when storage_offset is included"), + 'TestCommon', 'test_variant_consistency_eager'), + # Not close + DecorateInfo(unittest.skip("Errors when storage_offset is included"), + 'TestCommon', 'test_complex_half_reference_testing'), + # Not close + DecorateInfo(unittest.skip("Errors when storage_offset is included"), 'TestMathBits', 'test_conj_view'), + DecorateInfo(unittest.skip("Errors when storage_offset is included"), 'TestMathBits', 'test_neg_view'), + DecorateInfo(unittest.skip("Numerous errors"), 'TestFwdGradients'), + DecorateInfo(unittest.skip("Numerous errors"), 'TestBwdGradients'), + )), + OpInfo('as_strided', + variant_test_name='partial_views', + dtypes=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16, torch.chalf), + supports_out=False, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + # vmap does not support inplace views + check_inplace_batched_forward_grad=False, + sample_inputs_func=sample_inputs_as_strided_partial_views, + skips=( + # Note: This xfail is fine -- it's inherent to how as_strided works + DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_noncontiguous_samples'), + # RuntimeError: This operator is not Composite Compliant: the + # storage_offset of the tensor was modified directly without + # going through the PyTorch dispatcher. 
+ DecorateInfo(unittest.expectedFailure, 'TestCompositeCompliance', 'test_forward_ad'), + DecorateInfo(unittest.expectedFailure, 'TestCompositeCompliance', 'test_backward'), + + # These fail because the test changes the input's in-memory layout + DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_complex_half_reference_testing'), + DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_variant_consistency_eager'), + DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_compare_cpu'), + DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_variant_consistency_jit'), + DecorateInfo(unittest.expectedFailure, 'TestFwdGradients', 'test_fn_fwgrad_bwgrad', + dtypes=(torch.complex64, torch.complex128)), + DecorateInfo(unittest.expectedFailure, 'TestFwdGradients', 'test_forward_mode_AD'), + DecorateInfo(unittest.expectedFailure, 'TestFwdGradients', 'test_inplace_forward_mode_AD'), + DecorateInfo(unittest.expectedFailure, 'TestBwdGradients', 'test_inplace_grad'), + DecorateInfo(unittest.expectedFailure, 'TestBwdGradients', 'test_inplace_gradgrad'), + DecorateInfo(unittest.expectedFailure, 'TestProxyTensorOpInfo', + 'test_make_fx_symbolic_exhaustive_inplace'), + DecorateInfo(unittest.expectedFailure, 'TestNNCOpInfo', 'test_nnc_correctness'), + # Fail but are also flaky + DecorateInfo(unittest.skip("Test changes in memory layout"), 'TestMathBits'), + DecorateInfo(unittest.skip("Modifies input strides and storage_offset"), 'TestCommon', + 'test_non_standard_bool_values'), + # RuntimeError: setStorage: sizes [2, 2], strides [1, 2], storage offset 10, and itemsize 2 requiring a + # storage size of 28 are out of bounds for storage of size 20 + DecorateInfo(unittest.expectedFailure, 'TestMeta', 'test_meta_inplace'), + DecorateInfo(unittest.expectedFailure, 'TestMeta', 'test_dispatch_meta_inplace'), + DecorateInfo(unittest.expectedFailure, 'TestMeta', 'test_dispatch_symbolic_meta_inplace'), + DecorateInfo(unittest.expectedFailure, 'TestMeta', 'test_dispatch_symbolic_meta_inplace_all_strides'), + )), + OpInfo('as_strided_scatter', + dtypes=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16, torch.chalf), + supports_out=False, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + # vmap does not support inplace views + check_inplace_batched_forward_grad=False, + sample_inputs_func=sample_inputs_as_strided_scatter, + error_inputs_func=error_inputs_as_strided_scatter, + skips=( + DecorateInfo(unittest.skip('Works for int64, fails for everything else'), 'TestCommon', 'test_noncontiguous_samples'), # noqa: B950 + DecorateInfo(unittest.skip('Fails in most cases, passes on LAZY for some reason'), 'TestCommon', 'test_variant_consistency_eager'), # noqa: B950 + DecorateInfo(unittest.skip('Fails on cuda + rocm'), 'TestCommon', 'test_complex_half_reference_testing'), + DecorateInfo(unittest.expectedFailure, 'TestBwdGradients', 'test_fn_grad'), + DecorateInfo(unittest.expectedFailure, 'TestFwdGradients', 'test_forward_mode_AD'), + DecorateInfo(unittest.skip('Passes on complex128 and float64 only'), 'TestFwdGradients', 'test_fn_fwgrad_bwgrad'), + # AssertionError: Tensor-likes are not close! 
(new_empty_strided.default) + DecorateInfo(unittest.skip("Expected: new_empty_strided is not comparable"), 'TestDecomp', 'test_comprehensive'),)), + OpInfo('native_layer_norm', + aten_name='native_layer_norm', + ref=reference_native_layer_norm, + dtypes=floating_types_and(torch.bfloat16), + dtypesIfCUDA=floating_types_and(torch.float16, torch.bfloat16), + supports_out=False, + assert_jit_shape_analysis=True, + supports_fwgrad_bwgrad=True, + sample_inputs_func=sample_inputs_native_layer_norm, + error_inputs_func=error_inputs_native_layer_norm, + skips=( + # IndexError: tuple index out of range + DecorateInfo(unittest.skip('Skipped!'), 'TestFwdGradients', 'test_forward_mode_AD'), + # Tests fail when weight=None and bias is defined + # https://github.com/pytorch/pytorch/issues/79705 + DecorateInfo(unittest.expectedFailure, 'TestBwdGradients', 'test_fn_gradgrad'), + # JIT test also tries to compute double backward, which fails + DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_variant_consistency_jit'), + DecorateInfo(unittest.skip("Unsupported on MPS for now"), 'TestCommon', 'test_numpy_ref_mps'), + )), + OpInfo('native_batch_norm', + aten_name='native_batch_norm', + dtypes=floating_types_and(torch.float16, torch.bfloat16), + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + assert_jit_shape_analysis=True, + sample_inputs_func=sample_inputs_native_batch_norm, + skips=( + # NotImplementedError: Could not run + # 'aten::native_batch_norm.out' with arguments from the 'CPU' backend. + DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_out_warning', device_type="cpu"), + # RuntimeError: out_invstd.dim() == 1 && out_invstd.is_contiguous() && out_invstd.sizes()[0] + DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_out', device_type="cuda"), + # Problem with _get_numerical_jacobian + # IndexError: tuple index out of range + DecorateInfo(unittest.skip("Skipped!"), 'TestFwdGradients', 'test_forward_mode_AD'), + # RuntimeError: deepEquals(input.iValue, deepCopiedInput) INTERNAL ASSERT FAILED + DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_variant_consistency_jit'), + # https://github.com/pytorch/pytorch/issues/85960 + DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_compare_cpu'), + # AssertionError: Booleans mismatch: True is not False + DecorateInfo(unittest.skip("Skipped!"), 'TestFakeTensor', 'test_fake_autocast'), + DecorateInfo(unittest.skip("Skipped!"), 'TestFakeTensor', 'test_fake'), + DecorateInfo(toleranceOverride({torch.float32: tol(atol=5e-5, rtol=5e-5)}), + "TestCompositeCompliance", "test_forward_ad"), + ) + ), + OpInfo('_native_batch_norm_legit', + aten_name='_native_batch_norm_legit', + dtypes=floating_types_and(torch.float16, torch.bfloat16), + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + assert_jit_shape_analysis=True, + sample_inputs_func=sample_inputs__native_batch_norm_legit, + skips=( + # NotImplementedError: Could not run + # 'aten::native_batch_norm.out' with arguments from the 'CPU' backend. 
+ DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_out_warning', device_type="cpu"), + # RuntimeError: out_invstd.dim() == 1 && out_invstd.is_contiguous() && out_invstd.sizes()[0] + DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_out', device_type="cuda"), + # Problem with _get_numerical_jacobian + # IndexError: tuple index out of range + DecorateInfo(unittest.skip("Skipped!"), 'TestFwdGradients', 'test_forward_mode_AD'), + # RuntimeError: deepEquals(input.iValue, deepCopiedInput) INTERNAL ASSERT FAILED + DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_variant_consistency_jit'), + # https://github.com/pytorch/pytorch/issues/85960 + DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_compare_cpu'), + DecorateInfo(toleranceOverride({torch.float32: tol(atol=5e-5, rtol=5e-5)}), + "TestCompositeCompliance", "test_forward_ad"), + ) + ), + OpInfo('nn.functional.cosine_similarity', + aten_name="cosine_similarity", + dtypes=floating_types_and(torch.half, torch.bfloat16), + supports_out=False, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + sample_inputs_func=sample_inputs_cosine_similarity), + OpInfo('nn.functional.adaptive_avg_pool1d', + dtypes=floating_types_and(torch.half, torch.bfloat16), + supports_out=False, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + gradcheck_nondet_tol=GRADCHECK_NONDET_TOL, + error_inputs_func=error_inputs_adaptive_avg_pool1d, + sample_inputs_func=sample_inputs_adaptive_avg_pool1d), + OpInfo('nn.functional.adaptive_avg_pool2d', + dtypes=floating_types_and(torch.half, torch.bfloat16), + decorators=( + # RuntimeError: + # adaptive_avg_pool2d(Tensor input, int[2] output_size) -> (Tensor): + # Expected a value of type 'List[int]' for argument 'output_size' but + # instead found type 'Tuple[NoneType, int]'. : + # File "", line 3 + # def the_method(i0): + # return torch.nn.functional.adaptive_avg_pool2d(i0, (None, 7)) + # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ <--- HERE + DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_variant_consistency_jit'), + ), + supports_out=False, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + gradcheck_nondet_tol=GRADCHECK_NONDET_TOL, + error_inputs_func=error_inputs_adaptive_avg_pool2d, + sample_inputs_func=sample_inputs_adaptive_avg_pool2d), + OpInfo('nn.functional.adaptive_avg_pool3d', + dtypes=floating_types_and(torch.half, torch.bfloat16), + dtypesIfCUDA=floating_types_and(torch.half, torch.bfloat16), + decorators=( + # RuntimeError: + # adaptive_avg_pool3d(Tensor input, int[3] output_size) -> (Tensor): + # Expected a value of type 'List[int]' for argument 'output_size' but + # instead found type 'Tuple[NoneType, NoneType, NoneType]'. 
: + # File "", line 3 + # + # def the_method(i0): + # return torch.nn.functional.adaptive_avg_pool3d(i0, (None, None, None)) + # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ <--- HERE + # + DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_variant_consistency_jit'), + ), + # Runs very slowly on slow gradcheck - alternatively reduce input sizes + gradcheck_fast_mode=True, + supports_out=False, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + gradcheck_nondet_tol=GRADCHECK_NONDET_TOL, + error_inputs_func=error_inputs_adaptive_avg_pool3d, + sample_inputs_func=sample_inputs_adaptive_avg_pool3d), + OpInfo('nn.functional.adaptive_max_pool1d', + dtypes=floating_types_and(torch.half, torch.bfloat16), + supports_out=False, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + # got: Batching rule not implemented for aten::flatten.using_ints + check_batched_forward_grad=False, + gradcheck_nondet_tol=GRADCHECK_NONDET_TOL, + error_inputs_func=error_inputs_adaptive_max_pool1d, + sample_inputs_func=sample_inputs_adaptive_max_pool1d), + OpInfo('nn.functional.adaptive_max_pool2d', + dtypes=floating_types_and(torch.half, torch.bfloat16), + decorators=( + # RuntimeError: + # adaptive_max_pool2d(Tensor input, int[2] output_size) -> (Tensor): + # Expected a value of type 'List[int]' for argument 'output_size' but + # instead found type 'Tuple[NoneType, int]'. : + # File "", line 3 + # def the_method(i0): + # return torch.nn.functional.adaptive_max_pool2d(i0, (None, 7)) + # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ <--- HERE + DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_variant_consistency_jit'), + ), + supports_out=False, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + # got: Batching rule not implemented for aten::flatten.using_ints + check_batched_forward_grad=False, + gradcheck_nondet_tol=GRADCHECK_NONDET_TOL, + error_inputs_func=error_inputs_adaptive_max_pool2d, + sample_inputs_func=sample_inputs_adaptive_max_pool2d), + OpInfo('nn.functional.adaptive_max_pool3d', + dtypes=floating_types_and(torch.bfloat16), + dtypesIfCUDA=floating_types_and(torch.half, torch.bfloat16), + decorators=( + # RuntimeError: + # adaptive_max_pool3d(Tensor input, int[3] output_size) -> (Tensor): + # Expected a value of type 'List[int]' for argument 'output_size' but + # instead found type 'Tuple[NoneType, NoneType, NoneType]'. 
: + # File "", line 3 + # + # def the_method(i0): + # return torch.nn.functional.adaptive_max_pool3d(i0, (None, None, None)) + # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ <--- HERE + # + DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_variant_consistency_jit'), + ), + supports_out=False, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + # got: Batching rule not implemented for aten::flatten.using_ints + check_batched_forward_grad=False, + gradcheck_nondet_tol=GRADCHECK_NONDET_TOL, + error_inputs_func=error_inputs_adaptive_max_pool3d, + sample_inputs_func=sample_inputs_adaptive_max_pool3d), + OpInfo('nn.functional.avg_pool1d', + aten_name='avg_pool1d', + supports_autograd=True, + supports_out=False, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + dtypes=floating_types_and(torch.int64, torch.bfloat16), + dtypesIfCUDA=floating_types_and(torch.float16, torch.bfloat16), + gradcheck_nondet_tol=GRADCHECK_NONDET_TOL, + error_inputs_func=error_inputs_avg_pool1d, + sample_inputs_func=sample_inputs_avgpool1d), + OpInfo('nn.functional.avg_pool3d', + aten_name='avg_pool3d', + supports_autograd=True, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + dtypes=floating_types_and(torch.int64), + dtypesIfCUDA=floating_types_and(torch.float16, torch.bfloat16), + gradcheck_nondet_tol=GRADCHECK_NONDET_TOL, + error_inputs_func=error_inputs_avg_pool3d, + sample_inputs_func=sample_inputs_avgpool3d, + skips=( + # AssertionError: Tensor-likes are not close! + DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_out', device_type='cpu'), + )), + OpInfo( + "nn.functional.binary_cross_entropy_with_logits", + aten_name="binary_cross_entropy_with_logits", + supports_autograd=True, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + supports_out=False, + dtypes=floating_types_and(torch.half, torch.bfloat16), + gradcheck_nondet_tol=GRADCHECK_NONDET_TOL, + sample_inputs_func=sample_inputs_binary_cross_entropy_with_logits, + skips=( + DecorateInfo( + unittest.skip("Skipped!"), + 'TestJit', + 'test_variant_consistency_jit', + dtypes=(torch.float32,) + ), + ), + ), + UnaryUfuncInfo( + 'nn.functional.relu', + aten_name="relu", + ref=lambda a: np.where(a <= 0, 0, a), + supports_autograd=True, + supports_sparse=True, + supports_sparse_csr=True, + supports_sparse_csc=True, + supports_sparse_bsr=True, + supports_sparse_bsc=True, + dtypes=all_types_and(torch.half, torch.bfloat16), + sample_inputs_func=sample_inputs_nn_activation_relu, + supports_out=False, + supports_fwgrad_bwgrad=True, + supports_forward_ad=True), + OpInfo('nn.functional.conv_transpose1d', + # `ref` for this function is backward of + # corresponding `conv*d` + ref=partial(conv_transpose_ref, fn=torch.nn.functional.conv_transpose1d), + aten_name='conv_transpose1d', + aliases=('conv_transpose1d',), + dtypes=floating_and_complex_types_and(torch.int64, torch.float16, torch.bfloat16), + dtypesIfCUDA=floating_and_complex_types_and(torch.float16, torch.chalf, + torch.bfloat16), + sample_inputs_func=sample_inputs_conv_transpose1d, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + assert_jit_shape_analysis=True, + gradcheck_nondet_tol=GRADCHECK_NONDET_TOL, + decorators=( + DecorateInfo( + toleranceOverride({torch.float32: tol(atol=1e-04, rtol=1.3e-06), }), + 'TestCommon', 'test_variant_consistency_eager', device_type='cuda'), + DecorateInfo( + toleranceOverride({torch.chalf: tol(atol=5e-2, rtol=5e-2), }), + 'TestCommon', 'test_complex_half_reference_testing'), + DecorateInfo( + toleranceOverride({torch.float: 
tol(atol=1.5e-5, rtol=1.5e-5), }), + 'TestCommon', 'test_numpy_ref_mps'), + DecorateInfo( + toleranceOverride({torch.half: tol(atol=1e-3, rtol=2e-3), }), + 'TestInductorOpInfo', 'test_comprehensive', device_type='cpu'), + ), + skips=( + # Reason for Skip: https://github.com/pytorch/pytorch/pull/79694#issuecomment-1186949486 + DecorateInfo(unittest.skip("Skipped!"), 'TestJit', 'test_variant_consistency_jit', + dtypes=(torch.complex64,)), + # RuntimeError: UNSUPPORTED DTYPE: complex + DecorateInfo(unittest.expectedFailure, 'TestNNCOpInfo', 'test_nnc_correctness', + dtypes=(torch.complex64, torch.complex128)), + # RuntimeError: !lhs.isAliasOf(rhs)INTERNAL ASSERT FAILED at + # "../torch/csrc/jit/passes/utils/check_alias_annotation.cpp":104, please report a bug to PyTorch. + DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_variant_consistency_jit', + dtypes=(torch.float,)), + # RuntimeError: "slow_conv2d_cpu_grad_input" not implemented for 'Long' + DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_numpy_ref', + dtypes=(torch.int64,)), + ), + supports_out=False,), + OpInfo('nn.functional.conv_transpose2d', + aten_name='conv_transpose2d', + aliases=('conv_transpose2d',), + # `ref` for this function is backward of + # corresponding `conv*d` + ref=partial(conv_transpose_ref, fn=torch.nn.functional.conv_transpose2d), + dtypes=floating_and_complex_types_and(torch.int64, torch.float16, torch.bfloat16), + dtypesIfCUDA=floating_and_complex_types_and(torch.float16, torch.chalf, + torch.bfloat16), + sample_inputs_func=sample_inputs_conv_transpose2d, + # Runs very slowly on slow-gradcheck for complex. + gradcheck_fast_mode=True, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + assert_jit_shape_analysis=True, + gradcheck_nondet_tol=GRADCHECK_NONDET_TOL, + decorators=[ + DecorateInfo( + toleranceOverride({torch.float32: tol(atol=1e-04, rtol=1.3e-06), }), + 'TestCommon', 'test_variant_consistency_eager', device_type='cuda'), + DecorateInfo( + toleranceOverride({torch.float32: tol(atol=2e-05, rtol=5e-05), }), + 'TestCommon', 'test_noncontiguous_samples', device_type='cuda'), + DecorateInfo( + toleranceOverride({torch.chalf: tol(atol=8e-2, rtol=8e-2), }), + 'TestCommon', 'test_complex_half_reference_testing'), + DecorateInfo( + toleranceOverride({torch.half: tol(atol=1e-3, rtol=2e-3), }), + 'TestInductorOpInfo', 'test_comprehensive', device_type='cpu')], + skips=( + # RuntimeError: !lhs.isAliasOf(rhs)INTERNAL ASSERT FAILED at + # "../torch/csrc/jit/passes/utils/check_alias_annotation.cpp":104, please report a bug to PyTorch. 
+ DecorateInfo(unittest.skip("Skipped!"), 'TestJit', 'test_variant_consistency_jit'), + # RuntimeError: UNSUPPORTED DTYPE: complex + DecorateInfo(unittest.expectedFailure, 'TestNNCOpInfo', 'test_nnc_correctness', + dtypes=(torch.complex64, torch.complex128)), + # RuntimeError: "slow_conv2d_cpu_grad_input" not implemented for 'Long' + DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_numpy_ref', + dtypes=(torch.int64,)), + # Reference: https://github.com/pytorch/pytorch/issues/86356 + DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_numpy_ref', + dtypes=(torch.double, torch.cdouble)), + DecorateInfo(unittest.skip("Unsupported on MPS for now"), 'TestCommon', 'test_numpy_ref_mps'), + # AssertionError: None mismatch: torch.complex64 is not None + DecorateInfo(unittest.expectedFailure, 'TestDtypeCustomRules', 'test_custom_rules', + dtypes=(torch.complex64, torch.complex128)), + ), + supports_out=False,), + OpInfo('nn.functional.conv_transpose3d', + aten_name='conv_transpose3d', + aliases=('conv_transpose3d',), + # `ref` for this function is backward of + # corresponding `conv*d` + ref=partial(conv_transpose_ref, fn=torch.nn.functional.conv_transpose3d), + dtypes=floating_and_complex_types_and(torch.int64, torch.float16, torch.bfloat16), + dtypesIfCUDA=floating_and_complex_types_and( + torch.float16, torch.chalf, torch.bfloat16), + sample_inputs_func=sample_inputs_conv_transpose3d, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + assert_jit_shape_analysis=True, + # Runs very slowly on slow-gradcheck - alternatively reduce input sizes + gradcheck_fast_mode=True, + gradcheck_nondet_tol=GRADCHECK_NONDET_TOL, + decorators=[ + DecorateInfo( + toleranceOverride({torch.float16: tol(atol=5e-2, rtol=5e-2), }), + 'TestInductorOpInfo', 'test_comprehensive', device_type='cuda'), + DecorateInfo( + toleranceOverride({torch.float32: tol(atol=1e-04, rtol=1.3e-06), + torch.complex64: tol(atol=1.3e-04, rtol=1.3e-05)}), + 'TestCommon', 'test_variant_consistency_eager', device_type='cuda'), + DecorateInfo( + toleranceOverride({torch.float32: tol(atol=2e-04, rtol=2e-04), }), + 'TestCompositeCompliance', 'test_operator', device_type='cuda'), + DecorateInfo( + toleranceOverride({torch.float32: tol(atol=1.3e-04, rtol=1.3e-06), + torch.complex64: tol(atol=1.3e-04, rtol=1.3e-05)}), + 'TestCommon', 'test_noncontiguous_samples', device_type='cuda'), + DecorateInfo( + toleranceOverride({torch.float32: tol(atol=1e-04, rtol=2e-05), }), + 'TestCompositeCompliance', 'test_forward_ad', device_type='cuda', + active_if=TEST_CUDNN), + DecorateInfo( + toleranceOverride({torch.complex64: tol(atol=1e-4, rtol=1e-4)}), + "TestMathBits", "test_conj_view", device_type='cuda'), + DecorateInfo( + toleranceOverride({torch.chalf: tol(atol=9e-2, rtol=9e-2), }), + 'TestCommon', 'test_complex_half_reference_testing'), + DecorateInfo( + toleranceOverride({torch.half: tol(atol=1e-3, rtol=2e-1), }), + 'TestInductorOpInfo', 'test_comprehensive', device_type='cpu')], + skips=( + # RuntimeError: !lhs.isAliasOf(rhs)INTERNAL ASSERT FAILED at + # "../torch/csrc/jit/passes/utils/check_alias_annotation.cpp":104, please report a bug to PyTorch. 
+ DecorateInfo(unittest.skip("Skipped!"), 'TestJit', 'test_variant_consistency_jit'), + # RuntimeError: "slow_conv3d_cpu_grad_input" not implemented for 'Long' + DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_numpy_ref', + dtypes=(torch.int64,)), + # Reference: https://github.com/pytorch/pytorch/issues/86356 + DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_numpy_ref', + dtypes=(torch.double, torch.cdouble)), + DecorateInfo(unittest.skip("Unsupported on MPS for now"), 'TestCommon', 'test_numpy_ref_mps'), + # RuntimeError: UNSUPPORTED DTYPE: complex + DecorateInfo(unittest.expectedFailure, 'TestNNCOpInfo', 'test_nnc_correctness', + dtypes=(torch.complex64, torch.complex128)), + DecorateInfo(unittest.skip('Skipped for ROCm!'), 'TestCommon', 'test_complex_half_reference_testing', + dtypes=[torch.complex32], active_if=TEST_WITH_ROCM), + ), + supports_out=False,), + OpInfo('nn.functional.conv1d', + aliases=('conv1d',), + aten_name='conv1d', + dtypes=floating_and_complex_types_and(torch.int64, torch.float16, torch.bfloat16), + dtypesIfCUDA=floating_and_complex_types_and(torch.float16, torch.chalf, + torch.bfloat16), + sample_inputs_func=sample_inputs_conv1d, + error_inputs_func=error_inputs_conv1d, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + assert_jit_shape_analysis=True, + gradcheck_nondet_tol=GRADCHECK_NONDET_TOL, + decorators=( + DecorateInfo( + toleranceOverride({torch.chalf: tol(atol=1e-2, rtol=5e-2)}), + 'TestCommon', 'test_complex_half_reference_testing' + ), + DecorateInfo( + toleranceOverride({torch.float16: tol(atol=2e-3, rtol=1e-3)}), + 'TestInductorOpInfo', 'test_comprehensive', device_type='cuda', + ), + ), + skips=( + # RuntimeError: !lhs.isAliasOf(rhs)INTERNAL ASSERT FAILED at + # "../torch/csrc/jit/passes/utils/check_alias_annotation.cpp":103, please report a bug to PyTorch. + DecorateInfo(unittest.skip("Skipped!"), 'TestJit', 'test_variant_consistency_jit'), + # Ref: https://github.com/pytorch/pytorch/issues/75309 + # AssertionError: None mismatch: torch.complex128 is not None + DecorateInfo(unittest.expectedFailure, 'TestDtypeCustomRules', + 'test_custom_rules', dtypes=(torch.complex64, torch.complex128)), + # Ref: https://github.com/pytorch/pytorch/issues/75309 + # RuntimeError: UNSUPPORTED DTYPE: complex + DecorateInfo(unittest.expectedFailure, 'TestNNCOpInfo', + 'test_nnc_correctness', dtypes=(torch.complex64, torch.complex128)), + ), + supports_expanded_weight=True, + supports_out=False,), + OpInfo('nn.functional.conv2d', + aliases=('conv2d',), + aten_name='conv2d', + dtypes=floating_and_complex_types_and(torch.int64, torch.float16, torch.bfloat16), + dtypesIfCUDA=floating_and_complex_types_and(torch.float16, torch.chalf, + torch.bfloat16), + sample_inputs_func=partial(sample_inputs_conv2d), + error_inputs_func=error_inputs_conv2d, + gradcheck_nondet_tol=GRADCHECK_NONDET_TOL, + # Runs very slowly on slow gradcheck - alternatively reduce input sizes + gradcheck_fast_mode=True, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + assert_jit_shape_analysis=True, + decorators=( + DecorateInfo( + toleranceOverride({torch.chalf: tol(atol=6e-2, rtol=5e-2)}), + 'TestCommon', 'test_complex_half_reference_testing', + ), + ), + skips=( + # RuntimeError: !lhs.isAliasOf(rhs)INTERNAL ASSERT FAILED at + # "../torch/csrc/jit/passes/utils/check_alias_annotation.cpp":103, please report a bug to PyTorch. 
+ DecorateInfo(unittest.skip("Works on some configs!"), 'TestJit', 'test_variant_consistency_jit'), + # Ref: https://github.com/pytorch/pytorch/issues/75309 + # AssertionError: None mismatch: torch.complex128 is not None + DecorateInfo(unittest.expectedFailure, 'TestDtypeCustomRules', + 'test_custom_rules', dtypes=(torch.complex64, torch.complex128)), + # RuntimeError: UNSUPPORTED DTYPE: complex + DecorateInfo(unittest.expectedFailure, 'TestNNCOpInfo', + 'test_nnc_correctness', dtypes=(torch.complex64, torch.complex128)), + ), + supports_expanded_weight=True, + supports_out=False,), + OpInfo('nn.functional.conv3d', + aliases=('conv3d',), + aten_name='conv3d', + dtypes=floating_and_complex_types_and(torch.int64, torch.bfloat16, torch.float16), + dtypesIfCUDA=floating_and_complex_types_and(torch.float16, torch.chalf, torch.bfloat16), + sample_inputs_func=sample_inputs_conv3d, + error_inputs_func=error_inputs_conv3d, + gradcheck_nondet_tol=GRADCHECK_NONDET_TOL, + gradcheck_fast_mode=True, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + decorators=( + DecorateInfo( + toleranceOverride({torch.chalf: tol(atol=6e-2, rtol=5e-2)}), + 'TestCommon', 'test_complex_half_reference_testing', + ), + ), + skips=( + # RuntimeError: !lhs.isAliasOf(rhs) INTERNAL ASSERT FAILED at + # "../torch/csrc/jit/passes/utils/check_alias_annotation.cpp":103, please report a bug to PyTorch. + DecorateInfo(unittest.skip("Skipped!"), 'TestJit', 'test_variant_consistency_jit'), + # RuntimeError: UNSUPPORTED DTYPE: complex + DecorateInfo(unittest.expectedFailure, 'TestNNCOpInfo', + 'test_nnc_correctness', dtypes=(torch.complex64, torch.complex128)), + # RuntimeError: Conv3D is not supported on MPS + DecorateInfo(unittest.expectedFailure, 'TestConsistency'), + # AssertionError: Tensor-likes are not close! + # break slow tests + DecorateInfo(unittest.skip('Skipped!'), 'TestCommon', 'test_compare_cpu'), + ), + supports_expanded_weight=True, + supports_out=False,), + OpInfo('nn.functional.group_norm', + aten_name='group_norm', + aliases=('group_norm',), + ref=reference_group_norm, + dtypes=floating_types_and(torch.float16, torch.bfloat16), + supports_out=False, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + error_inputs_func=error_inputs_group_norm, + decorators=[ + # RuntimeError: Cannot insert a Tensor that requires grad as a constant. + # Consider making it a parameter or input, or detaching the gradient + DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_variant_consistency_jit', dtypes=(torch.float32,)) + ], + sample_inputs_func=sample_inputs_group_norm, + reference_inputs_func=reference_inputs_group_norm, + supports_expanded_weight=True,), + OpInfo('nn.functional.instance_norm', + # no ref because instance_norm will often have numerical instability (large numbers or nan) + dtypes=floating_types_and(torch.float16, torch.bfloat16), + supports_out=False, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + decorators=[ + # RuntimeError: Cannot insert a Tensor that requires grad as a constant. 
+ # Consider making it a parameter or input, or detaching the gradient + DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_variant_consistency_jit', dtypes=(torch.float32,)), + DecorateInfo(unittest.expectedFailure, 'TestCompositeCompliance', 'test_forward_ad', + active_if=TEST_WITH_ROCM) + ], + sample_inputs_func=sample_inputs_instance_norm, + supports_expanded_weight=True,), + OpInfo('nn.functional.layer_norm', + aten_name='layer_norm', + aten_backward_name='layer_norm_backward', + aliases=('layer_norm',), + ref=reference_layer_norm, + dtypes=floating_types_and(torch.bfloat16), + dtypesIfCUDA=floating_types_and(torch.float16, torch.bfloat16), + supports_out=False, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + assert_jit_shape_analysis=True, + decorators=[ + DecorateInfo( + toleranceOverride({torch.float32: tol(atol=1e-05, rtol=1e-03)}), + 'TestCommon', 'test_numpy_refs' + ), + DecorateInfo(unittest.skip("Bug in MPS backend!"), 'TestCommon', 'test_numpy_ref_mps'), + ], + sample_inputs_func=sample_inputs_layer_norm, + supports_expanded_weight=True,), + OpInfo('nn.functional.local_response_norm', + dtypes=floating_types_and(torch.int64, torch.bfloat16), + dtypesIfCUDA=floating_types_and(torch.float16, torch.bfloat16), + supports_out=False, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + decorators=[ + # RuntimeError: falseINTERNAL ASSERT FAILED at + # "../torch/csrc/jit/passes/utils/check_alias_annotation.cpp":185, please report a bug to PyTorch. + DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_variant_consistency_jit', dtypes=(torch.float32,)), + ], + sample_inputs_func=sample_inputs_local_response_norm,), + OpInfo('constant_pad_nd', + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + dtypes=all_types_and_complex_and(torch.bool, torch.bfloat16, torch.half), + sample_inputs_func=sample_inputs_constant_pad_nd, + supports_out=False, + skips=( + # bool can't be passed to Scalar arguments in JIT tracer because + # BoolType is not a subtype of ScalarType. + DecorateInfo( + unittest.expectedFailure, 'TestNNCOpInfo', + 'test_nnc_correctness', dtypes=(torch.bool,)), + )), + OpInfo('nn.functional.pad', + variant_test_name='constant', + aten_name='constant_pad_nd', + # Runs very slowly on slow gradcheck - alternatively reduce input sizes + gradcheck_fast_mode=True, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + dtypes=all_types_and_complex_and(torch.bool, torch.bfloat16, torch.half), + sample_inputs_func=partial(sample_inputs_nn_pad, mode='constant'), + supports_out=False), + OpInfo('nn.functional.pad', + variant_test_name='reflect', + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + dtypes=all_types_and_complex_and(torch.bfloat16), + dtypesIfCUDA=all_types_and_complex_and(torch.half, torch.bfloat16), + sample_inputs_func=partial(sample_inputs_nn_pad, mode='reflect'), + skips=( + # Doesn't have a corresponding aten operator. + # RuntimeError: falseINTERNAL ASSERT FAILED at + # "../torch/csrc/jit/passes/utils/check_alias_annotation.cpp":185, please report a bug to PyTorch. 
+ DecorateInfo(unittest.skip("Skipped!"), 'TestJit', 'test_variant_consistency_jit', dtypes=(torch.float32,)), + ), + gradcheck_nondet_tol=GRADCHECK_NONDET_TOL, + supports_out=False), + OpInfo('nn.functional.pad', + variant_test_name='replicate', + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + dtypes=all_types_and_complex_and(torch.bfloat16), + dtypesIfCUDA=all_types_and_complex_and(torch.half, torch.bfloat16), + sample_inputs_func=partial(sample_inputs_nn_pad, mode='replicate'), + skips=( + # Doesn't have a corresponding aten operator. + # RuntimeError: falseINTERNAL ASSERT FAILED at + # "../torch/csrc/jit/passes/utils/check_alias_annotation.cpp":185, please report a bug to PyTorch. + DecorateInfo(unittest.skip("Skipped!"), 'TestJit', 'test_variant_consistency_jit', dtypes=(torch.float32,)), + ), + gradcheck_nondet_tol=GRADCHECK_NONDET_TOL, + supports_out=False), + OpInfo('nn.functional.pad', + variant_test_name='replicate_negative', + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + dtypes=all_types_and_complex_and(torch.bfloat16), + dtypesIfCUDA=all_types_and_complex_and(torch.half, torch.bfloat16), + sample_inputs_func=sample_inputs_nn_pad_replicate_negative, + skips=( + # Doesn't have a corresponding aten operator. + # RuntimeError: falseINTERNAL ASSERT FAILED at + # "../torch/csrc/jit/passes/utils/check_alias_annotation.cpp":185, please report a bug to PyTorch. + DecorateInfo(unittest.skip("Skipped!"), 'TestJit', 'test_variant_consistency_jit', dtypes=(torch.float32,)), + # Some negative padding cases cause a segfault on MPS + DecorateInfo(unittest.skip("Not fully supported on MPS"), 'TestConsistency'), + ), + gradcheck_nondet_tol=GRADCHECK_NONDET_TOL, + supports_out=False), + OpInfo('nn.functional.pad', + variant_test_name='circular', + dtypes=all_types_and_complex_and(torch.bool, torch.bfloat16, torch.half), + sample_inputs_func=partial(sample_inputs_nn_pad, mode='circular'), + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + check_batched_grad=False, + # https://github.com/pytorch/pytorch/issues/66357 + check_batched_forward_grad=False, + skips=( + # Doesn't have a corresponding aten operator. + # RuntimeError: falseINTERNAL ASSERT FAILED at + # "../torch/csrc/jit/passes/utils/check_alias_annotation.cpp":185, please report a bug to PyTorch. 
+ DecorateInfo(unittest.skip("Skipped!"), 'TestJit', 'test_variant_consistency_jit', dtypes=(torch.float32,)), + # Difference from is larger with decomposition new_empty_strided.default than original on output 0 + DecorateInfo(unittest.skip("Expected: new_empty_strided is not comparable"), 'TestDecomp', 'test_comprehensive'), + ), + supports_out=False), + OpInfo('nn.functional.hardswish', + aten_name="hardswish", + aten_backward_name='hardswish_backward', + supports_autograd=True, + assert_autodiffed=True, + sample_inputs_func=sample_inputs_hardswish, + dtypes=floating_types_and(torch.bfloat16, torch.half), + supports_gradgrad=True, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + supports_out=False, + autodiff_nonfusible_nodes=["aten::hardswish"]), + OpInfo('nn.functional.unfold', + aten_name='im2col', + dtypes=floating_and_complex_types_and(torch.half, torch.bfloat16), + dtypesIfCUDA=floating_and_complex_types_and(torch.half, torch.bfloat16), + sample_inputs_func=sample_inputs_nn_unfold, + # Runs very slowly on slow gradcheck - alternatively reduce input sizes + gradcheck_fast_mode=True, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + supports_out=False, + skips=( + # NOTE: this failure may not reproduce consistently on different systems + # false INTERNAL ASSERT FAILED at "...torch/csrc/jit/passes/utils/check_alias_annotation.cpp":185 + DecorateInfo(unittest.skip("Internal assert failed!"), 'TestJit', 'test_variant_consistency_jit'), + )), + OpInfo('nn.functional.interpolate', + aten_name="interpolate", + variant_test_name='nearest', + supports_autograd=True, + supports_fwgrad_bwgrad=True, + supports_forward_ad=True, + dtypes=floating_types_and(torch.uint8, torch.bfloat16), + dtypesIfCUDA=floating_types_and(torch.half, torch.bfloat16, torch.uint8), + sample_inputs_func=partial(sample_inputs_interpolate, 'nearest'), + skips=( + # RuntimeError: false + # INTERNAL ASSERT FAILED at "../torch/csrc/jit/passes/utils/check_alias_annotation.cpp":185, + # please report a bug to PyTorch. + DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_variant_consistency_jit'), + ), + supports_out=False), + OpInfo('nn.functional.interpolate', + aten_name="interpolate", + variant_test_name='nearest-exact', + supports_autograd=True, + supports_fwgrad_bwgrad=True, + supports_forward_ad=True, + dtypes=floating_types_and(torch.uint8), + dtypesIfCUDA=floating_types_and(torch.half, torch.bfloat16, torch.uint8), + sample_inputs_func=partial(sample_inputs_interpolate, 'nearest-exact'), + skips=( + # RuntimeError: false + # INTERNAL ASSERT FAILED at "../torch/csrc/jit/passes/utils/check_alias_annotation.cpp":185, + # please report a bug to PyTorch. + DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_variant_consistency_jit'), + # RuntimeError: aten::_upsample_nearest_exact*d hit the vmap fallback which is currently disabled + DecorateInfo(unittest.expectedFailure, 'TestOperators', 'test_vmapjvpall_has_batch_rule'), + DecorateInfo(unittest.expectedFailure, 'TestOperators', 'test_vmapvjp_has_batch_rule'), + DecorateInfo(unittest.expectedFailure, 'TestVmapOperatorsOpInfo', 'test_op_has_batch_rule'), + # NotImplementedError: The operator 'aten::_upsample_nearest_exact3d.out' is not currently implemented + # for the MPS device. 
+ DecorateInfo(unittest.expectedFailure, 'TestConsistency'), + ), + supports_out=False), + OpInfo('nn.functional.interpolate', + aten_name="interpolate", + variant_test_name='linear', + supports_autograd=True, + supports_fwgrad_bwgrad=True, + supports_forward_ad=True, + dtypes=floating_types_and(torch.bfloat16), + dtypesIfCUDA=floating_types_and(torch.half, torch.bfloat16), + sample_inputs_func=partial(sample_inputs_interpolate, 'linear'), + skips=( + # RuntimeError: false + # INTERNAL ASSERT FAILED at "../torch/csrc/jit/passes/utils/check_alias_annotation.cpp":185, + # please report a bug to PyTorch. + DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_variant_consistency_jit'), + ), + supports_out=False), + OpInfo('nn.functional.interpolate', + aten_name="interpolate", + variant_test_name='bilinear', + supports_fwgrad_bwgrad=True, + supports_autograd=True, + supports_forward_ad=True, + dtypes=floating_types_and(torch.uint8, torch.bfloat16), + dtypesIfCUDA=floating_types_and(torch.half, torch.bfloat16), + gradcheck_nondet_tol=GRADCHECK_NONDET_TOL, + sample_inputs_func=partial(sample_inputs_interpolate, 'bilinear'), + reference_inputs_func=partial(reference_inputs_interpolate, 'bilinear'), + skips=( + # RuntimeError: false + # INTERNAL ASSERT FAILED at "../torch/csrc/jit/passes/utils/check_alias_annotation.cpp":185, + # please report a bug to PyTorch. + DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_variant_consistency_jit'), + ), + supports_out=False), + OpInfo('nn.functional.interpolate', + aten_name="interpolate", + variant_test_name='bicubic', + supports_autograd=True, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + dtypes=floating_types_and(torch.uint8, torch.bfloat16), + dtypesIfCUDA=floating_types_and(torch.half, torch.bfloat16), + sample_inputs_func=partial(sample_inputs_interpolate, 'bicubic'), + reference_inputs_func=partial(reference_inputs_interpolate, 'bicubic'), + gradcheck_nondet_tol=GRADCHECK_NONDET_TOL, + skips=( + # RuntimeError: false + # INTERNAL ASSERT FAILED at "../torch/csrc/jit/passes/utils/check_alias_annotation.cpp":185, + # please report a bug to PyTorch. + DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_variant_consistency_jit'), + ), + supports_out=False), + OpInfo('nn.functional.interpolate', + aten_name="interpolate", + variant_test_name='trilinear', + supports_autograd=True, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + dtypes=floating_types_and(torch.bfloat16), + dtypesIfCUDA=floating_types_and(torch.half, torch.bfloat16), + gradcheck_nondet_tol=GRADCHECK_NONDET_TOL, + sample_inputs_func=partial(sample_inputs_interpolate, 'trilinear'), + skips=( + # RuntimeError: false + # INTERNAL ASSERT FAILED at "../torch/csrc/jit/passes/utils/check_alias_annotation.cpp":185, + # please report a bug to PyTorch. + DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_variant_consistency_jit'), + ), + supports_out=False), + OpInfo('nn.functional.interpolate', + aten_name="interpolate", + variant_test_name='area', + supports_autograd=True, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + dtypes=floating_types_and(torch.half, torch.bfloat16), + dtypesIfCUDA=floating_types_and(torch.half, torch.bfloat16), + sample_inputs_func=partial(sample_inputs_interpolate, 'area'), + gradcheck_nondet_tol=GRADCHECK_NONDET_TOL, + skips=( + # RuntimeError: false + # INTERNAL ASSERT FAILED at "../torch/csrc/jit/passes/utils/check_alias_annotation.cpp":185, + # please report a bug to PyTorch. 
+ DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_variant_consistency_jit'), + ), + supports_out=False), + OpInfo('nn.functional.upsample_bilinear', + supports_autograd=True, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + dtypes=floating_types_and(torch.uint8, torch.bfloat16), + dtypesIfCUDA=floating_types_and(torch.half, torch.bfloat16), + gradcheck_nondet_tol=GRADCHECK_NONDET_TOL, + sample_inputs_func=partial(sample_inputs_upsample, 'bilinear'), + reference_inputs_func=partial(reference_inputs_upsample, 'bilinear'), + skips=( + # RuntimeError: false + # INTERNAL ASSERT FAILED at "../torch/csrc/jit/passes/utils/check_alias_annotation.cpp":185, + # please report a bug to PyTorch. + DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_variant_consistency_jit'), + ), + supports_out=False), + OpInfo('_upsample_bilinear2d_aa', + op=torch.ops.aten._upsample_bilinear2d_aa, + aten_name='_upsample_bilinear2d_aa', + supports_autograd=True, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + dtypes=floating_types_and(torch.uint8), + dtypesIfCUDA=floating_types_and(torch.half, torch.bfloat16), + gradcheck_nondet_tol=GRADCHECK_NONDET_TOL, + sample_inputs_func=partial(sample_inputs_upsample_aa, 'bilinear'), + supports_out=False, + skips=( + DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_variant_consistency_jit'), + DecorateInfo(unittest.expectedFailure, 'TestDTensorOps', 'test_dtensor_op_db'), + DecorateInfo(unittest.expectedFailure, 'TestEagerFusionOpInfo', 'test_aot_autograd_symbolic_exhaustive'), + DecorateInfo(unittest.expectedFailure, 'TestInductorOpInfo', 'test_comprehensive'), + DecorateInfo(unittest.expectedFailure, 'TestMathBits', 'test_neg_view'), + )), + OpInfo( + "nn.functional.soft_margin_loss", + dtypes=floating_types_and(torch.half, torch.bfloat16), + supports_out=False, + supports_forward_ad=True, + # doesn't support grad on target + sample_inputs_func=partial(sample_inputs_loss, rhs_requires_grad=False), + error_inputs_func=error_inputs_soft_margin_loss, + ), + OpInfo('nn.functional.upsample_nearest', + supports_autograd=True, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + dtypes=floating_types_and(torch.uint8, torch.bfloat16), + dtypesIfCUDA=floating_types_and(torch.half, torch.uint8, torch.bfloat16), + gradcheck_nondet_tol=GRADCHECK_NONDET_TOL, + sample_inputs_func=partial(sample_inputs_upsample, 'nearest'), + skips=( + # RuntimeError: false + # INTERNAL ASSERT FAILED at "../torch/csrc/jit/passes/utils/check_alias_annotation.cpp":185, + # please report a bug to PyTorch. 
+ DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_variant_consistency_jit'), + ), + supports_out=False), + OpInfo( + "nn.functional.margin_ranking_loss", + dtypes=all_types_and(torch.half, torch.bfloat16), + supports_out=False, + sample_inputs_func=sample_inputs_margin_ranking_loss, + error_inputs_func=error_inputs_margin_ranking_loss, + reference_inputs_func=reference_inputs_margin_ranking_loss, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True), + OpInfo( + "nn.functional.multi_margin_loss", + dtypes=floating_types(), + dtypesIfCUDA=floating_types_and(torch.bfloat16, torch.float16), + supports_out=False, + supports_gradgrad=False, + sample_inputs_func=sample_inputs_multi_margin_loss, + reference_inputs_func=reference_inputs_multi_margin_loss, + error_inputs_func=error_inputs_multi_margin_loss, + decorators=( + DecorateInfo( + toleranceOverride({torch.float32: tol(atol=1e-4, rtol=1e-4)}), + "TestJit", + "test_variant_consistency_jit", + ), + ), + ), + OpInfo( + "nn.functional.multilabel_margin_loss", + dtypes=floating_types(), + dtypesIfCUDA=floating_types_and(torch.bfloat16, torch.float16), + supports_out=False, + supports_gradgrad=False, + sample_inputs_func=sample_inputs_multilabel_margin_loss, + reference_inputs_func=reference_inputs_multilabel_margin_loss, + error_inputs_func=error_inputs_multilabel_margin_loss, + ), + OpInfo('nn.functional.leaky_relu', + aliases=None, + aten_name="leaky_relu", + aten_backward_name='leaky_relu_backward', + sample_inputs_func=sample_inputs_leaky_relu, + dtypes=floating_types_and(torch.bfloat16, torch.float16), + inplace_variant=lambda x, negative_slope=0.01: + torch.nn.functional.leaky_relu(x, negative_slope, inplace=True), + supports_autograd=True, + assert_autodiffed=True, + supports_gradgrad=True, + supports_out=False, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + autodiff_nonfusible_nodes=["aten::leaky_relu"]), + OpInfo( + "nn.functional.multilabel_soft_margin_loss", + supports_out=False, + dtypes=floating_types_and(torch.half, torch.bfloat16), + sample_inputs_func=sample_inputs_multilabel_soft_margin_loss, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + decorators=( + DecorateInfo( + toleranceOverride({torch.float32: tol(atol=1e-4, rtol=1e-4)}), + "TestJit", + "test_variant_consistency_jit", + ), + ), + skips=( + # AssertionError: False is not true : Scalars failed to compare as equal! 0 != 4096 + # __main__.TestJitCUDA.test_variant_consistency_jit_nn_functional_multilabel_soft_margin_loss_cuda_float32 + # leaked 4096 bytes CUDA memory on device 0 + DecorateInfo( + # Skip instead of expectedFailure because this fails + # locally for me but passes in CI. 
+ unittest.skip("Skipped!"), + "TestJit", + "test_variant_consistency_jit", + device_type="cuda", + ), + ), + ), + OpInfo('nn.functional.avg_pool2d', + aten_name='avg_pool2d', + supports_autograd=True, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + dtypes=floating_types_and(torch.int64, torch.bfloat16), + dtypesIfCUDA=floating_types_and(torch.float16, torch.bfloat16), + error_inputs_func=error_inputs_avg_pool2d, + sample_inputs_func=sample_inputs_avgpool2d, + skips=( + DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_out', device_type='cuda'), + )), + OpInfo('nn.functional.fractional_max_pool2d', + supports_autograd=True, + supports_out=False, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + op=lambda input, *args, **kwargs: + wrapper_set_seed(torch.nn.functional.fractional_max_pool2d, input, *args, **kwargs), + # vmap does not support random operations + check_batched_forward_grad=False, + dtypes=floating_types(), + dtypesIfCUDA=floating_types_and(torch.float16), + test_neg_view=False, + sample_inputs_func=sample_inputs_fractional_max_pool2d, + decorators=( + # FIXME: AssertionError: False is not true : Tensors failed to compare as equal! + DecorateInfo(unittest.expectedFailure, 'TestNormalizeOperators', 'test_normalize_operator_exhaustive'), + # RuntimeError: input->type()->kind() == TypeKind::OptionalType + # INTERNAL ASSERT FAILED at "../torch/csrc/jit/passes/utils/check_alias_annotation.cpp":270 + DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_variant_consistency_jit')), + skips=( + DecorateInfo(unittest.skip('output is non-deterministic'), 'TestCommon', 'test_compare_cpu'),)), + OpInfo('nn.functional.fractional_max_pool3d', + supports_autograd=True, + supports_out=False, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + op=lambda input, *args, **kwargs: + wrapper_set_seed(torch.nn.functional.fractional_max_pool3d, input, *args, **kwargs), + # vmap does not support random operations + check_batched_forward_grad=False, + dtypes=floating_types(), + dtypesIfCUDA=floating_types_and(torch.float16), + test_neg_view=False, + gradcheck_nondet_tol=GRADCHECK_NONDET_TOL, + sample_inputs_func=sample_inputs_fractional_max_pool3d, + decorators=( + # FIXME: both derivatives are implemented incorrectly + # https://github.com/pytorch/pytorch/issues/69322 + # FIXME: AssertionError: False is not true : Tensors failed to compare as equal! 
+ DecorateInfo(unittest.expectedFailure, 'TestNormalizeOperators', 'test_normalize_operator_exhaustive'), + # RuntimeError: input->type()->kind() == TypeKind::OptionalType + # INTERNAL ASSERT FAILED at "../torch/csrc/jit/passes/utils/check_alias_annotation.cpp":270 + DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_variant_consistency_jit')), + skips=( + DecorateInfo(unittest.skip('output is non-deterministic'), 'TestCommon', 'test_compare_cpu'),)), + OpInfo('nn.functional.max_pool1d', + aten_name='max_pool1d', + supports_autograd=True, + supports_out=False, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + # got: Batching rule not implemented for aten::flatten.using_ints + check_batched_forward_grad=False, + # TODO: add shape checks + assert_jit_shape_analysis=False, + dtypes=floating_types_and(torch.bfloat16, torch.float16), + dtypesIfCUDA=floating_types_and(torch.float16, torch.bfloat16), + skips=( + # Pre-existing condition; Needs to be fixed + DecorateInfo(unittest.skip("Works on some configs"), 'TestNNCOpInfo', + 'test_nnc_correctness', dtypes=(torch.bfloat16,)), + # RuntimeError: The tensor has a non-zero number of elements, but its data is not allocated yet. + # Caffe2 uses a lazy allocation, so you will need to call mutable_data() or raw_mutable_data() + # to actually allocate memory + DecorateInfo(unittest.skip("Skipped!"), 'TestTags', 'test_tags'), + ), + error_inputs_func=error_inputs_max_pool1d, + sample_inputs_func=sample_inputs_max_pool), + OpInfo('nn.functional.max_pool2d', + aten_name='max_pool2d', + # Runs very slowly on slow gradcheck - alternatively reduce input sizes + gradcheck_fast_mode=True, + # Vmap is not happy with non-contiguous (channels_last) inputs + check_batched_gradgrad=False, + supports_out=False, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + # got: Batching rule not implemented for aten::flatten.using_ints + check_batched_forward_grad=False, + assert_jit_shape_analysis=True, + dtypes=all_types_and(torch.float16, torch.bfloat16), + dtypesIfCUDA=floating_types_and(torch.float16, torch.bfloat16), + error_inputs_func=error_inputs_max_pool2d, + sample_inputs_func=sample_inputs_max_pool), + OpInfo('max_pool2d_with_indices_backward', + op=max_pool2d_backward, + # We've defined a custom op, so there's no corresponding aten op + aten_name=None, + method_variant=None, + inplace_variant=None, + operator_variant=None, + inplace_operator_variant=None, + check_batched_gradgrad=False, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + check_batched_forward_grad=False, + assert_jit_shape_analysis=False, + dtypes=floating_types_and(torch.bfloat16, torch.float16), + sample_inputs_func=sample_inputs_max_pool, + skips=( + # We've defined a custom op here, and we don't handle the case where we receive an out kwarg + DecorateInfo(unittest.skip("Skipped!"), "TestCommon", "test_out"), + DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_out_warning'), + # FX failed to normalize op - add the op to the op_skip list. 
+ DecorateInfo(unittest.expectedFailure, 'TestNormalizeOperators', 'test_normalize_operator_exhaustive'), + # object has no attribute max_pool2d_with_indices_backward (It's not available on torch -- so expected) + DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_variant_consistency_jit') + )), + OpInfo('nn.functional.max_pool3d', + aten_name='max_pool3d', + # Runs very slowly on slow gradcheck - alternatively reduce input sizes + gradcheck_fast_mode=True, + supports_out=False, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + # got: Batching rule not implemented for aten::flatten.using_ints + check_batched_forward_grad=False, + # TODO: add shape checks + assert_jit_shape_analysis=False, + dtypes=all_types_and(torch.bfloat16, torch.float16), + dtypesIfCUDA=floating_types_and(torch.float16, torch.bfloat16), + # TODO: investigate nondeterminism + gradcheck_nondet_tol=GRADCHECK_NONDET_TOL, + error_inputs_func=error_inputs_max_pool3d, + sample_inputs_func=sample_inputs_max_pool), + OpInfo('nn.functional.max_unpool1d', + aten_name='max_unpool1d', + supports_autograd=True, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + supports_out=False, + assert_jit_shape_analysis=False, + dtypes=floating_types(), + dtypesIfCUDA=floating_types_and(torch.float16), + sample_inputs_func=sample_inputs_max_unpool, + skips=( + # Gradients are tested in `variant_test_name=grad` below. + # We skip tests here because there is non-determinism in backward + # with gather, when there are writes into the same memory location, + # and if there are several indices pointing to the same memory, + # gradcheck is oblivious about that and cannot perturb them all at once + # (see sample_inputs_max_unpool_grad to find out more). + DecorateInfo(unittest.skip("Skipped!"), 'TestBwdGradients', 'test_fn_grad'), + DecorateInfo(unittest.skip("Skipped!"), 'TestBwdGradients', 'test_fn_gradgrad'), + DecorateInfo(unittest.skip("Skipped!"), 'TestFwdGradients', 'test_forward_mode_AD', + active_if=(not IS_MACOS)), + DecorateInfo(unittest.skip("Skipped!"), 'TestCompositeCompliance', 'test_forward_ad', + device_type='cpu'), + )), + OpInfo('nn.functional.max_unpool1d', + variant_test_name='grad', + aten_name='max_unpool1d', + supports_autograd=True, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + supports_out=False, + assert_jit_shape_analysis=False, + dtypes=floating_types(), + dtypesIfCUDA=floating_types_and(torch.float16), + sample_inputs_func=sample_inputs_max_unpool_grad), + OpInfo('nn.functional.max_unpool2d', + aten_name='max_unpool2d', + supports_autograd=True, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + supports_out=False, + assert_jit_shape_analysis=False, + dtypes=floating_types(), + dtypesIfCUDA=floating_types_and(torch.float16), + sample_inputs_func=sample_inputs_max_unpool, + skips=( + # Gradients are tested in `variant_test_name=grad` below. + # We skip tests here because there is non-determinism in backward + # with gather, when there are writes into the same memory location, + # and if there are several indices pointing to the same memory, + # gradcheck is oblivious about that and cannot perturb them all at once + # (see sample_inputs_max_unpool_grad to find out more). 
+ DecorateInfo(unittest.skip("Skipped!"), 'TestFwdGradients', 'test_forward_mode_AD', + active_if=(not IS_MACOS)), + DecorateInfo(unittest.skip("Skipped!"), 'TestBwdGradients', 'test_fn_gradgrad'), + DecorateInfo(unittest.skip("Skipped!"), 'TestBwdGradients', 'test_fn_grad'), + DecorateInfo(unittest.skip("Skipped!"), 'TestCompositeCompliance', 'test_forward_ad'), + )), + OpInfo('nn.functional.max_unpool2d', + variant_test_name='grad', + aten_name='max_unpool2d', + # Runs very slowly on slow gradcheck - alternatively reduce input sizes + gradcheck_fast_mode=True, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + # Vmap is not happy with non-contiguous (channels_last) inputs + check_batched_grad=False, + supports_out=False, + assert_jit_shape_analysis=False, + dtypes=floating_types(), + dtypesIfCUDA=floating_types_and(torch.float16), + sample_inputs_func=sample_inputs_max_unpool_grad), + OpInfo('nn.functional.max_unpool3d', + aten_name='max_unpool3d', + # Runs very slowly on slow gradcheck - alternatively reduce input sizes + gradcheck_fast_mode=True, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + supports_out=False, + assert_jit_shape_analysis=False, + dtypes=floating_types(), + dtypesIfCUDA=floating_types_and(torch.float16), + sample_inputs_func=sample_inputs_max_unpool, + skips=( + # Gradients are tested in `variant_test_name=grad` below. + # We skip tests here because there is non-determinism in backward + # with gather, when there are writes into the same memory location, + # and if there are several indices pointing to the same memory, + # gradcheck is oblivious about that and cannot perturb them all at once + # (see sample_inputs_max_unpool_grad to find out more). + DecorateInfo(unittest.skip("Skipped!"), 'TestFwdGradients', 'test_forward_mode_AD', + active_if=(not IS_MACOS)), + DecorateInfo(unittest.skip("Skipped!"), 'TestBwdGradients', 'test_fn_gradgrad'), + DecorateInfo(unittest.skip("Skipped!"), 'TestBwdGradients', 'test_fn_grad'), + DecorateInfo(unittest.skip("Skipped!"), 'TestCompositeCompliance', 'test_forward_ad'), + )), + OpInfo('nn.functional.max_unpool3d', + variant_test_name='grad', + aten_name='max_unpool3d', + supports_autograd=True, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + supports_out=False, + assert_jit_shape_analysis=False, + dtypes=floating_types(), + dtypesIfCUDA=floating_types_and(torch.float16), + sample_inputs_func=sample_inputs_max_unpool_grad), + OpInfo('nn.functional.linear', + aten_name='linear', + supports_autograd=True, + supports_gradgrad=True, + sample_inputs_func=sample_inputs_linear, + dtypes=all_types_and_complex_and(torch.float16, torch.bfloat16), + dtypesIfROCM=floating_and_complex_types_and(torch.float16, torch.bfloat16), + dtypesIfCUDA=floating_and_complex_types_and(torch.float16, torch.bfloat16), + backward_dtypesIfCUDA=floating_and_complex_types_and(torch.float16, torch.bfloat16), + # linear calls mm under the hood which is nondeterministic on CUDA + # https://pytorch.org/docs/stable/generated/torch.use_deterministic_algorithms.html#torch.use_deterministic_algorithms + gradcheck_nondet_tol=GRADCHECK_NONDET_TOL, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + # See https://github.com/pytorch/pytorch/issues/66357 + check_batched_forward_grad=False, + supports_expanded_weight=True, + decorators=( + # Strides are not the same! 
+ DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_out'), + )), + OpInfo('nn.functional.bilinear', + aten_name='bilinear', + supports_autograd=True, + sample_inputs_func=sample_inputs_bilinear, + dtypes=all_types_and(torch.float16, torch.bfloat16), + dtypesIfCUDA=floating_types_and(torch.float16, + *[torch.bfloat16] if SM53OrLater or TEST_WITH_ROCM else []), + decorators=( + DecorateInfo(toleranceOverride({torch.float16: tol(atol=5e-05, rtol=1e-03)}), + 'TestInductorOpInfo', 'test_comprehensive', device_type='cpu'), + ), + skips=( + # NVIDIA only assures that bfloat16 is supported by bmm if SM >= 5.3 + DecorateInfo(unittest.skip("Skipped!"), 'TestCommon', 'test_dtypes', device_type='cuda', active_if=not SM53OrLater), + DecorateInfo(unittest.skip("Skipped!"), 'TestNNCOpInfo', 'test_nnc_correctness', dtypes=(torch.bfloat16,)), + ), + # Runs very slowly on slow gradcheck - alternatively reduce input sizes + gradcheck_fast_mode=True, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + supports_out=False), + OpInfo('nn.functional.glu', + aten_name='glu', + # Runs very slowly on slow gradcheck - alternatively reduce input sizes + gradcheck_fast_mode=True, + sample_inputs_func=sample_inputs_glu, + dtypes=floating_types_and(torch.bfloat16, torch.float16), + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + supports_out=False), + UnaryUfuncInfo( + 'nn.functional.elu', + aten_backward_name='elu_backward', + ref=lambda x, alpha=1.0, inplace=False: + np.maximum(0., x) + np.minimum(0., alpha * (np.exp(x) - 1)), + dtypes=floating_types_and(torch.bfloat16, torch.float16), + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + supports_autograd=True, + assert_autodiffed=False, + supports_gradgrad=True, + supports_out=False, + sample_kwargs=lambda device, dtype, input: + ({'alpha': 0.8}, {'alpha': 0.8}), + inplace_variant=lambda x, alpha=1.0: + torch.nn.functional.elu(x, alpha, inplace=True), + decorators=[ + DecorateInfo( + toleranceOverride({ + torch.float16: tol(atol=1e-03, rtol=1.2e-03), + torch.bfloat16: tol(atol=1e-03, rtol=1.2e-03) + }), + 'TestUnaryUfuncs', device_type='cuda', + ), ], + ), + # Marked as a Unary function because it has some rather odd broadcasting semantics in its + # second argument + UnaryUfuncInfo( + 'nn.functional.prelu', + aten_backward_name='_prelu_kernel_backward', + ref=lambda x, weight: + np.maximum(0., x) + np.minimum(0., x) * + (weight if x.ndim == 1 else weight.reshape([weight.size if i == 1 else 1 for i in range(0, x.ndim)])), + dtypes=floating_types_and(torch.bfloat16, torch.float16), + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + supports_autograd=True, + assert_autodiffed=False, + supports_gradgrad=True, + supports_out=False, + # test_reference_numerics only tests the case when the weight tensor is a scalar + sample_kwargs=sample_kwargs_prelu_scalar_weight, + error_inputs_func=error_inputs_prelu, + sample_inputs_func=sample_inputs_prelu, + reference_inputs_func=reference_inputs_prelu, + decorators=[ + # RuntimeError: Cannot insert a Tensor that requires grad as a constant. 
+ # Consider making it a parameter or input, or detaching the gradient + # https://github.com/pytorch/pytorch/issues/68752 + DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_variant_consistency_jit'), ], + ), + UnaryUfuncInfo( + 'nn.functional.celu', + ref=lambda x, alpha=1.0, inplace=False: + np.maximum(0., x) + np.minimum(0., alpha * (np.exp(x / alpha) - 1)), + dtypes=floating_types_and(torch.bfloat16, torch.float16), + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + supports_autograd=True, + assert_autodiffed=False, + supports_gradgrad=True, + supports_out=False, + sample_kwargs=lambda device, dtype, input: + ({'alpha': 0.8}, {'alpha': 0.8}), + inplace_variant=lambda x, alpha=1.0: + torch.nn.functional.celu(x, alpha, inplace=True), + decorators=[ + DecorateInfo( + toleranceOverride({ + torch.float16: tol(atol=1e-03, rtol=1.2e-03), + torch.bfloat16: tol(atol=1e-03, rtol=1.2e-03) + }), + 'TestUnaryUfuncs', device_type='cuda', + ), ], + ), + UnaryUfuncInfo( + 'nn.functional.rrelu', + aten_backward_name='rrelu_with_noise_backward', + op=lambda input, *args, **kwargs: + wrapper_set_seed(torch.nn.functional.rrelu, input, *args, **kwargs), + inplace_variant=lambda input, *args, **kwargs: + wrapper_set_seed(torch.nn.functional.rrelu, input, *args, inplace=True, **kwargs), + dtypes=floating_types_and(torch.bfloat16), + dtypesIfCUDA=floating_types_and(torch.float16, torch.bfloat16), + gradcheck_wrapper=wrapper_set_seed, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + supports_out=False, + sample_kwargs=lambda device, dtype, input: + (dict(lower=0., upper=1., training=True), dict(lower=0., upper=1., training=True)), + sample_inputs_func=partial(sample_inputs_elementwise_unary, op_kwargs=dict(lower=0., upper=1., training=True)), + error_inputs_func=error_inputs_rrelu, + decorators=( + DecorateInfo( + toleranceOverride({ + torch.float16: tol(atol=1e-03, rtol=1.2e-03), + torch.bfloat16: tol(atol=1e-03, rtol=1.2e-03) + }), + 'TestUnaryUfuncs', device_type='cuda', + ),), + skips=( + # lambda impl + DecorateInfo(unittest.expectedFailure, 'TestNormalizeOperators', 'test_normalize_operator_exhaustive'), + # lambda impl + DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_variant_consistency_jit'), + # In-place operations do not play well with forward AD + # https://github.com/pytorch/pytorch/issues/77447 + DecorateInfo(unittest.expectedFailure, 'TestFwdGradients', + 'test_inplace_forward_mode_AD'), + # The noise vector that's generated in these tests is not the same elementwise + DecorateInfo(unittest.skip("Different noise"), 'TestUnaryUfuncs', 'test_batch_vs_slicing'), + DecorateInfo(unittest.skip("Different noise"), 'TestUnaryUfuncs', 'test_contig_vs_every_other'), + DecorateInfo(unittest.skip("Different noise"), 'TestUnaryUfuncs', 'test_non_contig_expand'), + DecorateInfo(unittest.skip("Different noise"), 'TestUnaryUfuncs', 'test_contig_vs_transposed'), + DecorateInfo(unittest.skip('output is non-deterministic'), 'TestCommon', 'test_compare_cpu'))), + UnaryUfuncInfo( + 'nn.functional.selu', + ref=lambda x, inplace=False: + 1.0507009873554804934193349852946 * ( + np.maximum(0., x) + np.minimum(0., 1.6732632423543772848170429916717 * (np.exp(x) - 1)) + ), + dtypes=floating_types_and(torch.bfloat16, torch.float16), + supports_forward_ad=True, # depends on 'elu' + supports_fwgrad_bwgrad=True, + supports_autograd=True, + assert_autodiffed=False, + supports_gradgrad=True, + supports_out=False, + inplace_variant=lambda x: torch.nn.functional.selu(x, inplace=True), + 
decorators=[ + DecorateInfo( + toleranceOverride({ + torch.float16: tol(atol=1e-2, rtol=1.8e-2), + torch.bfloat16: tol(atol=1e-2, rtol=1.8e-2) + }), + 'TestUnaryUfuncs', device_type='cuda', + ), ], + ), + OpInfo( + 'torch._scaled_mm', + sample_inputs_func=sample_inputs_scaled_mm, + dtypes=empty_types(), + dtypesIfCUDA=empty_types() + (torch.float8_e4m3fn,), + supports_out=True, + supports_forward_ad=False, + supports_autograd=False, + decorators=[skipCUDAIf(not SM90OrLater or TEST_WITH_ROCM, 'Requires CUDA SM >= 9.0')], + skips=() + ), + OpInfo( + 'nn.functional.scaled_dot_product_attention', + op=lambda *args, **kwargs: + wrapper_set_seed(torch.nn.functional.scaled_dot_product_attention, *args, **kwargs), + sample_inputs_func=sample_inputs_scaled_dot_product_attention, + dtypes=floating_types_and(torch.float16, torch.bfloat16), + supports_out=False, + supports_forward_ad=False, + supports_fwgrad_bwgrad=True, + check_batched_forward_grad=False, + decorators=[DecorateInfo(toleranceOverride( + {torch.float32: tol(atol=5e-05, rtol=5e-6)}), 'TestCommon',), ], + skips=( + # When attn mask is a composite tensor this fails backward by returning a none + DecorateInfo(unittest.skip("Skipped!"), 'TestCompositeCompliance', 'test_backward', device_type='cuda'), + # This is only failing on Linux Bionic 3.10 Cuda 11.6 + DecorateInfo(unittest.skip("Skipped!"), 'TestCommon', 'test_dtypes', + device_type='cuda', active_if=_get_torch_cuda_version() >= (11, 6)), + DecorateInfo(unittest.skip("Skipped!"), 'TestCommon', 'test_noncontiguous_samples', + dtypes=(torch.float32,)), + # AssertionError: JIT Test does not execute any logic + DecorateInfo(unittest.skip("Skipped!"), 'TestJit', 'test_variant_consistency_jit'), + # Forward works for dtype=float64 which is the math path + DecorateInfo(unittest.skip("Skipped!"), 'TestFwdGradients', 'test_forward_mode_AD'), + # Not implemented for Forward AD + DecorateInfo(unittest.skip("Skipped!"), 'TestFwdGradients', 'test_fn_fwgrad_bwgrad', + device_type='cpu'), + # Not implemented for backward derivative + DecorateInfo(unittest.skip("Skipped!"), 'TestBwdGradients', 'test_fn_gradgrad', + device_type='cpu'), + # CPU and CUDA have inconsistencies for intermediate outputs + DecorateInfo(unittest.skip("Skipped!"), 'TestMeta', 'test_dispatch_meta_outplace', + device_type='cpu'), + DecorateInfo(unittest.skip("Skipped!"), 'TestMeta', 'test_dispatch_symbolic_meta_outplace', + device_type='cpu'), + # When changing input from Tensor to CompositeCompliantTensor, input.requires_grad() changes from true to false + DecorateInfo(unittest.skip("Skipped!"), 'TestCompositeCompliance', 'test_backward', + device_type='cpu'), + # OpInfo was implemented with a lambda + DecorateInfo(unittest.skip("Skipped!"), 'TestNormalizeOperators', 'test_normalize_operator_exhaustive'), + # TODO Need to understand what this is testing and why it doesn't work + DecorateInfo(unittest.skip("Skipped"), 'TestDecomp', 'test_comprehensive'), + DecorateInfo(unittest.skip('output is non-deterministic (when dropout_p > 0)'), 'TestCommon', 'test_compare_cpu'), + # TODO skip this for now since we can't skip on runtime arch support + DecorateInfo(unittest.skip('This is '), 'TestInductorOpInfo', 'test_comprehensive'), + # skip for sm < 80 + DecorateInfo(unittest.skip("Skipped!"), 'TestSchemaCheckModeOpInfo', 'test_schema_correctness', + device_type='cuda', dtypes=(torch.bfloat16,), active_if=not SM80OrLater),), + ), + OpInfo( + 'torch.ops.aten._flash_attention_forward', + 
sample_inputs_func=sample_inputs_flash_attention_forward,
+ dtypes=empty_types(),
+ dtypesIfCUDA=custom_types(torch.float16)
+ if not SM80OrLater
+ else custom_types(torch.float16, torch.bfloat16),
+ supports_out=False,
+ supports_autograd=True,
+ supports_fwgrad_bwgrad=False,
+ supports_forward_ad=False,
+ check_batched_forward_grad=False,
+ decorators=[skipCUDAIf(not PLATFORM_SUPPORTS_FLASH_ATTENTION, "This platform doesn't support Flash Attention")],
+ skips=(
+ # Device mismatch due to philox seed and offset
+ DecorateInfo(unittest.expectedFailure, 'TestFakeTensor', 'test_fake_autocast', device_type='cuda'),
+ DecorateInfo(unittest.expectedFailure, 'TestFakeTensor', 'test_fake', device_type='cuda'),
+ # Checking the scalar value of the philox seed and offset
+ DecorateInfo(unittest.expectedFailure, 'TestCompositeCompliance', 'test_operator', device_type='cuda'),
+ DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_noncontiguous_samples', device_type='cuda'),
+ DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_variant_consistency_jit', device_type='cuda'),
+ # None Mismatch Tensor
+ DecorateInfo(unittest.expectedFailure, 'TestCompositeCompliance', 'test_backward', device_type='cuda'),
+ )
+ ),
+ OpInfo(
+ 'torch.ops.aten._efficient_attention_forward',
+ sample_inputs_func=sample_inputs_efficient_attention_forward,
+ dtypes=empty_types(),
+ dtypesIfCUDA=custom_types(torch.float16, torch.float32)
+ if not SM80OrLater
+ else custom_types(torch.float16, torch.float32, torch.bfloat16),
+ supports_out=False,
+ supports_autograd=True,
+ supports_fwgrad_bwgrad=False,
+ supports_forward_ad=False,
+ check_batched_forward_grad=False,
+ decorators=[skipCUDAIf(TEST_WITH_ROCM, "ROCm doesn't support efficient attention")],
+ skips=(
+ # Device mismatch due to philox seed and offset
+ DecorateInfo(unittest.expectedFailure, 'TestFakeTensor', 'test_fake_autocast', device_type='cuda'),
+ DecorateInfo(unittest.expectedFailure, 'TestFakeTensor', 'test_fake', device_type='cuda'),
+ # Checking the scalar value of the philox seed and offset
+ DecorateInfo(unittest.expectedFailure, 'TestCompositeCompliance', 'test_operator', device_type='cuda'),
+ DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_noncontiguous_samples', device_type='cuda'),
+ DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_variant_consistency_jit', device_type='cuda'),
+ # None Mismatch Tensor
+ DecorateInfo(unittest.expectedFailure, 'TestCompositeCompliance', 'test_backward', device_type='cuda'),
+ )
+ ),
+ UnaryUfuncInfo(
+ 'nn.functional.silu',
+ aten_backward_name='silu_backward',
+ ref=lambda x, inplace=False: x / (1 + np.exp(-x)),
+ dtypes=floating_types_and(torch.bfloat16, torch.float16),
+ supports_forward_ad=True,
+ supports_autograd=True,
+ supports_fwgrad_bwgrad=True,
+ assert_autodiffed=True,
+ supports_out=False,
+ inplace_variant=lambda x: torch.nn.functional.silu(x, inplace=True),
+ decorators=[
+ DecorateInfo(
+ toleranceOverride({
+ torch.float16: tol(atol=1e-3, rtol=1e-3),
+ torch.bfloat16: tol(atol=1e-4, rtol=1e-4)
+ }),
+ 'TestUnaryUfuncs', device_type='cuda',
+ ), ],
+ skips=(
+ DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_normal',
+ dtypes=(torch.cfloat,), device_type='cpu'),
+ ),
+ autodiff_nonfusible_nodes=["aten::silu"],
+ ),
+ # TODO: combine this with the nn.functional.silu OpInfo when
+ # complex autodiff for silu is supported or when
+ # the forward bug is fixed
+ # Note: silu errors when given inputs that require grad
+ # but it doesn't support
grad in their dtype + # This is why the dtypes list above passes test_dtypes, + # because it's getting lucky and failing in forward + # because test_dtypes sets requires_grad to True + # THIS IS A BUG + UnaryUfuncInfo( + 'nn.functional.silu', + variant_test_name='complex', + ref=lambda x, inplace=False: + x / (1 + np.exp(-x)), + dtypes=complex_types(), + dtypesIfCUDA=complex_types(), + supports_forward_ad=False, + supports_autograd=False, + assert_autodiffed=False, + supports_out=False, + inplace_variant=lambda x: torch.nn.functional.silu(x, inplace=True), + decorators=[ + DecorateInfo( + toleranceOverride({ + torch.float16: tol(atol=1e-3, rtol=1e-3), + torch.bfloat16: tol(atol=1e-4, rtol=1e-4) + }), + 'TestUnaryUfuncs', device_type='cuda', + ), ], + skips=( + DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_normal', + dtypes=(torch.cfloat,)), + # FIXME: intentionally misreports dtypes + DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_dtypes'), + # FIXME: numpy reference diverges: Comparing (nan+nanj) and (-0+0j) + DecorateInfo(unittest.skip("Skipped!"), + 'TestUnaryUfuncs', 'test_reference_numerics_large', + dtypes=(torch.complex64, torch.cdouble)), + DecorateInfo(unittest.skip("Skipped!"), + 'TestUnaryUfuncs', 'test_reference_numerics_small', + dtypes=(torch.complex64,)), + DecorateInfo(unittest.skip("Skipped!"), + 'TestUnaryUfuncs', 'test_reference_numerics_extremal', + dtypes=(torch.complex64,)))), + UnaryUfuncInfo( + 'nn.functional.hardsigmoid', + aten_backward_name='hardsigmoid_backward', + ref=reference_hardsigmoid, + dtypes=floating_types_and(torch.bfloat16, torch.float16), + supports_autograd=True, + assert_autodiffed=False, + supports_gradgrad=False, + supports_forward_ad=True, + supports_out=False, + inplace_variant=partial(torch.nn.functional.hardsigmoid, inplace=True), + decorators=[ + DecorateInfo( + toleranceOverride({torch.float16: tol(atol=1e-04, rtol=0.001)}), 'TestUnaryUfuncs', device_type='cuda',), ], + skips=[ + # still want to test that first derivative works though second derivative isn't supported + DecorateInfo(unittest.expectedFailure, 'TestBwdGradients', "test_inplace_gradgrad"), + # produces 0 instead of nan on ROCM + DecorateInfo(unittest.expectedFailure, + 'TestUnaryUfuncs', "test_reference_numerics_extremal", + device_type='cuda', + active_if=(TEST_WITH_ROCM)), ] + ), + UnaryUfuncInfo( + 'nn.functional.logsigmoid', + aten_name="log_sigmoid", + aten_backward_name='log_sigmoid_backward', + ref=reference_logsigmoid, + dtypes=floating_types_and(torch.half, torch.bfloat16), + supports_autograd=True, + assert_autodiffed=False, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + supports_gradgrad=True, + # autodiff_nonfusible_nodes=["aten::log_sigmoid"], + decorators=[ + DecorateInfo( + precisionOverride({torch.float16: 1e-2, torch.bfloat16: 5e-3}), + 'TestUnaryUfuncs', 'test_reference_numerics_small'), + DecorateInfo( + precisionOverride({torch.float16: 1e-2, torch.bfloat16: 5e-3}), + 'TestUnaryUfuncs', 'test_reference_numerics_large'), + DecorateInfo( + precisionOverride({torch.float16: 1e-2, torch.bfloat16: 5e-3}), + 'TestUnaryUfuncs', 'test_reference_numerics_extremal'), + ], + skips=( + # Resized a non-empty tensor but did not warn about it. 
+ DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_out_warning', device_type='cpu'), + ), + ), + UnaryUfuncInfo( + 'nn.functional.mish', + aten_backward_name='mish_backward', + ref=lambda x: x * np.tanh(reference_softplus(x)), + dtypes=floating_types_and(torch.bfloat16, torch.float16), + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + supports_autograd=True, + assert_autodiffed=False, + supports_gradgrad=True, + supports_out=False, + inplace_variant=partial(torch.nn.functional.mish, inplace=True), + decorators=[ + DecorateInfo( + toleranceOverride({torch.float16: tol(atol=1e-02, rtol=1e-03)}), 'TestUnaryUfuncs',), ], + ), + UnaryUfuncInfo( + 'nn.functional.softsign', + ref=lambda x: x / (np.abs(x) + 1), + dtypes=all_types_and_complex_and(torch.float16, torch.bfloat16), + dtypesIfCUDA=all_types_and_complex_and(torch.float16, torch.bfloat16, torch.bool), + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + supports_autograd=True, + assert_autodiffed=False, + supports_gradgrad=True, + supports_out=False, + decorators=[ + DecorateInfo( + toleranceOverride({torch.float16: tol(atol=1e-03, rtol=1.3e-04)}), 'TestUnaryUfuncs',), ], + skips=( + DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_small', + dtypes=(torch.int, torch.int8)),), + ), + UnaryUfuncInfo( + 'nn.functional.tanhshrink', + ref=lambda x: x - np.tanh(x), + dtypes=all_types_and_complex_and(torch.half, torch.bfloat16), + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + supports_autograd=True, + assert_autodiffed=False, + supports_gradgrad=True, + supports_out=False, + decorators=[ + DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_normal', + device_type='cpu', dtypes=[torch.cfloat, torch.cdouble]), + DecorateInfo( + toleranceOverride({torch.bfloat16: tol(atol=1e-02, rtol=1.6e-02)}), 'TestUnaryUfuncs',), + DecorateInfo(toleranceOverride({torch.complex64: tol(atol=6e-04, rtol=1e-05), + torch.bfloat16: tol(atol=1e-02, rtol=1.6e-02)}), + 'TestUnaryUfuncs', 'test_reference_numerics_extremal', device_type='cuda'), + ], + skips=( + # in each case, pytorch will produce a nan while numpy will not + DecorateInfo(unittest.skip("Fails on some jobs works on others!"), + 'TestUnaryUfuncs', "test_reference_numerics_large", + dtypes=(torch.complex64, torch.complex128), active_if=(IS_MACOS)), + DecorateInfo(unittest.skip("Fails on some jobs works on others!"), + 'TestUnaryUfuncs', "test_reference_numerics_extremal", + dtypes=(torch.complex64, torch.complex128), device_type='cpu', + active_if=(IS_MACOS or IS_WINDOWS)), + ), + # tan(j * pi/2 * odd_number) is nan which also make tanhshrink nan. 
+ reference_numerics_filter=NumericsFilter( + condition=lambda x: (close_to_int(x / (math.pi * 0.5j)) + if x.is_complex() else x.new_tensor(False, dtype=torch.bool)), + safe_val=0) + ), + UnaryUfuncInfo( + 'nn.functional.threshold', + ref=lambda x, threshold, value: np.where(x <= threshold, value, x).astype(x.dtype), + dtypes=all_types_and(torch.half, torch.bfloat16), + inplace_variant=lambda x, threshold, value: + torch.nn.functional.threshold(x, threshold, value, inplace=True), + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + assert_autodiffed=False, + supports_gradgrad=True, + supports_out=False, + sample_kwargs=lambda device, dtype, input: ({'threshold': float.fromhex('0x1.3ap-3'), + 'value': -9}, + {'threshold': float.fromhex('0x1.3ap-3'), + 'value': -9}), + # TODO(whc) should not need sample_inputs_func, but without it + # kwargs aren't being hooked up properly + sample_inputs_func=sample_inputs_threshold, + ), + OpInfo( + "nn.functional.triplet_margin_loss", + sample_inputs_func=sample_inputs_triplet_margin_loss, + error_inputs_func=error_inputs_triplet_margin_loss, + dtypes=all_types_and_complex_and(torch.half, torch.bfloat16), + supports_out=False, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + ), + OpInfo( + "nn.functional.triplet_margin_with_distance_loss", + sample_inputs_func=partial(sample_inputs_triplet_margin_loss, with_distance=True), + error_inputs_func=error_inputs_triplet_margin_loss, + dtypes=all_types_and_complex_and(torch.half, torch.bfloat16), + supports_out=False, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + skips=( + # This test cannot handle a callable passed to `distance_function`. If we would use + # `distance_function=None`, the test would pass fine. + DecorateInfo( + unittest.expectedFailure, + "TestJit", + "test_variant_consistency_jit", + ), + DecorateInfo( + unittest.expectedFailure, + "TestNormalizeOperators", + "test_normalize_operator_exhaustive", + ), + ), + ), + BinaryUfuncInfo('nextafter', + dtypes=floating_types_and(torch.bfloat16, torch.half), + dtypesIfCUDA=floating_types_and(torch.bfloat16), + supports_autograd=False, + supports_rhs_python_scalar=False), + OpInfo( + "to", + op=lambda x, *args, **kwargs: x.to(*args, **kwargs), + dtypes=all_types_and_complex_and(torch.bfloat16, torch.float16, torch.bool), + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + supports_out=False, + sample_inputs_func=sample_inputs_to, + skips=( + # RuntimeError: undefined value cpu + DecorateInfo( + unittest.skip("Skipped!"), + "TestJit", + "test_variant_consistency_jit", + device_type="cpu", + ), + # NotImplementedError: Cannot copy out of meta tensor; no data! 
+ DecorateInfo( + unittest.skip("Skipped!"), + "TestMeta", + "test_meta_outplace", + ), + # https://github.com/pytorch/pytorch/issues/84335 + DecorateInfo( + unittest.skip("Skipped!"), + "TestProxyTensorOpInfo", + "test_make_fx_symbolic_exhaustive", + ), + DecorateInfo( + unittest.skip("Skipped!"), + "TestNormalizeOperators", + "test_normalize_operator_exhaustive", + ), + ), + ), + OpInfo('topk', + dtypes=all_types_and(torch.bfloat16, torch.float16), + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + assert_jit_shape_analysis=True, + sample_inputs_func=sample_inputs_topk), + # Multiple variants for batch_norm to test with and without cuDNN disabled + # See https://github.com/pytorch/pytorch/pull/63218#discussion_r688549391 for more details + OpInfo('nn.functional.batch_norm', + aten_name='batch_norm', + dtypes=floating_types_and(torch.float16, torch.bfloat16), + supports_out=False, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + assert_jit_shape_analysis=True, + sample_inputs_func=sample_inputs_batch_norm, + skips=( + # see https://github.com/pytorch/pytorch/issues/71286 + DecorateInfo(unittest.expectedFailure, 'TestNNCOpInfo', 'test_nnc_correctness'), + DecorateInfo(unittest.skip('Skipped!'), 'TestNNCOpInfo', 'test_nnc_correctness', + device_type='cpu', dtypes=(torch.bfloat16, torch.float16)), + # Trying to use forward AD with miopen_batch_norm that does not support it + # because it has not been implemented yet. + DecorateInfo(unittest.expectedFailure, 'TestCompositeCompliance', 'test_forward_ad', + device_type="cuda", active_if=TEST_WITH_ROCM), + DecorateInfo(toleranceOverride({torch.float32: tol(atol=5e-05, rtol=1e-05)}), + 'TestCompositeCompliance', 'test_forward_ad', device_type="cpu"), + )), + # This variant tests batch_norm with cuDNN disabled only on CUDA devices + OpInfo('nn.functional.batch_norm', + variant_test_name='without_cudnn', + aten_name='batch_norm', + dtypes=empty_types(), + dtypesIfCUDA=floating_types_and(torch.float16, torch.bfloat16), + supports_out=False, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + decorators=[onlyCUDA, disablecuDNN], + skips=( + DecorateInfo(toleranceOverride({torch.float32: tol(atol=1e-03, rtol=1e-04)}), + 'TestJit', 'test_variant_consistency_jit'), + ), + sample_inputs_func=sample_inputs_batch_norm), + OpInfo( + "nn.functional.binary_cross_entropy", + aten_backward_name='binary_cross_entropy_backward', + sample_inputs_func=sample_inputs_binary_cross_entropy, + dtypes=floating_types(), + dtypesIfCUDA=floating_types_and(torch.float16, torch.bfloat16), + supports_out=False, + gradcheck_fast_mode=False, + supports_autograd=True, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + decorators=( + # RuntimeError: expected int at position 0, but got: Tensor + DecorateInfo( + unittest.skip("Skipped!"), + "TestCudaFuserOpInfo", + ), + # RuntimeError: expected int at position 0, but got: Tensor + DecorateInfo( + unittest.skip("Skipped!"), + "TestNNCOpInfo", + "test_nnc_correctness", + ), + DecorateInfo( + toleranceOverride({torch.float32: tol(atol=1e-3, rtol=1e-3)}), + "TestJit", + "test_variant_consistency_jit", + ), + # RuntimeError: output with shape [] doesn't match the broadcast shape [5, 5] + DecorateInfo(unittest.expectedFailure, 'TestMeta', 'test_dispatch_meta_outplace'), + DecorateInfo(unittest.expectedFailure, 'TestMeta', 'test_dispatch_symbolic_meta_outplace'), + DecorateInfo(unittest.expectedFailure, 'TestMeta', 'test_dispatch_symbolic_meta_outplace_all_strides'), + ), + skips=( + # RuntimeError: 
expected int at position 0, but got: Tensor
+ DecorateInfo(
+ unittest.expectedFailure,
+ "TestJit",
+ "test_variant_consistency_jit",
+ ),
+ ),
+ ),
+ # We have to add 2 OpInfo entries for `igamma` and `igammac`. First is the
+ # standard entry, second is to run gradcheck tests on the second argument.
+ BinaryUfuncInfo('igamma',
+ dtypes=floating_types_and(torch.bfloat16, torch.float16),
+ aliases=('torch.special.gammainc',),
+ dtypesIfCUDA=floating_types(),
+ # TODO: FIXME
+ supports_rhs_python_scalar=False,
+ supports_autograd=False,
+ skips=(
+ # FIXME: incorrectly tries to pass a rhs scalar
+ DecorateInfo(unittest.expectedFailure, 'TestJit',
+ 'test_jit_alias_remapping'),
+ )),
+ # TODO: FIXME, ideally by implementing grad for both inputs
+ # BinaryUfuncInfo('igamma',
+ # variant_test_name='grad_other',
+ # # Since autograd formula is implemented only for other and
+ # # gradcheck test verifies the formula for input in SampleInput,
+ # # we permute the arguments.
+ # op=lambda self, other, **kwargs: torch.igamma(other, self, **kwargs),
+ # inplace_variant=None,
+ # method_variant=None,
+ # supports_rhs_python_scalar=False,
+ # rhs_make_tensor_kwargs=dict(requires_grad=False),
+ # dtypes=floating_types_and(torch.bfloat16, torch.float16),
+ # backward_dtypesIfCPU=floating_types_and(torch.bfloat16),
+ # dtypesIfCUDA=floating_types(),
+ # backward_dtypesIfCUDA=floating_types(),
+ # supports_inplace_autograd=False,
+ # skips=(
+ # # Derivative wrt first tensor not implemented
+ # DecorateInfo(unittest.expectedFailure, "TestCommon",
+ # "test_floating_inputs_are_differentiable"),"),
+ # # test does not work with passing lambda for op
+ # # AssertionError: False is not true : Tensors failed to compare as equal!
+ # DecorateInfo(unittest.skip("Skipped!"), 'TestJit', 'test_variant_consistency_jit'),
+ # # test fails as we permute the arguments for the function variant
+ # # but not for inplace or method.
+ # DecorateInfo(unittest.skip("Skipped!"), 'TestCommon', 'test_variant_consistency_eager'),
+ # # TypeError: igamma(): argument 'input' (position 1) must be Tensor, not float
+ # DecorateInfo(unittest.skip('Skipped!'), 'TestBinaryUfuncs'),
+ # )),
+ BinaryUfuncInfo('igammac',
+ dtypes=floating_types_and(torch.bfloat16, torch.float16),
+ aliases=('torch.special.gammaincc',),
+ dtypesIfCUDA=floating_types(),
+ supports_autograd=False,
+ supports_rhs_python_scalar=False,
+ skips=(
+ # FIXME: incorrectly tries to pass a rhs scalar
+ DecorateInfo(unittest.expectedFailure, 'TestJit',
+ 'test_jit_alias_remapping'),
+ )),
+ # TODO: FIXME, ideally by implementing grad for both inputs
+ # BinaryUfuncInfo('igammac',
+ # variant_test_name='grad_other',
+ # # Since autograd formula is implemented only for other and
+ # # gradcheck test verifies the formula for input in SampleInput,
+ # # we permute the arguments
+ # op=lambda self, other, **kwargs: torch.igammac(other, self, **kwargs),
+ # inplace_variant=None,
+ # method_variant=None,
+ # supports_rhs_python_scalar=False,
+ # rhs_make_tensor_kwargs=dict(requires_grad=False),
+ # dtypes=floating_types_and(torch.bfloat16, torch.float16),
+ # backward_dtypesIfCPU=floating_types_and(torch.bfloat16),
+ # dtypesIfCUDA=floating_types(),
+ # backward_dtypesIfCUDA=floating_types(),
+ # supports_inplace_autograd=False,
+ # decorators=[
+ # # Derivative wrt first tensor not implemented
+ # DecorateInfo(unittest.expectedFailure, "TestCommon",
+ # "test_floating_inputs_are_differentiable"),
+ # ],
+ # skips=(
+ # # test does not work with passing lambda for op
+ # # AssertionError: False is not true : Tensors failed to compare as equal!
+ # DecorateInfo(unittest.skip("Skipped!"), 'TestJit', 'test_variant_consistency_jit'),
+ # # test fails as we permute the arguments for the function variant
+ # # but not for inplace or method.
+ # DecorateInfo(unittest.skip("Skipped!"), 'TestCommon', 'test_variant_consistency_eager'), + # # TypeError: igammac(): argument 'input' (position 1) must be Tensor, not float + # DecorateInfo(unittest.skip('Skipped!'), 'TestBinaryUfuncs'), + # )), + UnaryUfuncInfo('nn.functional.softshrink', + aten_name="softshrink", + aten_backward_name='softshrink_backward', + dtypes=floating_types_and(torch.bfloat16, torch.float16), + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + assert_autodiffed=False, + sample_inputs_func=sample_inputs_softshrink, + error_inputs_func=error_inputs_softshrink), + UnaryUfuncInfo('nn.functional.hardshrink', + aten_name="hardshrink", + aten_backward_name='hardshrink_backward', + dtypes=floating_types_and(torch.bfloat16, torch.float16), + assert_autodiffed=True, + sample_inputs_func=sample_inputs_hardshrink, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + autodiff_nonfusible_nodes=["aten::hardshrink"]), + UnaryUfuncInfo('nn.functional.hardtanh', + aten_name="hardtanh", + aten_backward_name='hardtanh_backward', + dtypes=floating_types_and(torch.int8, torch.int16, torch.int32, torch.int64, torch.half, torch.bfloat16), + backward_dtypes=all_types_and(torch.half, torch.bfloat16), + backward_dtypesIfCUDA=floating_types_and(torch.float16, torch.bfloat16), + assert_autodiffed=True, + sample_inputs_func=sample_inputs_hardtanh, + supports_out=False, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + autodiff_nonfusible_nodes=["aten::hardtanh"]), + OpInfo('nn.functional.gelu', + aten_name="gelu", + aten_backward_name='gelu_backward', + ref=reference_gelu if TEST_SCIPY else None, + error_inputs_func=error_inputs_gelu, + supports_autograd=True, + assert_autodiffed=True, + sample_inputs_func=sample_inputs_gelu, + dtypes=floating_types_and(torch.bfloat16, torch.half), + supports_gradgrad=True, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + autodiff_nonfusible_nodes=["aten::gelu"], + skips=( + # AssertionError: Tensor-likes are not close! 
+ # May not replicate in CI + DecorateInfo(unittest.skip("Skipped!"), 'TestCommon', 'test_out'), + DecorateInfo(unittest.skip("Unsupported on MPS for now"), 'TestCommon', 'test_numpy_ref_mps'), + )), + UnaryUfuncInfo('nn.functional.relu6', + aten_name="relu6", + dtypes=all_types_and(torch.half, torch.bfloat16), + backward_dtypes=floating_types_and(torch.half, torch.bfloat16), + assert_autodiffed=True, + supports_out=False, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + autodiff_nonfusible_nodes=["aten::relu6"]), + OpInfo('mm', + dtypes=all_types_and_complex_and(torch.float16, torch.bfloat16), + dtypesIfCUDA=floating_and_complex_types_and(torch.float16, torch.bfloat16), + assert_autodiffed=True, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + sample_inputs_func=sample_inputs_mm, + skips=( + # Issue with conj and torch dispatch, see https://github.com/pytorch/pytorch/issues/82479 + DecorateInfo( + unittest.skip("Skipped!"), + 'TestSchemaCheckModeOpInfo', + 'test_schema_correctness', + dtypes=(torch.complex64, torch.complex128)), + )), + OpInfo('mode', + op=torch.mode, + dtypes=all_types_and(torch.float16, torch.bfloat16, torch.bool), + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + skips=( + # Resized a non-empty tensor but did not warn about it + DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_out_warning'), + ), + sample_inputs_func=sample_inputs_mode,), + make_mvlgamma_opinfo(variant_test_name='mvlgamma_p_1', + domain=(1, None), + skips=skips_mvlgamma(), + sample_kwargs=lambda device, dtype, input: ({'p': 1}, {'d': 1})), + make_mvlgamma_opinfo(variant_test_name='mvlgamma_p_3', + domain=(2, None), + skips=skips_mvlgamma(), + sample_kwargs=lambda device, dtype, input: ({'p': 3}, {'d': 3})), + make_mvlgamma_opinfo(variant_test_name='mvlgamma_p_5', + domain=(3, None), + skips=skips_mvlgamma(), + sample_kwargs=lambda device, dtype, input: ({'p': 5}, {'d': 5})), + BinaryUfuncInfo('ne', + ref=np.not_equal, + aliases=('not_equal',), + dtypes=all_types_and_complex_and(torch.bool, torch.bfloat16, torch.float16), + always_returns_bool=True, + supports_autograd=False, + skips=( + )), + OpInfo('narrow', + dtypes=all_types_and_complex_and(torch.bool, torch.bfloat16, torch.float16, torch.chalf), + supports_out=False, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + sample_inputs_func=partial(sample_inputs_narrow_narrow_copy, is_narrow=True), + reference_inputs_func=partial(reference_inputs_narrow_narrow_copy, is_narrow=True), + error_inputs_func=partial(error_inputs_narrow_narrow_copy, is_narrow=True, is_ref=False), + skips=( + # Use of .item() + DecorateInfo(unittest.expectedFailure, 'TestCompositeCompliance', 'test_operator'), + DecorateInfo(unittest.expectedFailure, 'TestCompositeCompliance', 'test_backward'), + DecorateInfo(unittest.expectedFailure, 'TestCompositeCompliance', 'test_forward_ad'), + DecorateInfo(unittest.expectedFailure, 'TestMathBits', 'test_conj_view'), + DecorateInfo(unittest.expectedFailure, 'TestMathBits', 'test_neg_view'), + )), + OpInfo('narrow_copy', + dtypes=all_types_and_complex_and(torch.bool, torch.bfloat16, torch.float16, torch.chalf), + supports_out=True, + supports_forward_ad=False, + supports_fwgrad_bwgrad=False, + supports_autograd=False, + # https://github.com/pytorch/pytorch/issues/86931 + sample_inputs_func=partial(sample_inputs_narrow_narrow_copy, is_narrow=False), + reference_inputs_func=partial(reference_inputs_narrow_narrow_copy, is_narrow=False), + 
error_inputs_func=partial(error_inputs_narrow_narrow_copy, is_narrow=False, is_ref=False), + skips=( + # https://github.com/pytorch/pytorch/issues/84577 + DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_out'), + DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_out_warning'), + # Lazy tensor failures: mutating and aliasing ops should all have codegen'd kernels + DecorateInfo(unittest.expectedFailure, 'TestLazyOpInfo', 'test_correctness'), + DecorateInfo(unittest.expectedFailure, 'TestLazyOpInfo', 'test_correctness_with_reusing_ir'), + # Could not run 'aten::narrow_copy.out' with arguments from the 'CUDA' backend + DecorateInfo(unittest.expectedFailure, 'TestMeta', 'test_meta_outplace', + device_type='cuda'), + DecorateInfo(unittest.expectedFailure, 'TestMeta', 'test_dispatch_meta_outplace', + device_type='cuda'), + DecorateInfo(unittest.expectedFailure, 'TestMeta', 'test_dispatch_symbolic_meta_outplace', + device_type='cuda'), + DecorateInfo(unittest.expectedFailure, 'TestMeta', 'test_dispatch_symbolic_meta_outplace_all_strides'), + )), + OpInfo('view_copy', + dtypes=all_types_and(torch.bool, torch.bfloat16, torch.float16), + ref=lambda x, newshape: np.reshape(x, newshape).copy(), + supports_out=True, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + supports_autograd=True, + sample_inputs_func=sample_inputs_view_reshape, + error_inputs_func=error_inputs_view_reshape), + UnaryUfuncInfo('neg', + aliases=('negative', ), + ref=np.negative, + dtypes=all_types_and_complex_and(torch.half, torch.bfloat16, torch.chalf), + error_inputs_func=error_inputs_neg, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + supports_sparse=True, + supports_sparse_csr=True, + supports_sparse_csc=True, + supports_sparse_bsr=True, + supports_sparse_bsc=True, + assert_autodiffed=True), + OpInfo('dist', + op=torch.dist, + dtypes=floating_and_complex_types_and(torch.half, torch.bfloat16), + # Runs very slowly on slow gradcheck - alternatively reduce input sizes + gradcheck_fast_mode=True, + supports_out=False, + supports_forward_ad=True, + # torch.autograd.gradcheck.GradcheckError: While computing batched gradients, got: + # Could not allocate memory to change Tensor SizesAndStrides! + check_batched_forward_grad=False, + supports_fwgrad_bwgrad=True, + sample_inputs_func=sample_inputs_dist), + OpInfo('outer', + op=torch.outer, + aliases=('ger', ), + dtypes=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16), + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + # See https://github.com/pytorch/pytorch/pull/78358 + check_batched_forward_grad=False, + sample_inputs_func=sample_inputs_outer,), + OpInfo('ormqr', + op=torch.ormqr, + dtypes=floating_and_complex_types(), + # https://github.com/pytorch/pytorch/issues/80411 + gradcheck_fast_mode=True, + supports_forward_ad=False, + supports_fwgrad_bwgrad=False, + sample_inputs_func=sample_inputs_ormqr, + error_inputs_func=error_inputs_ormqr, + decorators=[skipCUDAIfNoCusolver, skipCPUIfNoLapack], + skips=( + # Strides are not the same! 
+ DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_out'), + )), + OpInfo('permute', + ref=np.transpose, + dtypes=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16, torch.chalf), + supports_out=False, + assert_autodiffed=True, + autodiff_fusible_nodes=[], # aliases inputs, shouldn't be fused + autodiff_nonfusible_nodes=[], # aliases inputs, shouldn't be fused + assert_jit_shape_analysis=True, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + supports_varargs=True, + sample_inputs_func=sample_inputs_permute, + reference_inputs_func=reference_inputs_permute), + BinaryUfuncInfo('pow', + dtypes=all_types_and_complex_and(torch.half, torch.bfloat16), + dtypesIfCUDA=all_types_and_complex_and(torch.half, torch.bfloat16, torch.chalf), + ref=np.power, + # Due to AVX2 currently not being fully supported for Float16, log_vml_cpu can't be enabled + # for Float16, causing this test to fail. pow's autograd for Float16 is thus currently + # unsupported on CPU. + backward_dtypes=floating_and_complex_types_and(torch.half, torch.bfloat16), + backward_dtypesIfCUDA=floating_and_complex_types_and(torch.bfloat16, torch.half, torch.chalf), + # https://github.com/pytorch/pytorch/issues/80411 + gradcheck_fast_mode=True, + supports_inplace_autograd=False, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + assert_autodiffed=True, + supports_one_python_scalar=True, + # Integer types do not support negative exponents + rhs_make_tensor_kwargs=dict(low=0), + # Raising negative real numbers to fractional powers is not supported + lhs_make_tensor_kwargs=dict(low=0), + decorators=( + DecorateInfo(toleranceOverride({torch.complex64: tol(atol=1e-4, rtol=1.3e-05)}), + 'TestBinaryUfuncs', 'test_reference_numerics'), + DecorateInfo(toleranceOverride({torch.complex64: tol(atol=1e-4, rtol=1.3e-05), + torch.complex128: tol(atol=1e-4, rtol=1.3e-05)}), + 'TestBinaryUfuncs', 'test_scalar_support'), + ), + skips=( + # Skipping integers because raising them to negative powers causes an error + DecorateInfo(unittest.expectedFailure, 'TestBinaryUfuncs', 'test_reference_numerics_small_values', + dtypes=[torch.int8, torch.int16, torch.int32, torch.int64]), + DecorateInfo(unittest.expectedFailure, 'TestBinaryUfuncs', 'test_reference_numerics_large_values', + dtypes=[torch.int16, torch.int32, torch.int64]), + # FIXME Complex values error with: Greatest absolute difference: nan at index + # Ref: https://github.com/pytorch/pytorch/issues/76853 + # For `chalf`, reference computation in `numpy` is computed in `cfloat`. + # Output of `chalf` saturates to `inf` quicker than reference due to its small range, + # which leads to failure of this test.
+ DecorateInfo(unittest.skip("Skipped!"), 'TestDecomp', 'test_quick', + dtypes=(torch.complex32,), active_if=TEST_WITH_ROCM), + DecorateInfo(unittest.skip("Skipped!"), 'TestDecomp', 'test_comprehensive', + dtypes=(torch.complex32,), active_if=TEST_WITH_ROCM), + DecorateInfo(unittest.skip("Skipped!"), 'TestCommon', 'test_complex_half_reference_testing', + dtypes=(torch.complex32,), active_if=TEST_WITH_ROCM), + DecorateInfo(unittest.skip("Skipped!"), 'TestBinaryUfuncs', 'test_batch_vs_slicing', + dtypes=(torch.complex32,)), + DecorateInfo(unittest.skip("Skipped!"), 'TestBinaryUfuncs', 'test_non_contig', + dtypes=(torch.complex32,)), + DecorateInfo(unittest.skip("Skipped!"), 'TestBinaryUfuncs', 'test_reference_numerics', + dtypes=(torch.complex32,)), + DecorateInfo(unittest.skip("Skipped!"), 'TestBinaryUfuncs', 'test_reference_numerics_small_values', + dtypes=(torch.complex32, torch.complex64, torch.complex128)), + DecorateInfo(unittest.skip("Skipped!"), 'TestBinaryUfuncs', 'test_reference_numerics_large_values', + dtypes=(torch.complex32, torch.complex64, torch.complex128)), + DecorateInfo(unittest.skip("Skipped!"), 'TestBinaryUfuncs', 'test_reference_numerics_extremal_values', + dtypes=(torch.complex32, torch.complex64, torch.complex128)), + )), + BinaryUfuncInfo('float_power', + ref=np.float_power, + dtypes=all_types_and_complex_and(torch.half, torch.bfloat16, torch.bool), + promotes_int_to_float=True, + # https://github.com/pytorch/pytorch/issues/80411 + gradcheck_fast_mode=True, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + supports_one_python_scalar=True, + # Integer types do not support negative exponents + rhs_make_tensor_kwargs=dict(low=0), + # Raising negative real numbers to fractional powers is not supported + lhs_make_tensor_kwargs=dict(low=0), + decorators=( + DecorateInfo(toleranceOverride({torch.complex64: tol(atol=1e-4, rtol=1.3e-05), + torch.complex128: tol(atol=1e-4, rtol=1.3e-05)}), + 'TestBinaryUfuncs', 'test_scalar_support'), + ), + skips=( + # FIXME + # AssertionError: Object comparison failed: torch.float64 != torch.float32 + DecorateInfo(unittest.skip("Skipped!"), 'TestBinaryUfuncs', 'test_type_promotion'), + # -3.43399e+38 is outside the range of representable values of type 'float' + DecorateInfo(unittest.skip("Skipped!"), 'TestJit', 'test_variant_consistency_jit'), + # Complex values error with: Greatest absolute difference: nan at index + DecorateInfo(unittest.skip("Skipped!"), 'TestBinaryUfuncs', 'test_reference_numerics_small_values', + dtypes=[torch.complex64, torch.complex128]), + DecorateInfo(unittest.skip("Skipped!"), 'TestBinaryUfuncs', 'test_reference_numerics_large_values', + dtypes=[torch.complex64, torch.complex128]), + DecorateInfo(unittest.skip("Skipped!"), 'TestBinaryUfuncs', 'test_reference_numerics_extremal_values', + dtypes=[torch.complex64, torch.complex128]), + # Inplace always promotes to double and thus other floating dtypes are not supported + DecorateInfo(unittest.expectedFailure, 'TestMeta', 'test_meta_inplace', + dtypes=[torch.bfloat16, torch.float16, torch.float32]), + )), + OpInfo('qr', + op=torch.qr, + dtypes=floating_and_complex_types(), + sample_inputs_func=sample_inputs_linalg_qr_geqrf, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + # In-place ops + check_batched_gradgrad=False, + decorators=[skipCUDAIfNoCusolver, skipCPUIfNoLapack]), + UnaryUfuncInfo('rad2deg', + ref=np.degrees, + decorators=(precisionOverride({torch.bfloat16: 7e-1, + torch.float16: 7e-1}),), + dtypes=all_types_and(torch.bool,
torch.half, torch.bfloat16), + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + supports_sparse=True, + supports_sparse_csr=True, + supports_sparse_csc=True, + supports_sparse_bsr=True, + supports_sparse_bsc=True, + promotes_int_to_float=True), + UnaryUfuncInfo('real', + ref=np.real, + dtypes=all_types_and_complex_and(torch.bool, torch.bfloat16, torch.half, torch.chalf), + supports_out=False, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + # See https://github.com/pytorch/pytorch/issues/66357 + check_batched_forward_grad=False, + skips=( + # Skip since real and imag don't have out variants. + DecorateInfo(unittest.expectedFailure, 'TestUnaryUfuncs', 'test_out_arg_all_dtypes'), + )), + OpInfo( + "roll", + ref=np.roll, + dtypes=all_types_and_complex_and(torch.bool, torch.bfloat16, torch.half, torch.chalf), + error_inputs_func=error_inputs_roll, + supports_out=False, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + sample_inputs_func=sample_inputs_roll, + decorators=(onlyNativeDeviceTypes,), + ), + OpInfo( + "rot90", + dtypes=all_types_and_complex_and(torch.bool, torch.bfloat16, torch.half), + error_inputs_func=error_inputs_rot90, + # Runs very slowly on slow gradcheck - alternatively reduce input sizes + gradcheck_fast_mode=True, + supports_out=False, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + sample_inputs_func=sample_inputs_rot90, + ), + # To test reference numerics against multiple values of argument `decimals`, + # we make multiple OpInfo entries with each entry corresponding to different value of decimals. + UnaryUfuncInfo('round', + ref=np.round, + aliases=('special.round',), + dtypes=all_types_and(torch.half, torch.bfloat16), + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + skips=( + DecorateInfo(unittest.expectedFailure, + 'TestNNCOpInfo', + 'test_nnc_correctness', + dtypes=tuple(t for t in integral_types() if t != torch.uint8)), + DecorateInfo(unittest.skip("Skipped!"), + 'TestNNCOpInfo', + 'test_nnc_correctness', + dtypes=(torch.bfloat16,)), + ), + supports_sparse=True, + supports_sparse_csr=True, + supports_sparse_csc=True, + supports_sparse_bsr=True, + supports_sparse_bsc=True, + assert_autodiffed=True, + ), + UnaryUfuncInfo('round', + ref=np.round, + variant_test_name='decimals_0', + aliases=('special.round',), + dtypes=floating_types_and(torch.half, torch.bfloat16), + sample_kwargs=lambda device, dtype, input: ({'decimals': 0}, {'decimals': 0}), + sample_inputs_func=partial(sample_inputs_elementwise_unary, op_kwargs={'decimals': 0}), + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + assert_autodiffed=False, + supports_sparse_csr=False), + UnaryUfuncInfo('round', + ref=np.round, + variant_test_name='decimals_3', + aliases=('special.round',), + dtypes=floating_types_and(torch.bfloat16), + dtypesIfCUDA=floating_types_and(torch.half, torch.bfloat16), + sample_kwargs=lambda device, dtype, input: ({'decimals': 3}, {'decimals': 3}), + sample_inputs_func=partial(sample_inputs_elementwise_unary, op_kwargs={'decimals': 3}), + skips=( + # test_ops already tested for this overload with `decimals_0` opinfo entry + DecorateInfo(unittest.skip("Skipped!"), 'TestCommon'), + DecorateInfo(unittest.skip("Skipped!"), 'TestFwdGradients'), + DecorateInfo(unittest.skip("Skipped!"), 'TestBwdGradients'), + DecorateInfo(unittest.skip("Skipped!"), 'TestJit'), + DecorateInfo(unittest.skip("Skipped!"), 'TestMathBits'), + DecorateInfo(toleranceOverride({torch.bfloat16: tol(atol=1e-3, rtol=0.016)}), + "TestUnaryUfuncs", 
"test_reference_numerics_extremal", + device_type="cuda"), + DecorateInfo(toleranceOverride({torch.bfloat16: tol(atol=1e-3, rtol=0.016)}), + "TestUnaryUfuncs", "test_reference_numerics_normal", + device_type="cuda"), + ), + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + assert_autodiffed=False, + supports_sparse_csr=False), + UnaryUfuncInfo('round', + ref=np.round, + variant_test_name='decimals_neg_3', + aliases=('special.round',), + dtypes=floating_types_and(torch.bfloat16), + dtypesIfCUDA=floating_types_and(torch.half, torch.bfloat16), + sample_kwargs=lambda device, dtype, input: ({'decimals': -3}, {'decimals': -3}), + sample_inputs_func=partial(sample_inputs_elementwise_unary, op_kwargs={'decimals': -3}), + skips=( + # test_ops already tested for this overload with `decimals_0` opinfo entry + DecorateInfo(unittest.skip("Skipped!"), 'TestCommon'), + DecorateInfo(unittest.skip("Skipped!"), 'TestFwdGradients'), + DecorateInfo(unittest.skip("Skipped!"), 'TestBwdGradients'), + DecorateInfo(unittest.skip("Skipped!"), 'TestJit'), + DecorateInfo(unittest.skip("Skipped!"), 'TestMathBits'), + ), + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + assert_autodiffed=False, + supports_sparse_csr=False), + UnaryUfuncInfo('sin', + ref=np.sin, + dtypes=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16), + dtypesIfCUDA=all_types_and_complex_and(torch.chalf, torch.bool, torch.half, torch.bfloat16), + assert_autodiffed=True, + handles_large_floats=False, + supports_sparse=True, + supports_sparse_csr=True, + supports_sparse_csc=True, + supports_sparse_bsr=True, + supports_sparse_bsc=True, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + promotes_int_to_float=True, + skips=( + # Fails on CUDA but passes on ROCm + DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_large', + dtypes=(torch.cdouble,), device_type='cuda'), + DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_extremal', + dtypes=(torch.cfloat, torch.cdouble,), device_type='cpu', active_if=IS_WINDOWS), + DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_large', + dtypes=(torch.cfloat, torch.cdouble,), device_type='cpu', active_if=IS_WINDOWS), + DecorateInfo(unittest.skip("Skipped! 
sparse backward not supported"), + 'TestSparseUnaryUfuncs', 'test_sparse_fn_grad'), + ), + decorators=(precisionOverride({torch.bfloat16: 1e-2}),)), + UnaryUfuncInfo('sinc', + ref=np_sinc_with_fp16_as_fp32, + aliases=('special.sinc',), + dtypes=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16), + handles_large_floats=False, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + promotes_int_to_float=True, + decorators=(precisionOverride({torch.bfloat16: 1e-2, + torch.float16: 1e-2}),), + skips=( + # Reference: https://github.com/pytorch/pytorch/issues/49133 + DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_small', + dtypes=[torch.cfloat]), + )), + UnaryUfuncInfo('sinh', + ref=np_unary_ufunc_integer_promotion_wrapper(np.sinh), + dtypes=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16), + dtypesIfCUDA=all_types_and_complex_and(torch.chalf, torch.bool, torch.half, torch.bfloat16), + assert_autodiffed=True, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + supports_sparse=True, + supports_sparse_csr=True, + supports_sparse_csc=True, + supports_sparse_bsr=True, + supports_sparse_bsc=True, + promotes_int_to_float=True, + decorators=(precisionOverride({torch.float16: 1e-2}),), + skips=( + DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_extremal', + device_type='cpu', dtypes=[torch.cfloat, torch.cdouble], + active_if=(IS_MACOS or IS_WINDOWS)), + DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_large', + device_type='cpu', dtypes=[torch.cfloat, torch.cdouble], + active_if=(IS_MACOS or IS_WINDOWS)), + DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_large', + dtypes=(torch.cdouble,)), + # Reference: https://github.com/pytorch/pytorch/issues/48641 + DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_large', + device_type='cpu', dtypes=[torch.int8]), + DecorateInfo(unittest.skip("Skipped! sparse backward not supported"), + 'TestSparseUnaryUfuncs', 'test_sparse_fn_grad'), + )), + UnaryUfuncInfo('sign', + ref=reference_sign, + dtypes=all_types_and(torch.bool, torch.bfloat16, torch.half), + dtypesIfCUDA=all_types_and(torch.bool, torch.bfloat16, torch.half), + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + supports_sparse=True, + supports_sparse_csr=True, + supports_sparse_csc=True, + supports_sparse_bsr=True, + supports_sparse_bsc=True, + skips=( + # Reference: https://github.com/pytorch/pytorch/issues/41245 + DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_extremal', + dtypes=[torch.bfloat16, torch.float16, torch.float32, torch.float64]), + )), + UnaryUfuncInfo('sgn', + ref=reference_sgn, + dtypes=all_types_and_complex_and(torch.bool, torch.bfloat16, torch.half, torch.chalf), + backward_dtypes=floating_and_complex_types_and(torch.bfloat16, torch.half), + backward_dtypesIfCUDA=floating_and_complex_types_and(torch.bfloat16, torch.half, torch.chalf), + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + supports_sparse=True, + supports_sparse_csr=True, + supports_sparse_csc=True, + supports_sparse_bsr=True, + supports_sparse_bsc=True, + skips=( + # Reference: https://github.com/pytorch/pytorch/issues/41245 + DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_extremal', + dtypes=[torch.bfloat16, torch.float16, torch.float32, torch.float64]), + DecorateInfo(unittest.skip("Skipped! 
sparse backward not supported"), + 'TestSparseUnaryUfuncs', 'test_sparse_fn_grad'), + )), + OpInfo('split', + dtypes=all_types_and_complex_and(torch.bfloat16, torch.half, torch.bool, torch.chalf), + sample_inputs_func=partial(sample_inputs_split, list_args=False), + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + supports_out=False, + autodiff_fusible_nodes=[], # aliases inputs, shouldn't be fused + autodiff_nonfusible_nodes=[], # aliases inputs, shouldn't be fused + assert_autodiffed=True), + OpInfo('split', + # Cannot declare this aten_name because of + # test_variant_consistency_jit_split_list_args_cpu_float32 + decomp_aten_name='split_with_sizes', + variant_test_name='list_args', + dtypes=all_types_and_complex_and(torch.bfloat16, torch.half, torch.bool), + sample_inputs_func=partial(sample_inputs_split, list_args=True), + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + supports_out=False), + # `unsafe_split` supports only `int` for split_size argument + OpInfo('unsafe_split', + dtypes=all_types_and_complex_and(torch.bfloat16, torch.half, torch.bool, torch.chalf), + sample_inputs_func=partial(sample_inputs_split, list_args=False), + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + supports_out=False, + autodiff_fusible_nodes=[], # aliases inputs, shouldn't be fused + autodiff_nonfusible_nodes=[], # aliases inputs, shouldn't be fused + assert_autodiffed=True, + check_batched_forward_grad=False), + OpInfo('split_with_sizes', + dtypes=all_types_and_complex_and(torch.bfloat16, torch.half, torch.bool, torch.chalf), + sample_inputs_func=sample_inputs_split_with_sizes, + autodiff_fusible_nodes=[], # aliases inputs, shouldn't be fused + autodiff_nonfusible_nodes=[], # aliases inputs, shouldn't be fused + supports_out=False, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + assert_autodiffed=True), + BinaryUfuncInfo('__radd__', + op=torch.Tensor.__radd__, + dtypes=all_types_and_complex_and(torch.bfloat16, torch.half, torch.bool), + supports_out=False, + skips=( + DecorateInfo(unittest.expectedFailure, "TestNormalizeOperators", "test_normalize_operator_exhaustive"), + DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_variant_consistency_jit',), + + ), + assert_autodiffed=True, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + autodiff_nonfusible_nodes=['aten::add'],), + BinaryUfuncInfo('__rdiv__', + op=torch.Tensor.__rdiv__, + dtypes=all_types_and_complex_and(torch.bfloat16, torch.half, torch.bool), + promotes_int_to_float=True, + lhs_make_tensor_kwargs={'exclude_zero': True}, + # Runs very slowly on slow gradcheck - alternatively reduce input sizes + gradcheck_fast_mode=True, + supports_out=False, + skips=( + # https://github.com/pytorch/pytorch/issues/76806 + DecorateInfo(unittest.expectedFailure, 'TestBinaryUfuncs', 'test_type_promotion'), + DecorateInfo(unittest.expectedFailure, 'TestNormalizeOperators', 'test_normalize_operator_exhaustive'), + DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_variant_consistency_jit',), + ), + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + assert_autodiffed=True, + autodiff_nonfusible_nodes=['aten::mul', 'aten::reciprocal'],), + BinaryUfuncInfo('__rmul__', + op=torch.Tensor.__rmul__, + dtypes=all_types_and_complex_and(torch.bfloat16, torch.half, torch.bool), + supports_out=False, + skips=( + DecorateInfo(unittest.expectedFailure, 'TestNormalizeOperators', 'test_normalize_operator_exhaustive'), + DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_variant_consistency_jit',), + 
), + assert_autodiffed=True, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + autodiff_nonfusible_nodes=['aten::mul'],), + BinaryUfuncInfo('__rand__', + op=torch.Tensor.__rand__, + dtypes=integral_types_and(torch.bool), + supports_out=False, + supports_autograd=False, + supports_forward_ad=True, + skips=( + DecorateInfo(unittest.expectedFailure, 'TestNormalizeOperators', 'test_normalize_operator_exhaustive'), + )), + BinaryUfuncInfo('__ror__', + op=torch.Tensor.__ror__, + dtypes=integral_types_and(torch.bool), + supports_out=False, + supports_autograd=False, + supports_forward_ad=True, + skips=( + DecorateInfo(unittest.expectedFailure, 'TestNormalizeOperators', 'test_normalize_operator_exhaustive'), + )), + BinaryUfuncInfo('__rxor__', + op=torch.Tensor.__rxor__, + dtypes=integral_types_and(torch.bool), + supports_out=False, + supports_autograd=False, + supports_forward_ad=True, + skips=( + DecorateInfo(unittest.expectedFailure, 'TestNormalizeOperators', 'test_normalize_operator_exhaustive'), + )), + OpInfo('__rmatmul__', + op=torch.Tensor.__rmatmul__, + dtypes=all_types_and_complex_and(torch.bfloat16, torch.float16), + dtypesIfCUDA=floating_and_complex_types_and(torch.float16, + *[torch.bfloat16] + if SM53OrLater or TEST_WITH_ROCM else []), + assert_autodiffed=True, + sample_inputs_func=partial(sample_inputs_matmul, is_rmatmul=True), + # Runs very slowly on slow gradcheck - alternatively reduce input sizes + gradcheck_fast_mode=True, + supports_out=False, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + check_batched_forward_grad=False, + decorators=( + # NVIDIA only assures that bfloat16 is supported by bmm if SM >= 5.3 + DecorateInfo(unittest.skip("Skipped!"), 'TestCommon', 'test_dtypes', device_type='cuda', active_if=not SM53OrLater), + DecorateInfo(toleranceOverride({torch.complex64: tol(atol=1e-05, rtol=1.2e-03)}), + 'TestMathBits', 'test_conj_view'), + DecorateInfo(toleranceOverride({torch.float32: tol(atol=1e-05, rtol=1.2e-03)}), + 'TestCommon', 'test_noncontiguous_samples'), + DecorateInfo(toleranceOverride({torch.complex64: tol(atol=1e-05, rtol=1e-05)}), + "TestDecomp", "test_comprehensive", device_type="cuda", + active_if=TEST_WITH_ROCM), + ), + skips=( + DecorateInfo(unittest.expectedFailure, 'TestNormalizeOperators', 'test_normalize_operator_exhaustive'), + DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_variant_consistency_jit',), + # https://github.com/pytorch/pytorch/issues/67470 + DecorateInfo(unittest.skip("67470!"), + 'TestCommon', 'test_noncontiguous_samples', + device_type='cpu', dtypes=(torch.long,)), + # Fails on XLA. 
+ # AssertionError: False is not true : Tensors failed to compare as equal + DecorateInfo(unittest.skip("Skipped!"), 'TestOpInfo', device_type='xla', dtypes=(torch.long,)), + # https://github.com/pytorch/pytorch/issues/71774 + DecorateInfo(unittest.skip('Skipped!'), 'TestNNCOpInfo', 'test_nnc_correctness', + device_type='cpu', dtypes=(torch.long,)), + )), + BinaryUfuncInfo('__rmod__', + op=torch.Tensor.__rmod__, + dtypes=floating_types_and(torch.bfloat16, torch.half,), + dtypesIfCUDA=all_types_and(torch.bfloat16, torch.half), + # https://github.com/pytorch/pytorch/issues/80411 + gradcheck_fast_mode=True, + supports_out=False, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + supports_one_python_scalar=True, + skips=( + DecorateInfo(unittest.expectedFailure, 'TestNormalizeOperators', 'test_normalize_operator_exhaustive'), + DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_variant_consistency_jit',), + ), + # Support autograd after torch.remainder(Tensor, Tensor) supports + # autograd of the second argument. + # https://github.com/pytorch/pytorch/pull/58476/files#r637167630 + # supports_autograd=False, + assert_autodiffed=True, + autodiff_nonfusible_nodes=['aten::remainder'],), + BinaryUfuncInfo('__rpow__', + op=torch.Tensor.__rpow__, + dtypes=all_types_and_complex_and(torch.bfloat16, torch.half), + # Reference: https://github.com/pytorch/pytorch/issues/54774 + # "log2" "_vml_cpu" not implemented for Half + backward_dtypes=all_types_and_complex_and(torch.bfloat16, torch.half), + supports_out=False, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + supports_one_python_scalar=True, + skips=( + DecorateInfo(unittest.expectedFailure, 'TestNormalizeOperators', 'test_normalize_operator_exhaustive'), + DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_variant_consistency_jit',), + # TODO: FIXME tolerance is too high + DecorateInfo(unittest.skip('Skipped!'), 'TestFwdGradients'), + DecorateInfo(unittest.skip('Skipped!'), 'TestBwdGradients'), + ), + assert_autodiffed=True, + autodiff_nonfusible_nodes=['aten::pow'],), + BinaryUfuncInfo('__rsub__', + op=torch.Tensor.__rsub__, + dtypes=all_types_and_complex_and(torch.bfloat16, torch.half), + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + supports_out=False, + supports_one_python_scalar=True, + skips=( + DecorateInfo(unittest.expectedFailure, 'TestNormalizeOperators', 'test_normalize_operator_exhaustive'), + DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_variant_consistency_jit',), + ), + assert_autodiffed=True, + autodiff_nonfusible_nodes=['aten::rsub'],), + BinaryUfuncInfo('rsub', + dtypes=all_types_and_complex_and(torch.bfloat16, torch.half), + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + supports_out=False, + supports_inplace_autograd=False, + assert_autodiffed=None, + sample_inputs_func=sample_inputs_add_sub), + OpInfo('select', + aten_backward_name='select_backward', + dtypes=all_types_and_complex_and(torch.bfloat16, torch.half, torch.bool, torch.chalf), + sample_inputs_func=sample_inputs_select, + assert_jit_shape_analysis=True, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + supports_out=False), + OpInfo('select_scatter', + dtypes=all_types_and(torch.bfloat16, torch.half, torch.bool), + sample_inputs_func=sample_inputs_select_scatter, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + supports_out=False), + OpInfo('slice', + op=torch.ops.aten.slice.Tensor, + dtypes=all_types_and_complex_and(torch.bfloat16, torch.half, torch.bool, torch.chalf), + 
sample_inputs_func=sample_inputs_slice, + gradcheck_fast_mode=True, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + supports_scripting=False, + supports_inplace_autograd=False, + supports_out=False), + OpInfo('slice_scatter', + dtypes=all_types_and(torch.bfloat16, torch.half, torch.bool), + sample_inputs_func=sample_inputs_slice_scatter, + # https://github.com/pytorch/pytorch/issues/80411 + gradcheck_fast_mode=True, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + supports_out=False), + UnaryUfuncInfo('signbit', + ref=np.signbit, + dtypes=all_types_and(torch.bool, torch.bfloat16, torch.half), + supports_sparse=True, + supports_sparse_csr=True, + supports_sparse_csc=True, + supports_sparse_bsr=True, + supports_sparse_bsc=True, + supports_autograd=False,), + UnaryUfuncInfo('tan', + ref=np.tan, + dtypes=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16), + dtypesIfCUDA=all_types_and_complex_and(torch.chalf, torch.bool, torch.half, torch.bfloat16), + decorators=(DecorateInfo( + toleranceOverride({torch.complex64: tol(atol=1e-04, rtol=1e-05)}), + 'TestUnaryUfuncs', 'test_reference_numerics_extremal', + device_type='cuda'),), + assert_autodiffed=True, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + supports_sparse=True, + supports_sparse_csr=True, + supports_sparse_csc=True, + supports_sparse_bsr=True, + supports_sparse_bsc=True, + promotes_int_to_float=True, + skips=( + DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_extremal', + device_type='cpu', dtypes=[torch.cfloat, torch.cdouble], + active_if=(IS_MACOS or IS_WINDOWS)), + DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_large', + device_type='cpu', dtypes=[torch.cfloat, torch.cdouble], + active_if=(IS_MACOS or IS_WINDOWS)), + DecorateInfo(unittest.skip("Skipped! sparse backward not supported"), + 'TestSparseUnaryUfuncs', 'test_sparse_fn_grad'), + ), + # tan(pi/2 * odd_number) is nan + reference_numerics_filter=NumericsFilter( + condition=lambda x: close_to_int(x / (math.pi * 0.5)), safe_val=math.pi)), + UnaryUfuncInfo('tanh', + ref=np.tanh, + aten_backward_name='tanh_backward', + aliases=('nn.functional.tanh',), + decorators=(precisionOverride({torch.bfloat16: 1e-2}), + DecorateInfo( + toleranceOverride({torch.complex64: tol(atol=1e-04, rtol=2e-05)}), + 'TestUnaryUfuncs', 'test_reference_numerics_extremal', + device_type='cuda'),), + dtypes=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16), + dtypesIfCUDA=all_types_and_complex_and(torch.chalf, torch.bool, torch.half, torch.bfloat16), + assert_autodiffed=True, + assert_jit_shape_analysis=True, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + supports_sparse=True, + supports_sparse_csr=True, + supports_sparse_csc=True, + supports_sparse_bsr=True, + supports_sparse_bsc=True, + promotes_int_to_float=True, + skips=( + DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_extremal', + device_type='cpu', dtypes=[torch.cfloat, torch.cdouble], + active_if=(IS_MACOS or IS_WINDOWS)), + DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_large', + device_type='cpu', dtypes=[torch.cfloat, torch.cdouble], + active_if=(IS_MACOS or IS_WINDOWS)), + DecorateInfo(unittest.skip("Skipped! 
sparse backward not supported"), + 'TestSparseUnaryUfuncs', 'test_sparse_fn_grad'), + ), + # tan(j * pi/2 * odd_number) is nan + reference_numerics_filter=NumericsFilter( + condition=lambda x: (close_to_int(x / (math.pi * 0.5j)) + if x.is_complex() else x.new_tensor(False, dtype=torch.bool)), + safe_val=0)), + OpInfo('tensor_split', + ref=np.array_split, + dtypes=all_types_and_complex_and(torch.bool, torch.bfloat16, torch.float16), + dtypesIfCUDA=all_types_and_complex_and(torch.bool, torch.bfloat16, torch.float16), + supports_out=False, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + skips=( + # Pre-existing condition; Needs to be fixed + DecorateInfo(unittest.expectedFailure, 'TestCompositeCompliance', 'test_operator'), + DecorateInfo(unittest.expectedFailure, 'TestCompositeCompliance', 'test_backward'), + DecorateInfo(unittest.expectedFailure, 'TestCompositeCompliance', 'test_forward_ad'), + ), + sample_inputs_func=sample_inputs_tensor_split,), + OpInfo('hsplit', + dtypes=all_types_and_complex_and(torch.complex32, torch.bool, torch.bfloat16, torch.float16), + supports_out=False, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + # See https://github.com/pytorch/pytorch/pull/78358 + check_batched_forward_grad=False, + sample_inputs_func=sample_inputs_hsplit, + error_inputs_func=error_inputs_hsplit,), + OpInfo('vsplit', + dtypes=all_types_and_complex_and(torch.complex32, torch.bool, torch.bfloat16, torch.float16), + supports_out=False, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + # See https://github.com/pytorch/pytorch/pull/78358 + check_batched_forward_grad=False, + sample_inputs_func=sample_inputs_vsplit, + error_inputs_func=error_inputs_vsplit,), + OpInfo('dsplit', + dtypes=all_types_and_complex_and(torch.complex32, torch.bool, torch.bfloat16, torch.float16), + supports_out=False, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + # See https://github.com/pytorch/pytorch/pull/78358 + check_batched_forward_grad=False, + sample_inputs_func=sample_inputs_dsplit, + error_inputs_func=error_inputs_dsplit,), + OpInfo('triangular_solve', + op=torch.triangular_solve, + dtypes=floating_and_complex_types(), + sample_inputs_func=sample_inputs_legacy_solve, + check_batched_gradgrad=False, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + gradcheck_wrapper=lambda *args, **kwargs: gradcheck_wrapper_triangular_input(*args, idx=1, **kwargs), + decorators=[skipCUDAIfNoMagma, skipCPUIfNoLapack], + skips=( + # AssertionError: Scalars are not equal! 
+ DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_out'), + # Gradcheck fails + DecorateInfo(unittest.expectedFailure, 'TestFwdGradients', 'test_fn_fwgrad_bwgrad', + dtypes=floating_and_complex_types()), + DecorateInfo(unittest.skip("Skipped!"), 'TestCommon', 'test_out', + device_type='mps', dtypes=[torch.float32]), + DecorateInfo(unittest.skip("Skipped!"), 'TestCommon', 'test_variant_consistency_eager', + device_type='mps', dtypes=[torch.float32]), + DecorateInfo(unittest.skip("Skipped!"), 'TestJit', 'test_variant_consistency_jit', + device_type='mps', dtypes=[torch.float32]), + )), + UnaryUfuncInfo('trunc', + aliases=('fix', ), + ref=np.trunc, + dtypes=all_types_and(torch.half, torch.bfloat16), + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + supports_sparse=True, + skips=( + DecorateInfo(unittest.expectedFailure, + 'TestNNCOpInfo', + 'test_nnc_correctness', + dtypes=tuple(t for t in integral_types() if t != torch.uint8)), + ), + supports_sparse_csr=True, + supports_sparse_csc=True, + supports_sparse_bsr=True, + supports_sparse_bsc=True, + assert_autodiffed=True), + UnaryUfuncInfo('exp2', + aliases=('special.exp2', ), + ref=np_unary_ufunc_integer_promotion_wrapper(np.exp2), + dtypes=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16), + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + promotes_int_to_float=True, + skips=( + DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_large', + dtypes=[torch.cdouble]), + # Reference: https://github.com/pytorch/pytorch/issues/48010 + DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_extremal', + device_type='cpu', dtypes=[torch.cfloat, torch.cdouble]), + DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_large', + device_type='cpu', dtypes=[torch.cfloat, torch.cdouble]), + )), + UnaryUfuncInfo('expm1', + aliases=('special.expm1', ), + ref=np_unary_ufunc_integer_promotion_wrapper(np.expm1), + dtypes=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16), + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + supports_sparse=True, + supports_sparse_csr=True, + supports_sparse_csc=True, + supports_sparse_bsr=True, + supports_sparse_bsc=True, + promotes_int_to_float=True, + assert_autodiffed=True, + skips=( + DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_large', + device_type='cuda', dtypes=[torch.complex128]), + DecorateInfo(unittest.skip("Skipped! sparse backward not supported"), + 'TestSparseUnaryUfuncs', 'test_sparse_fn_grad'), + )), + UnaryUfuncInfo('nan_to_num', + ref=np.nan_to_num, + dtypes=all_types_and(torch.half, torch.bool, torch.bfloat16), + dtypesIfCUDA=all_types_and(torch.half, torch.bool, torch.bfloat16), + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + supports_sparse=True, + skips=( + DecorateInfo(unittest.skip("Skipped! sparse backward not supported"), + 'TestSparseUnaryUfuncs', 'test_sparse_fn_grad'), + ), + # Passing numpy_kwargs via sample_kwargs, as numpy does comparison + # with BFloat16 in float, since it currently doesn't support BFloat16. 
+ # Ref: https://github.com/pytorch/pytorch/issues/57982#issuecomment-839150556 + sample_kwargs=lambda device, dtype, input: ({}, + {'posinf': torch.finfo(torch.bfloat16).max, + 'neginf': torch.finfo(torch.bfloat16).min}) + if dtype is torch.bfloat16 else ({}, {})), + UnaryUfuncInfo('reciprocal', + ref=np_unary_ufunc_integer_promotion_wrapper(np.reciprocal), + dtypes=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16), + assert_autodiffed=True, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + promotes_int_to_float=True, + skips=( + # Reference: https://github.com/pytorch/pytorch/issues/45690 + DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_extremal', + dtypes=[torch.cfloat, torch.cdouble]), + )), + UnaryUfuncInfo('rsqrt', + ref=lambda x: np.reciprocal(np.sqrt(x)), + domain=(0, None), + dtypes=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16), + dtypesIfCUDA=all_types_and_complex_and(torch.chalf, torch.bool, torch.half, torch.bfloat16), + decorators=(precisionOverride({torch.half: 5e-2}),), + assert_autodiffed=True, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + promotes_int_to_float=True, + skips=( + DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_extremal', + dtypes=(torch.cfloat, torch.cdouble)), + # AssertionError: Tensor-likes are not close! + # Greatest absolute difference: nan at index (700,) (up to 0.01 allowed) + # Greatest relative difference: nan at index (700,) (up to 0.001 allowed) + DecorateInfo(unittest.expectedFailure, 'TestUnaryUfuncs', 'test_reference_numerics_large', + dtypes=(torch.chalf,)), + )), + UnaryUfuncInfo('sqrt', + ref=np.sqrt, + supports_sparse=True, + domain=(0, None), + dtypes=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16), + dtypesIfCUDA=all_types_and_complex_and(torch.chalf, torch.bool, torch.half, torch.bfloat16), + assert_autodiffed=True, + supports_forward_ad=True, + supports_sparse_csr=True, + supports_sparse_csc=True, + supports_sparse_bsr=True, + supports_sparse_bsc=True, + supports_fwgrad_bwgrad=True, + promotes_int_to_float=True, + decorators=( + precisionOverride({torch.bfloat16: 7e-2}), + DecorateInfo( + toleranceOverride({torch.chalf: tol(atol=1e-2, rtol=0)}), + 'TestUnaryUfuncs', 'test_reference_numerics_large'), + ), + skips=( + # Reference: https://github.com/pytorch/pytorch/issues/47358 + DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_large', + device_type='cpu', dtypes=(torch.cfloat, torch.cdouble), + active_if=IS_MACOS), + DecorateInfo(unittest.skip("Skipped! 
sparse backward not supported"), + 'TestSparseUnaryUfuncs', 'test_sparse_fn_grad'), + )), + UnaryUfuncInfo('square', + ref=np.square, + dtypes=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16), + decorators=(precisionOverride({torch.complex64: 3e-4, torch.bfloat16: 3e-1}),), + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + skips=( + # Reference: https://github.com/pytorch/pytorch/issues/52549 + DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_large', + dtypes=[torch.cfloat, torch.cdouble]), + # >>> t = torch.tensor(complex(-0.01, float("inf"))) + # >>> np.square(t.numpy()) + # (-inf-infj) + # >>> t.square() + # tensor(-inf-infj) + # >>> t.cuda().square() + # tensor(inf+nanj, device='cuda:0') + DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_extremal', + device_type='cuda', dtypes=[torch.cfloat, torch.cdouble]), + DecorateInfo(unittest.expectedFailure, 'TestMeta', 'test_meta_inplace', + dtypes=[torch.bool]), + DecorateInfo(unittest.expectedFailure, 'TestMeta', 'test_dispatch_meta_inplace', + dtypes=[torch.bool]), + DecorateInfo(unittest.expectedFailure, 'TestMeta', 'test_dispatch_symbolic_meta_inplace', + dtypes=[torch.bool]), + ),), + OpInfo('lerp', + dtypes=floating_and_complex_types_and(torch.bfloat16, torch.half), + dtypesIfCUDA=floating_and_complex_types_and(torch.chalf, torch.half, torch.bfloat16), + sample_inputs_func=sample_inputs_lerp, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + assert_autodiffed=True), + UnaryUfuncInfo('angle', + ref=np.angle, + dtypes=all_types_and_complex_and(torch.bool, torch.bfloat16, torch.float16), + dtypesIfCUDA=all_types_and_complex_and(torch.chalf, torch.bool), + decorators=(precisionOverride({torch.float16: 1e-2, + torch.bfloat16: 1e-2}),), + backward_dtypes=floating_and_complex_types_and(torch.bfloat16, torch.float16), + backward_dtypesIfCUDA=floating_and_complex_types_and(torch.chalf), + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + supports_sparse_csr=True, + supports_sparse_csc=True, + supports_sparse_bsr=True, + supports_sparse_bsc=True, + supports_complex_to_float=True, + skips=( + # Ref: https://github.com/pytorch/pytorch/issues/78413 + DecorateInfo(unittest.expectedFailure, 'TestUnaryUfuncs', 'test_reference_numerics_small', + dtypes=(torch.bfloat16, torch.float16, torch.float32, torch.float64),), + )), + UnaryUfuncInfo('isfinite', + ref=np.isfinite, + dtypes=all_types_and_complex_and(torch.bool, torch.bfloat16, torch.float16, torch.chalf), + supports_out=False, + supports_autograd=False), + UnaryUfuncInfo('isinf', + ref=np.isinf, + dtypes=all_types_and_complex_and(torch.bool, torch.bfloat16, torch.float16, torch.chalf), + supports_out=False, + supports_sparse=True, + supports_sparse_csr=True, + supports_sparse_csc=True, + supports_sparse_bsr=True, + supports_sparse_bsc=True, + supports_autograd=False), + UnaryUfuncInfo('isposinf', + ref=np.isposinf, + dtypes=all_types_and(torch.bool, torch.bfloat16, torch.float16), + supports_sparse=True, + supports_sparse_csr=True, + supports_sparse_csc=True, + supports_sparse_bsr=True, + supports_sparse_bsc=True, + supports_autograd=False), + UnaryUfuncInfo('isneginf', + ref=np.isneginf, + dtypes=all_types_and(torch.bool, torch.bfloat16, torch.float16), + supports_sparse=True, + supports_sparse_csr=True, + supports_sparse_csc=True, + supports_sparse_bsr=True, + supports_sparse_bsc=True, + supports_autograd=False), + UnaryUfuncInfo('isreal', + ref=np.isreal, + 
dtypes=all_types_and_complex_and(torch.bool, torch.bfloat16, torch.float16, torch.chalf), + supports_out=False, + supports_autograd=False), + UnaryUfuncInfo('isnan', + ref=np.isnan, + dtypes=all_types_and_complex_and(torch.bool, torch.bfloat16, torch.float16), + supports_out=False, + supports_sparse=True, + supports_sparse_csr=True, + supports_sparse_csc=True, + supports_sparse_bsr=True, + supports_sparse_bsc=True, + supports_autograd=False), + OpInfo('einsum', + # we need this lambda because SampleInput expects tensor input as the first argument + # TODO(@heitorschueroff) update SampleInput to handle such cases + op=lambda tensors, equation: torch.einsum(equation, tensors), + dtypes=all_types_and_complex_and(torch.half, torch.bfloat16), + dtypesIfCUDA=floating_and_complex_types_and(torch.half, torch.bfloat16), + backward_dtypesIfCUDA=floating_and_complex_types_and(torch.half, torch.bfloat16), + supports_out=False, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + check_batched_forward_grad=False, + # See https://github.com/pytorch/pytorch/issues/66357 + sample_inputs_func=sample_inputs_einsum, + skips=( + DecorateInfo(unittest.expectedFailure, 'TestNormalizeOperators', 'test_normalize_operator_exhaustive'), + # test does not work with passing lambda for op + # there's a test `test_einsum` in `test_jit.py` to handle this case + # AssertionError: JIT Test does not execute any logic + DecorateInfo(unittest.skip("Skipped!"), 'TestJit', 'test_variant_consistency_jit'), + )), + OpInfo('svd', + op=torch.svd, + dtypes=floating_and_complex_types(), + sample_inputs_func=sample_inputs_svd, + # Runs very slowly on slow-gradcheck - alternatively reduce input sizes + gradcheck_fast_mode=True, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + check_batched_forward_grad=False, + # We're using at::allclose, which does not have a batching rule + check_batched_grad=False, + check_batched_gradgrad=False, + decorators=[skipCUDAIfNoMagmaAndNoCusolver, skipCPUIfNoLapack, with_tf32_off], + skips=( + # Issue with conj and torch dispatch, see https://github.com/pytorch/pytorch/issues/82479 + DecorateInfo( + unittest.skip("Skipped!"), + 'TestSchemaCheckModeOpInfo', + 'test_schema_correctness', + dtypes=(torch.complex64, torch.complex128)), + DecorateInfo(unittest.skip("Skipped!"), 'TestCommon', 'test_out', + device_type='mps', dtypes=[torch.float32]), + DecorateInfo(unittest.skip("Skipped!"), 'TestCommon', 'test_variant_consistency_eager', + device_type='mps', dtypes=[torch.float32]), + DecorateInfo(unittest.skip("Skipped!"), 'TestJit', 'test_variant_consistency_jit', + device_type='mps', dtypes=[torch.float32]), + DecorateInfo(unittest.skip("Skipped!"), 'TestFakeTensor', 'test_fake_crossref_backward_amp', + device_type='cuda', dtypes=[torch.float32], active_if=TEST_WITH_ROCM), + DecorateInfo(unittest.skip("Skipped!"), 'TestFakeTensor', 'test_fake_crossref_backward_no_amp', + device_type='cuda', dtypes=[torch.float32], active_if=TEST_WITH_ROCM), + )), + OpInfo('svd_lowrank', + op=lambda *args, **kwargs: wrapper_set_seed( + lambda a, b, **kwargs: torch.svd_lowrank(a @ b.mT, **kwargs), + *args, **kwargs + ), + dtypes=floating_types(), + # Runs very slowly on slow gradcheck - alternatively reduce input sizes + gradcheck_fast_mode=True, + supports_out=False, + check_batched_grad=False, + check_batched_gradgrad=False, + check_batched_forward_grad=False, + supports_fwgrad_bwgrad=True, + supports_forward_ad=True, + sample_inputs_func=sample_inputs_svd_lowrank, + decorators=[skipCUDAIfNoCusolver, 
skipCPUIfNoLapack, with_tf32_off, + DecorateInfo(toleranceOverride({torch.float32: tol(atol=1e-03, rtol=1e-03)}), + 'TestCommon', 'test_noncontiguous_samples', + device_type='cuda')], + skips=( + # test does not work with passing lambda for op + DecorateInfo(unittest.expectedFailure, "TestNormalizeOperators", "test_normalize_operator_exhaustive"), + DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_variant_consistency_jit'), + DecorateInfo(unittest.skip('output is non-deterministic'), 'TestCommon', 'test_compare_cpu'), + DecorateInfo(slowTest, 'TestCompositeCompliance', 'test_forward_ad'), + )), + OpInfo('pca_lowrank', + op=lambda *args, **kwargs: wrapper_set_seed( + lambda a, b, **kwargs: torch.pca_lowrank(a @ b.mT, **kwargs), + *args, **kwargs + ), + dtypes=floating_types(), + # Runs very slowly on slow gradcheck - alternatively reduce input sizes + gradcheck_fast_mode=True, + supports_out=False, + check_batched_forward_grad=False, + check_batched_grad=False, + check_batched_gradgrad=False, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + sample_inputs_func=sample_inputs_pca_lowrank, + decorators=[skipCUDAIfNoCusolver, skipCPUIfNoLapack, with_tf32_off, + DecorateInfo(toleranceOverride({torch.float32: tol(atol=1e-03, rtol=1e-03)}), + 'TestCommon', 'test_noncontiguous_samples', + device_type='cuda')], + skips=( + # test does not work with passing lambda for op + DecorateInfo(unittest.expectedFailure, "TestNormalizeOperators", "test_normalize_operator_exhaustive"), + DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_variant_consistency_jit'), + DecorateInfo(unittest.skip('output is non-deterministic'), 'TestCommon', 'test_compare_cpu'), + )), + BinaryUfuncInfo('polar', + dtypes=floating_types(), + # this function is undefined if 'abs' values are <0 + supports_forward_ad=True, + lhs_make_tensor_kwargs=dict(low=0), + supports_rhs_python_scalar=False, + skips=( + # RuntimeError: Expected object of scalar type Float but got scalar type Double for second argument + DecorateInfo(unittest.skip('Skipped!'), 'TestBinaryUfuncs', 'test_type_promotion'), + DecorateInfo(unittest.expectedFailure, 'TestMeta', 'test_binary_ufuncs_mixed_dtype'), + # GradcheckError: Jacobian computed with forward mode mismatch for output 0 with respect to input 0 + # Numerical: + # tensor([[0.]], dtype=torch.float64) + # Analytical: + # tensor([[-0.0047]], dtype=torch.float64, grad_fn=) + DecorateInfo(unittest.expectedFailure, 'TestFwdGradients', 'test_fn_fwgrad_bwgrad'), + )), + # TODO(@kshitij12345): Refactor similar to `mvlgamma` entries. + # To test reference numerics against multiple values of argument `n`, + # we make multiple OpInfo entries with each entry corresponding to different value of n (currently 0 to 4). + # We run the op tests from test_ops.py only for `n=0` to avoid redundancy in testing. 
+ UnaryUfuncInfo('polygamma', + op=lambda x, n, **kwargs: torch.polygamma(n, x, **kwargs), + variant_test_name='polygamma_n_0', + ref=reference_polygamma if TEST_SCIPY else None, + dtypes=all_types_and(torch.bool, torch.half, torch.bfloat16), + dtypesIfCUDA=all_types_and(torch.bool, torch.half), + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + promotes_int_to_float=True, + sample_inputs_func=sample_inputs_polygamma, + skips=( + DecorateInfo(unittest.expectedFailure, "TestNormalizeOperators", "test_normalize_operator_exhaustive"), + ), + sample_kwargs=lambda device, dtype, input: ({'n': 0}, {'n': 0})), + UnaryUfuncInfo('polygamma', + op=lambda x, n, **kwargs: torch.polygamma(n, x, **kwargs), + variant_test_name='polygamma_n_1', + ref=reference_polygamma if TEST_SCIPY else None, + dtypes=all_types_and(torch.bool, torch.bfloat16), + dtypesIfCUDA=all_types_and(torch.bool, torch.half), + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + promotes_int_to_float=True, + sample_inputs_func=sample_inputs_polygamma, + skips=( + # Redundant tests + DecorateInfo(unittest.skip("Skipped!"), 'TestFwdGradients'), + DecorateInfo(unittest.skip("Skipped!"), 'TestBwdGradients'), + DecorateInfo(unittest.skip("Skipped!"), 'TestJit'), + DecorateInfo(unittest.skip("Skipped!"), 'TestNormalizeOperators'), + DecorateInfo(unittest.skip("Skipped!"), 'TestCommon'), + # Mismatch: https://github.com/pytorch/pytorch/issues/55357 + DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_extremal'), + DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_large'), + ), + sample_kwargs=lambda device, dtype, input: ({'n': 1}, {'n': 1}), + # polygamma functions have multiple singularities at x <= 0 + reference_numerics_filter=NumericsFilter(condition=lambda x: x < 0.1, safe_val=1)), + UnaryUfuncInfo('polygamma', + op=lambda x, n, **kwargs: torch.polygamma(n, x, **kwargs), + variant_test_name='polygamma_n_2', + ref=reference_polygamma if TEST_SCIPY else None, + dtypes=all_types_and(torch.bool, torch.bfloat16), + dtypesIfCUDA=all_types_and(torch.bool, torch.half), + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + promotes_int_to_float=True, + sample_inputs_func=sample_inputs_polygamma, + skips=( + # Redundant tests + DecorateInfo(unittest.skip("Skipped!"), 'TestFwdGradients'), + DecorateInfo(unittest.skip("Skipped!"), 'TestBwdGradients'), + DecorateInfo(unittest.skip("Skipped!"), 'TestJit'), + DecorateInfo(unittest.skip("Skipped!"), 'TestNormalizeOperators'), + DecorateInfo(unittest.skip("Skipped!"), 'TestCommon'), + # Mismatch: https://github.com/pytorch/pytorch/issues/55357 + DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_extremal'),), + sample_kwargs=lambda device, dtype, input: ({'n': 2}, {'n': 2}), + # polygamma functions have multiple singularities at x <= 0 + reference_numerics_filter=NumericsFilter(condition=lambda x: x < 0.1, safe_val=1)), + UnaryUfuncInfo('polygamma', + op=lambda x, n, **kwargs: torch.polygamma(n, x, **kwargs), + variant_test_name='polygamma_n_3', + ref=reference_polygamma if TEST_SCIPY else None, + dtypes=all_types_and(torch.bool, torch.bfloat16), + dtypesIfCUDA=all_types_and(torch.bool, torch.half), + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + promotes_int_to_float=True, + sample_inputs_func=sample_inputs_polygamma, + skips=( + # Redundant tests + DecorateInfo(unittest.skip("Skipped!"), 'TestFwdGradients'), + DecorateInfo(unittest.skip("Skipped!"), 
'TestBwdGradients'), + DecorateInfo(unittest.skip("Skipped!"), 'TestJit'), + DecorateInfo(unittest.skip("Skipped!"), 'TestNormalizeOperators'), + DecorateInfo(unittest.skip("Skipped!"), 'TestCommon'), + # Mismatch: https://github.com/pytorch/pytorch/issues/55357 + DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_extremal'),), + sample_kwargs=lambda device, dtype, input: ({'n': 3}, {'n': 3}), + # polygamma functions have multiple singularities at x <= 0 + reference_numerics_filter=NumericsFilter(condition=lambda x: x < 0.1, safe_val=1)), + UnaryUfuncInfo('polygamma', + op=lambda x, n, **kwargs: torch.polygamma(n, x, **kwargs), + variant_test_name='polygamma_n_4', + ref=reference_polygamma if TEST_SCIPY else None, + decorators=(precisionOverride({torch.float16: 5e-4, torch.float32: 5e-4}),), + dtypes=all_types_and(torch.bool, torch.bfloat16), + dtypesIfCUDA=all_types_and(torch.bool, torch.half), + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + promotes_int_to_float=True, + sample_inputs_func=sample_inputs_polygamma, + skips=( + # Redundant tests + DecorateInfo(unittest.skip("Skipped!"), 'TestFwdGradients'), + DecorateInfo(unittest.skip("Skipped!"), 'TestBwdGradients'), + DecorateInfo(unittest.skip("Skipped!"), 'TestJit'), + DecorateInfo(unittest.skip("Skipped!"), 'TestNormalizeOperators'), + DecorateInfo(unittest.skip("Skipped!"), 'TestCommon'), + # Mismatch: https://github.com/pytorch/pytorch/issues/55357 + DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_extremal'),), + sample_kwargs=lambda device, dtype, input: ({'n': 4}, {'n': 4}), + # polygamma functions have multiple singularities at x <= 0 + reference_numerics_filter=NumericsFilter(condition=lambda x: x < 0.1, safe_val=1)), + OpInfo('ravel', + ref=np.ravel, + dtypes=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16, torch.chalf), + supports_out=False, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + # See https://github.com/pytorch/pytorch/pull/78358 + check_batched_forward_grad=False, + sample_inputs_func=sample_inputs_ravel, + ), + OpInfo('unravel_index', + ref=np.unravel_index, + dtypes=integral_types_and(), + supports_out=False, + supports_autograd=False, + sample_inputs_func=sample_inputs_unravel_index, + ), + OpInfo('reshape', + dtypes=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16, torch.chalf), + sample_inputs_func=sample_inputs_view_reshape, + reference_inputs_func=reference_inputs_view_reshape, + error_inputs_func=error_inputs_view_reshape, + supports_out=False, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + ), + OpInfo('reshape_as', + op=lambda x, other: x.reshape_as(other), + dtypes=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16, torch.chalf), + sample_inputs_func=partial(sample_inputs_view_reshape, tensor_arg=True), + reference_inputs_func=partial(reference_inputs_view_reshape, tensor_arg=True), + error_inputs_func=partial(error_inputs_view_reshape, tensor_arg=True), + supports_out=False, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + skips=( + DecorateInfo(unittest.expectedFailure, "TestNormalizeOperators", "test_normalize_operator_exhaustive"), + )), + OpInfo('view', + op=lambda x, shape: x.view(shape), + dtypes=all_types_and_complex_and(torch.complex32, torch.bool, torch.float16, torch.bfloat16), + supports_out=False, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + assert_jit_shape_analysis=True, + 
sample_inputs_func=sample_inputs_view_reshape, + reference_inputs_func=reference_inputs_view_reshape, + error_inputs_func=error_inputs_view_reshape, + skips=( + DecorateInfo(unittest.expectedFailure, "TestNormalizeOperators", "test_normalize_operator_exhaustive"), + # RuntimeError: view size is not compatible with input tensor's size and stride + # (at least one dimension spans across two contiguous subspaces). Use .reshape(...) instead. + DecorateInfo(unittest.expectedFailure, "TestMeta", "test_dispatch_symbolic_meta_outplace_all_strides"), + )), + OpInfo('view_as', + op=lambda x, other: x.view_as(other), + dtypes=all_types_and_complex_and(torch.complex32, torch.bool, torch.float16, torch.bfloat16), + supports_out=False, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + sample_inputs_func=partial(sample_inputs_view_reshape, tensor_arg=True), + reference_inputs_func=partial(reference_inputs_view_reshape, tensor_arg=True), + error_inputs_func=partial(error_inputs_view_reshape, tensor_arg=True), + skips=( + DecorateInfo(unittest.expectedFailure, "TestNormalizeOperators", "test_normalize_operator_exhaustive"), + # RuntimeError: view size is not compatible with input tensor's size and stride + DecorateInfo(unittest.expectedFailure, "TestMeta", "test_dispatch_symbolic_meta_outplace_all_strides") + )), + OpInfo('atleast_1d', + dtypes=all_types_and_complex_and(torch.complex32, torch.bool, torch.float16, torch.bfloat16), + # Runs very slowly on slow gradcheck - alternatively reduce input sizes + gradcheck_fast_mode=True, + supports_out=False, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + # See https://github.com/pytorch/pytorch/pull/78358 + check_batched_forward_grad=False, + sample_inputs_func=sample_inputs_atleast1d2d3d, + skips=( + # JIT does not support variadic tensors. + # RuntimeError: input->type()->kind() == TypeKind::OptionalType + # INTERNAL ASSERT FAILED at "../torch/csrc/jit/passes/utils/check_alias_annotation.cpp":252, + # please report a bug to PyTorch. 
+ DecorateInfo(unittest.expectedFailure, "TestNormalizeOperators", "test_normalize_operator_exhaustive"), + DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_variant_consistency_jit', dtypes=[torch.float32]), + ), + ), + OpInfo('atleast_2d', + dtypes=all_types_and_complex_and(torch.complex32, torch.bool, torch.float16, torch.bfloat16), + # Runs very slowly on slow gradcheck - alternatively reduce input sizes + gradcheck_fast_mode=True, + supports_out=False, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + # See https://github.com/pytorch/pytorch/pull/78358 + check_batched_forward_grad=False, + skips=( + DecorateInfo(unittest.expectedFailure, "TestNormalizeOperators", "test_normalize_operator_exhaustive"), + DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_variant_consistency_jit', dtypes=[torch.float32]), + ), + sample_inputs_func=sample_inputs_atleast1d2d3d, + ), + OpInfo('atleast_3d', + dtypes=all_types_and_complex_and(torch.complex32, torch.bool, torch.float16, torch.bfloat16), + # Runs very slowly on slow gradcheck - alternatively reduce input sizes + gradcheck_fast_mode=True, + supports_out=False, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + # See https://github.com/pytorch/pytorch/pull/78358 + check_batched_forward_grad=False, + skips=( + DecorateInfo(unittest.expectedFailure, "TestNormalizeOperators", "test_normalize_operator_exhaustive"), + DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_variant_consistency_jit', dtypes=[torch.float32]), + ), + sample_inputs_func=sample_inputs_atleast1d2d3d, + ), + OpInfo('flatten', + dtypes=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16, torch.chalf), + ref=reference_flatten, + supports_out=False, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + # See https://github.com/pytorch/pytorch/pull/78358 + check_batched_forward_grad=False, + sample_inputs_func=sample_inputs_flatten, + reference_inputs_func=reference_inputs_flatten, + ), + OpInfo('unflatten', + op=torch.unflatten, + dtypes=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16, torch.chalf), + supports_out=False, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + sample_inputs_func=sample_inputs_unflatten, + ), + OpInfo('column_stack', + dtypes=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16, torch.chalf), + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + # See https://github.com/pytorch/pytorch/pull/78358 + check_batched_forward_grad=False, + skips=( + # lambda impl + DecorateInfo(unittest.expectedFailure, "TestNormalizeOperators", "test_normalize_operator_exhaustive"),), + sample_inputs_func=sample_inputs_column_stack,), + OpInfo('pinverse', + op=torch.pinverse, + dtypes=floating_and_complex_types(), + check_batched_grad=False, + check_batched_gradgrad=False, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + gradcheck_nondet_tol=GRADCHECK_NONDET_TOL, + supports_out=False, + sample_inputs_func=sample_inputs_linalg_invertible, + decorators=[skipCUDAIfNoMagmaAndNoCusolver, skipCPUIfNoLapack], + skips=( + DecorateInfo(unittest.skip("Skipped!"), 'TestCommon', 'test_variant_consistency_eager', + device_type='mps', dtypes=[torch.float32]), + DecorateInfo(unittest.skip("Skipped!"), 'TestJit', 'test_variant_consistency_jit', + device_type='mps', dtypes=[torch.float32]), + )), + OpInfo('gather', + dtypes=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16), + dtypesIfCUDA=all_types_and_complex_and(torch.bool, torch.float16, 
torch.bfloat16), + sample_inputs_func=sample_inputs_gather, + gradcheck_nondet_tol=GRADCHECK_NONDET_TOL, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + error_inputs_func=error_inputs_gather, + ), + OpInfo('index_fill', + dtypes=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16, torch.complex32), + supports_out=False, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + # https://github.com/pytorch/pytorch/issues/66357 + check_batched_forward_grad=False, + skips=( + # RuntimeError: Mismatch on aten._unique.default: Shapes torch.Size([2]) and torch.Size([1]) are not equal! + DecorateInfo(unittest.expectedFailure, 'TestFakeTensor', 'test_fake_crossref_backward_no_amp'), + # RuntimeError: Mismatch on aten._unique.default: Shapes torch.Size([2]) and torch.Size([1]) are not equal! + DecorateInfo(unittest.expectedFailure, 'TestFakeTensor', 'test_fake_crossref_backward_amp'), + ), + sample_inputs_func=sample_inputs_index, + reference_inputs_func=partial(sample_inputs_index, reference=True)), + OpInfo('index_copy', + dtypes=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16, torch.complex32), + supports_out=True, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + # https://github.com/pytorch/pytorch/issues/66357 + check_batched_forward_grad=False, + sample_inputs_func=sample_inputs_index, + reference_inputs_func=partial(sample_inputs_index, reference=True), + gradcheck_nondet_tol=GRADCHECK_NONDET_TOL), + OpInfo('index_select', + dtypes=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16, torch.chalf), + backward_dtypesIfCUDA=floating_and_complex_types_and(torch.float16, torch.bfloat16, torch.chalf), + sample_inputs_func=sample_inputs_index, + reference_inputs_func=partial(sample_inputs_index, reference=True), + error_inputs_func=error_inputs_index_select, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + assert_jit_shape_analysis=True, + gradcheck_nondet_tol=GRADCHECK_NONDET_TOL), + OpInfo('index_add', + dtypes=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16, torch.chalf), + supports_out=True, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + # https://github.com/pytorch/pytorch/issues/66357 + check_batched_forward_grad=False, + sample_inputs_func=sample_inputs_index, + reference_inputs_func=partial(sample_inputs_index, reference=True), + error_inputs_func=error_inputs_index_add, + skips=( + # boolean alpha not handled properly + DecorateInfo(unittest.expectedFailure, + 'TestNNCOpInfo', + 'test_nnc_correctness', + dtypes=(torch.bool,)), + ), + gradcheck_nondet_tol=GRADCHECK_NONDET_TOL), + OpInfo('index_reduce', + dtypes=all_types_and(torch.float16, torch.bfloat16), + supports_out=True, + sample_inputs_func=sample_inputs_index_reduce), + OpInfo('__getitem__', + dtypes=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16, torch.chalf), + # Runs very slowly on slow gradcheck - alternatively reduce input sizes + gradcheck_fast_mode=True, + supports_out=False, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + supports_inplace_autograd=False, + supports_scripting=False, + op=torch.Tensor.__getitem__, + skips=( + DecorateInfo(unittest.expectedFailure, "TestNormalizeOperators", "test_normalize_operator_exhaustive"), + # AssertionError: False is not true : Scalars failed to compare as equal! 
0 != 104448 + DecorateInfo(unittest.skip("Skipped!"), 'TestJit', 'test_variant_consistency_jit', device_type='cuda'),), + sample_inputs_func=sample_inputs_getitem), + OpInfo('index_put', + dtypes=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16, torch.chalf), + supports_out=False, + supports_inplace_autograd=True, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + # https://github.com/pytorch/pytorch/issues/66357 + check_batched_forward_grad=False, + test_neg_view=False, + sample_inputs_func=sample_inputs_index_put, + skips=( + DecorateInfo(unittest.expectedFailure, 'TestNormalizeOperators', 'test_normalize_operator_exhaustive'), + DecorateInfo(unittest.skip("Skipped"), 'TestBwdGradients', 'test_fn_grad', dtypes=[torch.float64], + device_type='cuda', active_if=(TEST_WITH_ROCM and TEST_WITH_TORCHINDUCTOR)), + )), + OpInfo('sort', + dtypes=all_types_and(torch.bool, torch.float16, torch.bfloat16), + dtypesIfCUDA=all_types_and(torch.float16, torch.bfloat16), + sample_inputs_func=sample_inputs_sort, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + skips=( + )), + OpInfo('unique', + dtypes=all_types_and(torch.bool, torch.float16, torch.bfloat16), + dtypesIfCUDA=all_types_and(torch.bool, torch.float16), + sample_inputs_func=sample_inputs_unique, + supports_out=False, + supports_autograd=False, + skips=( + # lambda impl + DecorateInfo(unittest.expectedFailure, "TestNormalizeOperators", "test_normalize_operator_exhaustive"), + DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_variant_consistency_jit'), + DecorateInfo(unittest.skip('Output order is undefined when sorted=False'), 'TestCommon', 'test_compare_cpu'), + )), + OpInfo('unique_consecutive', + dtypes=all_types_and(torch.bool, torch.float16, torch.bfloat16), + dtypesIfCUDA=all_types_and(torch.bool, torch.float16), + sample_inputs_func=sample_inputs_unique_consecutive, + supports_out=False, + supports_autograd=False, + skips=( + # lambda impl + DecorateInfo(unittest.expectedFailure, "TestNormalizeOperators", "test_normalize_operator_exhaustive"), + DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_variant_consistency_jit'), + )), + OpInfo('put', + dtypes=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16), + supports_out=False, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + check_batched_forward_grad=False, + check_batched_gradgrad=False, # vmap complains of the sizes + sample_inputs_func=sample_inputs_put), + OpInfo('take', + dtypes=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16), + check_batched_grad=False, # vmap complains of the sizes + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + sample_inputs_func=sample_inputs_take, + error_inputs_func=error_inputs_take), + OpInfo('scatter', + dtypes=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16), + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + sample_inputs_func=sample_inputs_scatter, + error_inputs_func=error_inputs_scatter_and_scatter_add), + UnaryUfuncInfo( + 'bfloat16', + op=lambda x, *args, **kwargs: x.bfloat16(*args, **kwargs), + dtypes=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16, torch.chalf), + supports_out=False, + sample_inputs_func=sample_inputs_conversion, + skips=( + # autograd tests don't handle operators that change dtype + DecorateInfo(unittest.expectedFailure, 'TestFwdGradients'), + DecorateInfo(unittest.expectedFailure, 'TestBwdGradients'), + DecorateInfo(unittest.expectedFailure, "TestNormalizeOperators", 
"test_normalize_operator_exhaustive"), + # RuntimeError: attribute lookup is not defined on builtin + DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_variant_consistency_jit'), + DecorateInfo(unittest.skip("Skipped!"), 'TestNNCOpInfo', 'test_nnc_correctness'), + )), + UnaryUfuncInfo( + 'bool', + op=lambda x, *args, **kwargs: x.bool(*args, **kwargs), + dtypes=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16, torch.chalf), + supports_out=False, + sample_inputs_func=sample_inputs_conversion, + supports_autograd=False, + skips=( + DecorateInfo(unittest.expectedFailure, "TestNormalizeOperators", "test_normalize_operator_exhaustive"), + # RuntimeError: attributis not defined on builtin + DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_variant_consistency_jit'), + )), + UnaryUfuncInfo( + 'byte', + op=lambda x, *args, **kwargs: x.byte(*args, **kwargs), + dtypes=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16), + supports_out=False, + sample_inputs_func=sample_inputs_conversion, + # The autograd test runner cannot handle functions that change dtype + supports_autograd=False, + skips=( + DecorateInfo(unittest.expectedFailure, "TestNormalizeOperators", "test_normalize_operator_exhaustive"), + # RuntimeError: attribute lookup is not defined on builtin + DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_variant_consistency_jit'), + DecorateInfo(unittest.skip('Overflow when downcasting signed type is undefined'), 'TestCommon', 'test_compare_cpu'), + )), + UnaryUfuncInfo( + 'char', + op=lambda x, *args, **kwargs: x.char(*args, **kwargs), + dtypes=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16, torch.chalf), + supports_out=False, + sample_inputs_func=sample_inputs_conversion, + # The autograd test runner cannot handle functions that change dtype + supports_autograd=False, + skips=( + DecorateInfo(unittest.expectedFailure, "TestNormalizeOperators", "test_normalize_operator_exhaustive"), + # RuntimeError: attribute lookup is not defined on builtin + DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_variant_consistency_jit'), + DecorateInfo(unittest.skip('Overflow when downcasting signed type is undefined'), 'TestCommon', 'test_compare_cpu'), + )), + UnaryUfuncInfo( + 'double', + op=lambda x, *args, **kwargs: x.double(*args, **kwargs), + dtypes=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16, torch.chalf), + supports_out=False, + sample_inputs_func=sample_inputs_conversion, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + skips=( + DecorateInfo(unittest.expectedFailure, "TestNormalizeOperators", "test_normalize_operator_exhaustive"), + # RuntimeError: attribute lookup is not defined on builtin + DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_variant_consistency_jit'), + )), + UnaryUfuncInfo( + 'float', + op=lambda x, *args, **kwargs: x.float(*args, **kwargs), + dtypes=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16, torch.chalf), + supports_out=False, + sample_inputs_func=sample_inputs_conversion, + skips=( + # autograd tests don't handle operators that change dtype + DecorateInfo(unittest.expectedFailure, 'TestFwdGradients'), + DecorateInfo(unittest.expectedFailure, 'TestBwdGradients'), + DecorateInfo(unittest.expectedFailure, "TestNormalizeOperators", "test_normalize_operator_exhaustive"), + # RuntimeError: attribute lookup is not defined on builtin + DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_variant_consistency_jit'), + )), + UnaryUfuncInfo( + 'half', + 
op=lambda x, *args, **kwargs: x.half(*args, **kwargs), + dtypes=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16), + supports_out=False, + sample_inputs_func=sample_inputs_conversion, + supports_autograd=True, + skips=( + # autograd tests don't handle operators that change dtype + DecorateInfo(unittest.expectedFailure, 'TestFwdGradients'), + DecorateInfo(unittest.expectedFailure, 'TestBwdGradients'), + DecorateInfo(unittest.expectedFailure, "TestNormalizeOperators", "test_normalize_operator_exhaustive"), + # RuntimeError: attribute lookup is not defined on builtin + DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_variant_consistency_jit'), + )), + UnaryUfuncInfo( + 'int', + op=lambda x, *args, **kwargs: x.int(*args, **kwargs), + dtypes=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16), + supports_out=False, + sample_inputs_func=sample_inputs_conversion, + supports_autograd=False, + skips=( + DecorateInfo(unittest.expectedFailure, "TestNormalizeOperators", "test_normalize_operator_exhaustive"), + # RuntimeError: attribute lookup is not defined on builtin + DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_variant_consistency_jit'), + DecorateInfo(unittest.skip('Overflow when downcasting signed type is undefined'), 'TestCommon', 'test_compare_cpu'), + )), + UnaryUfuncInfo( + 'long', + op=lambda x, *args, **kwargs: x.long(*args, **kwargs), + dtypes=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16, torch.chalf), + supports_out=False, + sample_inputs_func=sample_inputs_conversion, + supports_autograd=False, + skips=( + DecorateInfo(unittest.expectedFailure, "TestNormalizeOperators", "test_normalize_operator_exhaustive"), + # RuntimeError: attribute lookup is not defined on builtin + DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_variant_consistency_jit'), + DecorateInfo(unittest.skip('Overflow when downcasting signed type is undefined'), 'TestCommon', 'test_compare_cpu'), + )), + UnaryUfuncInfo( + 'short', + op=lambda x, *args, **kwargs: x.short(*args, **kwargs), + dtypes=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16), + supports_out=False, + sample_inputs_func=sample_inputs_conversion, + supports_autograd=False, + skips=( + DecorateInfo(unittest.expectedFailure, "TestNormalizeOperators", "test_normalize_operator_exhaustive"), + # RuntimeError: attribute lookup is not defined on builtin + DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_variant_consistency_jit'), + DecorateInfo(unittest.skip('Overflow when downcasting signed type is undefined'), 'TestCommon', 'test_compare_cpu'), + )), + UnaryUfuncInfo( + 'cdouble', + op=torch.Tensor.cdouble, + dtypes=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16, torch.chalf), + supports_out=False, + sample_inputs_func=sample_inputs_conversion, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + skips=( + DecorateInfo(unittest.expectedFailure, "TestNormalizeOperators", "test_normalize_operator_exhaustive"), + # RuntimeError: attribute lookup is not defined on builtin + DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_variant_consistency_jit'), + DecorateInfo(unittest.skip("Skipped!"), 'TestNNCOpInfo', 'test_nnc_correctness'), + )), + UnaryUfuncInfo( + 'cfloat', + op=torch.Tensor.cfloat, + dtypes=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16, torch.chalf), + supports_out=False, + sample_inputs_func=sample_inputs_conversion, + skips=( + # autograd tests don't handle operators that change dtype + 
DecorateInfo(unittest.expectedFailure, 'TestFwdGradients'), + DecorateInfo(unittest.expectedFailure, 'TestBwdGradients'), + DecorateInfo(unittest.expectedFailure, "TestNormalizeOperators", "test_normalize_operator_exhaustive"), + # RuntimeError: attribute lookup is not defined on builtin + DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_variant_consistency_jit'), + DecorateInfo(unittest.skip("Skipped!"), 'TestNNCOpInfo', 'test_nnc_correctness'), + )), + UnaryUfuncInfo( + 'chalf', + op=lambda x, *args, **kwargs: x.chalf(*args, **kwargs), + dtypes=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16, torch.chalf), + supports_out=False, + sample_inputs_func=sample_inputs_conversion, + skips=( + # autograd tests don't handle operators that change dtype + DecorateInfo(unittest.expectedFailure, 'TestFwdGradients'), + DecorateInfo(unittest.expectedFailure, 'TestBwdGradients'), + # use of lambda doesn't work with test_normalize_operator_exhaustive + DecorateInfo(unittest.expectedFailure, 'TestNormalizeOperators', 'test_normalize_operator_exhaustive'), + # RuntimeError: "sum_cpu" not implemented for 'ComplexHalf' + DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_variant_consistency_eager', + device_type='cpu'), + # TypeError: 'int' object is not iterable + DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_variant_consistency_jit'), + # RuntimeError: "sum_cpu" not implemented for 'ComplexHalf' + DecorateInfo(unittest.expectedFailure, 'TestMathBits', 'test_conj_view', + device_type='cpu'), + # RuntimeError: "sum_cpu" not implemented for 'ComplexHalf' + DecorateInfo(unittest.expectedFailure, 'TestMathBits', 'test_neg_view', + device_type='cpu'), + # RuntimeError: "sum_cpu" not implemented for 'ComplexHalf' + # RuntimeError: "neg_conj_cuda" not implemented for 'ComplexHalf' + DecorateInfo(unittest.expectedFailure, 'TestMathBits', 'test_neg_conj_view'), + ) + ), + OpInfo('empty_like', + dtypes=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16, torch.chalf), + supports_out=False, + sample_inputs_func=sample_inputs_like_fns, + reference_inputs_func=reference_inputs_like_fns, + supports_autograd=False, + skips=( + # Empty tensor data is garbage so it's hard to make comparisons with it. + DecorateInfo(unittest.skip("Skipped!"), + "TestNormalizeOperators", "test_normalize_operator_exhaustive"), + # Empty tensor data is garbage so it's hard to make comparisons with it. + DecorateInfo(unittest.skip("Skipped!"), 'TestCommon', 'test_noncontiguous_samples'), + # Empty tensor data is garbage so it's hard to make comparisons with it. + DecorateInfo(unittest.skip("Skipped!"), 'TestJit', 'test_variant_consistency_jit'), + # Empty tensor data is garbage so it's hard to make comparisons with it. + DecorateInfo(unittest.skip("Skipped!"), 'TestMathBits', 'test_conj_view'), + # Empty tensor data is garbage so it's hard to make comparisons with it. + DecorateInfo(unittest.skip("Skipped!"), 'TestMathBits', 'test_neg_view'), + # Empty tensor data is garbage so it's hard to make comparisons with it. + DecorateInfo(unittest.skip("Skipped!"), 'TestMathBits', 'test_neg_conj_view'), + # Empty tensor data is garbage so it's hard to make comparisons with it. + DecorateInfo(unittest.skip("Skipped!"), 'TestNNCOpInfo', 'test_nnc_correctness'), + # Empty tensor data is garbage so it's hard to make comparisons with it. + DecorateInfo(unittest.skip("Skipped!"), 'TestCudaFuserOpInfo'), + # Empty tensor data is garbage so it's hard to make comparisons with it. 
+ DecorateInfo(unittest.skip("Skipped!"), 'TestCommon', 'test_complex_half_reference_testing'), + # Empty tensor data is garbage so it's hard to make comparisons with it. + DecorateInfo(unittest.skip("Skipped!"), 'TestCommon', 'test_non_standard_bool_values'), + DecorateInfo(unittest.skip("Expected: empty_like is not comparable"), 'TestCompositeCompliance', + 'test_operator'), + DecorateInfo(unittest.skip('output is non-deterministic'), 'TestCommon', 'test_compare_cpu'), + )), + OpInfo('zeros_like', + dtypes=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16, torch.chalf), + supports_out=False, + sample_inputs_func=sample_inputs_like_fns, + supports_autograd=False, + error_inputs_sparse_func=error_inputs_sparse_like_fns, + sample_inputs_sparse_coo_func=partial(sample_inputs_sparse_like_fns, layout=torch.sparse_coo), + sample_inputs_sparse_csr_func=partial(sample_inputs_sparse_like_fns, layout=torch.sparse_csr), + sample_inputs_sparse_csc_func=partial(sample_inputs_sparse_like_fns, layout=torch.sparse_csc), + sample_inputs_sparse_bsr_func=partial(sample_inputs_sparse_like_fns, layout=torch.sparse_bsr), + sample_inputs_sparse_bsc_func=partial(sample_inputs_sparse_like_fns, layout=torch.sparse_bsc), + skips=( + )), + OpInfo('ones_like', + dtypes=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16, torch.chalf), + supports_out=False, + sample_inputs_func=sample_inputs_like_fns, + supports_autograd=False, + skips=( + )), + OpInfo('randn', + dtypes=floating_and_complex_types_and(torch.half, torch.bfloat16, torch.complex32), + op=lambda *args, **kwargs: wrapper_set_seed(torch.randn, *args, **kwargs), + supports_out=True, + sample_inputs_func=sample_inputs_randn, + supports_autograd=False, + skips=( + # Tests that assume input is a tensor or sequence of tensors + DecorateInfo(unittest.skip("Test expects tensor input"), "TestCommon", "test_noncontiguous_samples"), + DecorateInfo(unittest.skip("Test expects tensor input"), "TestVmapOperatorsOpInfo", "test_vmap_exhaustive"), + DecorateInfo(unittest.skip("Test expects tensor input"), "TestVmapOperatorsOpInfo", "test_op_has_batch_rule"), + # CPU randn generates different values based on the strides of out tensor + DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_out', device_type='cpu'), + # randn fails to warn when resizing its out tensor + DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_out_warning'), + # FX failed to normalize op - add the op to the op_skip list. 
+ DecorateInfo(unittest.expectedFailure, 'TestNormalizeOperators', 'test_normalize_operator_exhaustive'), + # Tests that assume input tensor has a meaningful effect on output tensor + DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_variant_consistency_eager'), + DecorateInfo(unittest.expectedFailure, 'TestMathBits', 'test_neg_view'), + DecorateInfo(unittest.expectedFailure, 'TestMathBits', 'test_conj_view'), + DecorateInfo(unittest.expectedFailure, 'TestMathBits', 'test_neg_conj_view'), + # AssertionError: JIT Test does not execute any logic + DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_variant_consistency_jit'), + DecorateInfo(unittest.expectedFailure, 'TestDecomp', 'test_quick'), + )), + OpInfo('randn_like', + dtypes=floating_and_complex_types_and(torch.half, torch.bfloat16, torch.complex32), + op=lambda inp, *args, **kwargs: + wrapper_set_seed(torch.randn_like, inp, *args, **kwargs), + supports_out=False, + sample_inputs_func=sample_inputs_like_fns, + supports_autograd=False, + error_inputs_sparse_func=error_inputs_sparse_like_fns, + sample_inputs_sparse_coo_func=partial(sample_inputs_sparse_like_fns, layout=torch.sparse_coo), + sample_inputs_sparse_csr_func=partial(sample_inputs_sparse_like_fns, layout=torch.sparse_csr), + sample_inputs_sparse_csc_func=partial(sample_inputs_sparse_like_fns, layout=torch.sparse_csc), + sample_inputs_sparse_bsr_func=partial(sample_inputs_sparse_like_fns, layout=torch.sparse_bsr), + sample_inputs_sparse_bsc_func=partial(sample_inputs_sparse_like_fns, layout=torch.sparse_bsc), + skips=( + DecorateInfo(unittest.expectedFailure, "TestNormalizeOperators", "test_normalize_operator_exhaustive"), + # AssertionError: JIT Test does not execute any logic + DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_variant_consistency_jit'), + DecorateInfo(unittest.skip("Expected: randn_like is not comparable between dtypes"), + 'TestCommon', 'test_complex_half_reference_testing'), + DecorateInfo(unittest.skip('output is non-deterministic'), 'TestCommon', 'test_compare_cpu'), + )), + OpInfo('rand_like', + dtypes=floating_types_and(torch.half, torch.bfloat16, torch.complex32, torch.complex64, torch.complex128), + op=lambda inp, *args, **kwargs: + wrapper_set_seed(torch.randn_like, inp, *args, **kwargs), + supports_out=False, + sample_inputs_func=sample_inputs_like_fns, + supports_autograd=False, + skips=( + DecorateInfo(unittest.expectedFailure, "TestNormalizeOperators", "test_normalize_operator_exhaustive"), + # AssertionError: JIT Test does not execute any logic + DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_variant_consistency_jit'), + DecorateInfo(unittest.skip("Expected: randn_like is not comparable between dtypes"), + 'TestCommon', 'test_complex_half_reference_testing'), + DecorateInfo(unittest.skip('output is non-deterministic'), 'TestCommon', 'test_compare_cpu'), + )), + OpInfo('randint', + dtypes=all_types_and(torch.half, torch.bfloat16), + op=lambda *args, **kwargs: + wrapper_set_seed(torch.randint, *args, **kwargs), + supports_out=False, + sample_inputs_func=sample_inputs_randint, + supports_autograd=False, + skips=( + # Tests that assume input is a tensor or sequence of tensors + DecorateInfo(unittest.skip("Test expects tensor input"), "TestCommon", "test_noncontiguous_samples"), + DecorateInfo(unittest.skip("Test expects tensor input"), "TestVmapOperatorsOpInfo", "test_vmap_exhaustive"), + DecorateInfo(unittest.skip("Test expects tensor input"), "TestVmapOperatorsOpInfo", "test_op_has_batch_rule"), + # CPU randint generates 
different values based on the strides of out tensor + DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_out'), + # randint fails to warn when resizing its out tensor + DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_out_warning'), + # FX failed to normalize op - add the op to the op_skip list. + DecorateInfo(unittest.expectedFailure, 'TestNormalizeOperators', 'test_normalize_operator_exhaustive'), + # Tests that assume input tensor has a meaningful effect on output tensor + DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_variant_consistency_eager'), + DecorateInfo(unittest.expectedFailure, 'TestMathBits', 'test_neg_view'), + DecorateInfo(unittest.expectedFailure, 'TestMathBits', 'test_conj_view'), + DecorateInfo(unittest.expectedFailure, 'TestMathBits', 'test_neg_conj_view'), + # AssertionError: JIT Test does not execute any logic + DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_variant_consistency_jit'), + # Might need to skip until ROCm5.5 + DecorateInfo(unittest.skip("Skipped!"), 'TestCommon', 'test_multiple_devices', + dtypes=[torch.float32, torch.int64], active_if=TEST_WITH_ROCM), + )), + OpInfo('randint_like', + dtypes=all_types_and(torch.half, torch.bfloat16), + op=lambda inp, *args, **kwargs: + wrapper_set_seed(torch.randint_like, inp, *args, **kwargs), + supports_out=False, + sample_inputs_func=sample_inputs_randint_like, + supports_autograd=False, + skips=( + DecorateInfo(unittest.expectedFailure, "TestNormalizeOperators", "test_normalize_operator_exhaustive"), + # AssertionError: JIT Test does not execute any logic + DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_variant_consistency_jit'), + DecorateInfo(unittest.skip('output is non-deterministic'), 'TestCommon', 'test_compare_cpu'), + )), + OpInfo('full_like', + dtypes=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16), + supports_out=False, + sample_inputs_func=sample_inputs_full_like, + supports_autograd=False, + skips=( + )), + OpInfo('new_zeros', + op=lambda x, *args, **kwargs: x.new_zeros(*args, **kwargs), + dtypes=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16, torch.chalf), + supports_out=False, + sample_inputs_func=sample_inputs_new_fns, + skips=( + DecorateInfo(unittest.expectedFailure, "TestNormalizeOperators", "test_normalize_operator_exhaustive"), + ), + supports_autograd=False), + OpInfo('new_ones', + op=lambda x, *args, **kwargs: x.new_ones(*args, **kwargs), + dtypes=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16, torch.chalf), + supports_out=False, + sample_inputs_func=sample_inputs_new_fns, + skips=( + DecorateInfo(unittest.expectedFailure, "TestNormalizeOperators", "test_normalize_operator_exhaustive"), + ), + supports_autograd=False), + OpInfo('ones', + op=torch.ones, + supports_autograd=False, + supports_varargs=True, + is_factory_function=True, + dtypes=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16, torch.chalf), + supports_out=True, + sample_inputs_func=sample_inputs_ones_zeros, + skips=( + # Tests that assume input is a tensor or sequence of tensors + DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_variant_consistency_eager'), + DecorateInfo(unittest.expectedFailure, 'TestMathBits', 'test_neg_view'), + DecorateInfo(unittest.expectedFailure, 'TestMathBits', 'test_conj_view'), + DecorateInfo(unittest.expectedFailure, 'TestMathBits', 'test_neg_conj_view'), + + # Same failure as arange: cannot find linspace in captured graph + DecorateInfo(unittest.skip("Skipped!"), 'TestJit', 
'test_variant_consistency_jit'), + + # UserWarning not triggered : Resized a non-empty tensor but did not warn about it. + DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_out_warning'), + )), + OpInfo('zeros', + op=torch.zeros, + supports_autograd=False, + is_factory_function=True, + dtypes=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16, torch.chalf), + supports_out=True, + sample_inputs_func=sample_inputs_ones_zeros, + skips=( + # Tests that assume input is a tensor or sequence of tensors + DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_variant_consistency_eager'), + DecorateInfo(unittest.expectedFailure, 'TestMathBits', 'test_neg_view'), + DecorateInfo(unittest.expectedFailure, 'TestMathBits', 'test_conj_view'), + DecorateInfo(unittest.expectedFailure, 'TestMathBits', 'test_neg_conj_view'), + + # Same failure as arange: cannot find linspace in captured graph + DecorateInfo(unittest.skip("Skipped!"), 'TestJit', 'test_variant_consistency_jit'), + + # UserWarning not triggered : Resized a non-empty tensor but did not warn about it. + DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_out_warning'), + )), + OpInfo('full', + op=torch.full, + supports_autograd=False, + is_factory_function=True, + dtypes=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16, torch.chalf), + supports_out=True, + sample_inputs_func=sample_inputs_full, + skips=( + # Tests that assume input is a tensor or sequence of tensors + DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_variant_consistency_eager'), + DecorateInfo(unittest.expectedFailure, 'TestMathBits', 'test_neg_view'), + DecorateInfo(unittest.expectedFailure, 'TestMathBits', 'test_conj_view'), + DecorateInfo(unittest.expectedFailure, 'TestMathBits', 'test_neg_conj_view'), + # Same failure as arange: cannot find linspace in captured graph + DecorateInfo(unittest.skip("Skipped!"), 'TestJit', 'test_variant_consistency_jit'), + # UserWarning not triggered : Resized a non-empty tensor but did not warn about it. + DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_out_warning'), + # RuntimeError: UNSUPPORTED DTYPE: bool + DecorateInfo(unittest.expectedFailure, 'TestNNCOpInfo', 'test_nnc_correctness', dtypes=(torch.bool,)), + )), + OpInfo('new_empty', + op=lambda x, *args, **kwargs: x.new_empty(*args, **kwargs), + dtypes=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16, torch.chalf), + supports_out=False, + sample_inputs_func=sample_inputs_new_fns, + skips=( + DecorateInfo(unittest.expectedFailure, "TestNormalizeOperators", "test_normalize_operator_exhaustive"), + # Empty tensor data is garbage so it's hard to make comparisons with it. + DecorateInfo(unittest.skip("Skipped!"), 'TestJit', 'test_variant_consistency_jit'), + # Empty tensor data is garbage so it's hard to make comparisons with it. + DecorateInfo(unittest.skip("Skipped!"), 'TestCommon', 'test_variant_consistency_eager'), + # Empty tensor data is garbage so it's hard to make comparisons with it. + DecorateInfo(unittest.skip("Skipped!"), 'TestCommon', 'test_noncontiguous_samples'), + # Empty tensor data is garbage so it's hard to make comparisons with it. + DecorateInfo(unittest.skip("Skipped!"), 'TestMathBits', 'test_conj_view'), + # Empty tensor data is garbage so it's hard to make comparisons with it. + DecorateInfo(unittest.skip("Skipped!"), 'TestMathBits', 'test_neg_view'), + # Empty tensor data is garbage so it's hard to make comparisons with it. 
+ DecorateInfo(unittest.skip("Skipped!"), 'TestMathBits', 'test_neg_conj_view'), + # Empty tensor data is garbage so it's hard to make comparisons with it. + DecorateInfo(unittest.skip("Skipped!"), 'TestNNCOpInfo', 'test_nnc_correctness'), + # Empty tensor data is garbage so it's hard to make comparisons with it. + DecorateInfo(unittest.skip("Skipped!"), 'TestCudaFuserOpInfo'), + # Empty tensor data is garbage so it's hard to make comparisons with it. + DecorateInfo(unittest.skip("Skipped!"), 'TestCommon', 'test_non_standard_bool_values'), + DecorateInfo(unittest.skip("Expected: new_empty is not comparable"), 'TestCompositeCompliance', + 'test_operator'), + DecorateInfo(unittest.skip("Expected: new_empty is not comparable"), + 'TestCommon', 'test_complex_half_reference_testing'), + DecorateInfo(unittest.skip('output is non-deterministic'), 'TestCommon', 'test_compare_cpu'), + ), + supports_autograd=False), + OpInfo('new_empty_strided', + op=lambda x, *args, **kwargs: x.new_empty_strided(*args, **kwargs), + dtypes=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16, torch.chalf), + supports_out=False, + sample_inputs_func=partial(sample_inputs_new_fns, is_strided=True), + supports_autograd=False, + skips=( + # FX failed to normalize op + DecorateInfo(unittest.expectedFailure, "TestNormalizeOperators", "test_normalize_operator_exhaustive"), + # Lazy tensor failures + DecorateInfo(unittest.skip("Skipped!"), 'TestLazyOpInfo', 'test_correctness'), + DecorateInfo(unittest.skip("Skipped!"), 'TestLazyOpInfo', 'test_correctness_with_reusing_ir'), + # Empty tensor data is garbage so it's hard to make comparisons with it. + DecorateInfo(unittest.skip("Expected: new_empty_strided is not comparable"), + 'TestCommon', 'test_variant_consistency_eager'), + DecorateInfo(unittest.skip("Expected: new_empty_strided is not comparable"), + 'TestCommon', 'test_noncontiguous_samples'), + DecorateInfo(unittest.skip("Expected: new_empty_strided is not comparable"), + 'TestMathBits', 'test_conj_view'), + DecorateInfo(unittest.skip("Expected: new_empty_strided is not comparable"), + 'TestMathBits', 'test_neg_view'), + DecorateInfo(unittest.skip("Expected: new_empty_strided is not comparable"), + 'TestMathBits', 'test_neg_conj_view'), + DecorateInfo(unittest.skip("Expected: new_empty_strided is not comparable"), + 'TestCommon', 'test_non_standard_bool_values'), + DecorateInfo(unittest.skip("Expected: new_empty_strided is not comparable"), + 'TestCommon', 'test_complex_half_reference_testing'), + DecorateInfo(unittest.skip("Expected: new_empty_strided is not comparable"), + 'TestCompositeCompliance', 'test_operator'), + DecorateInfo(unittest.skip("Expected: new_empty_strided is not comparable"), + 'TestDecomp', 'test_comprehensive'), + DecorateInfo(unittest.skip("Expected: new_empty_strided is not comparable"), + 'TestDecomp', 'test_quick'), + DecorateInfo(unittest.skip("Expected: new_empty_strided is not comparable"), + 'TestJit', 'test_variant_consistency_jit'), + DecorateInfo(unittest.skip("Expected: new_empty_strided is not comparable"), + 'TestProxyTensorOpInfo', 'test_make_fx_exhaustive'), + DecorateInfo(unittest.skip("Expected: new_empty_strided is not comparable"), + 'TestProxyTensorOpInfo', 'test_make_fx_fake_exhaustive'), + DecorateInfo(unittest.skip("Expected: new_empty_strided is not comparable"), + 'TestProxyTensorOpInfo', 'test_make_fx_symbolic_exhaustive'), + DecorateInfo(unittest.skip("Expected: new_empty_strided is not comparable"), + 'TestNNCOpInfo', 'test_nnc_correctness'), + 
DecorateInfo(unittest.skip('output is non-deterministic'), 'TestCommon', 'test_compare_cpu'), + )), + OpInfo('empty_strided', + op=lambda inp, *args, **kwargs: wrapper_set_seed(torch.empty_strided, inp, *args, **kwargs), + dtypes=all_types_and_complex_and(torch.bfloat16, torch.bool, torch.half), + supports_out=False, + supports_autograd=False, + sample_inputs_func=sample_inputs_empty_strided, + skips=( + # FX failed to normalize op - add the op to the op_skip list. + DecorateInfo(unittest.expectedFailure, 'TestNormalizeOperators', 'test_normalize_operator_exhaustive'), + # AssertionError: JIT Test does not execute any logic + DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_variant_consistency_jit'), + # Empty tensor data is garbage so it's hard to make comparisons with it. + DecorateInfo(unittest.skip("Skipped!"), 'TestCommon', 'test_noncontiguous_samples'), + DecorateInfo(unittest.skip("Skipped!"), 'TestCommon', 'test_variant_consistency_eager'), + DecorateInfo(unittest.skip("Skipped!"), 'TestCommon', 'test_non_standard_bool_values'), + DecorateInfo(unittest.skip("Skipped!"), 'TestMathBits', 'test_neg_conj_view'), + DecorateInfo(unittest.skip("Skipped!"), 'TestMathBits', 'test_neg_view'), + DecorateInfo(unittest.skip("Skipped!"), 'TestMathBits', 'test_conj_view'), + DecorateInfo(unittest.skip('Skipped!'), 'TestCommon', 'test_compare_cpu'), + DecorateInfo(unittest.skip("Expected: empty is not comparable"), 'TestCompositeCompliance', 'test_operator'), + # Lazy tensor failures + DecorateInfo(unittest.skip("Expected: empty is not comparable"), 'TestLazyOpInfo'), + # RuntimeError: unsupported operation: more than one element of the written-to tensor refers to a single + # memory location. Please clone() the tensor before performing the operation. + DecorateInfo(unittest.expectedFailure, 'TestMeta', 'test_dispatch_meta_outplace'), + DecorateInfo(unittest.expectedFailure, 'TestMeta', 'test_dispatch_symbolic_meta_outplace'), + DecorateInfo(unittest.expectedFailure, 'TestMeta', 'test_dispatch_symbolic_meta_outplace_all_strides'), + )), + OpInfo('empty', + dtypes=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16, torch.chalf), + sample_inputs_func=sample_inputs_empty, + supports_autograd=False, + skips=( + DecorateInfo(unittest.expectedFailure, "TestNormalizeOperators", "test_normalize_operator_exhaustive"), + # Empty tensor data is garbage so it's hard to make comparisons with it. + DecorateInfo(unittest.skip("Skipped!"), 'TestJit', 'test_variant_consistency_jit'), + # Empty tensor data is garbage so it's hard to make comparisons with it. + DecorateInfo(unittest.skip("Skipped!"), 'TestCommon', 'test_variant_consistency_eager'), + # Empty tensor data is garbage so it's hard to make comparisons with it. + DecorateInfo(unittest.skip("Skipped!"), 'TestCommon', 'test_noncontiguous_samples'), + # Empty tensor data is garbage so it's hard to make comparisons with it. + DecorateInfo(unittest.skip("Skipped!"), 'TestMathBits', 'test_conj_view'), + # Empty tensor data is garbage so it's hard to make comparisons with it. + DecorateInfo(unittest.skip("Skipped!"), 'TestMathBits', 'test_neg_view'), + # Empty tensor data is garbage so it's hard to make comparisons with it. + DecorateInfo(unittest.skip("Skipped!"), 'TestMathBits', 'test_neg_conj_view'), + # Empty tensor data is garbage so it's hard to make comparisons with it. + DecorateInfo(unittest.skip("Skipped!"), 'TestNNCOpInfo', 'test_nnc_correctness'), + # Empty tensor data is garbage so it's hard to make comparisons with it. 
+ DecorateInfo(unittest.skip("Skipped!"), 'TestCudaFuserOpInfo'), + # Empty tensor data is garbage so it's hard to make comparisons with it. + DecorateInfo(unittest.skip("Skipped!"), 'TestCommon', 'test_non_standard_bool_values'), + DecorateInfo(unittest.skip("Expected: empty is not comparable"), 'TestCompositeCompliance', + 'test_operator'), + # requires_grad doesn't exist in the jit schema + DecorateInfo(unittest.expectedFailure, 'TestOperatorSignatures', 'test_get_torch_func_signature_exhaustive'), + DecorateInfo(unittest.skip("Expected: empty is not comparable"), + 'TestCommon', + 'test_out'), + DecorateInfo(unittest.skip("Expected: empty is not comparable"), + 'TestCommon', + 'test_out_warning'), + DecorateInfo(unittest.skip("Expected: empty is not comparable"), + 'TestLazyOpInfo'), + DecorateInfo(unittest.skip("Expected: empty is not comparable"), + 'TestCommon', 'test_complex_half_reference_testing'), + DecorateInfo(unittest.skip('output is non-deterministic'), 'TestCommon', 'test_compare_cpu'), + )), + OpInfo('eye', + dtypes=all_types_and_complex_and(torch.bool, torch.half), + dtypesIfCUDA=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16), + sample_inputs_func=sample_inputs_eye, + error_inputs_func=error_inputs_eye, + supports_out=True, + supports_autograd=False, + skips=( + DecorateInfo(unittest.expectedFailure, 'TestNormalizeOperators', 'test_normalize_operator_exhaustive'), + # TODO: same as this? + # https://github.com/pytorch/pytorch/issues/81774 + # also see: arange, new_full + # fails to match any schemas despite working in the interpreter + DecorateInfo(unittest.expectedFailure, 'TestOperatorSignatures', 'test_get_torch_func_signature_exhaustive'), + # fails to match any schemas despite working in the interpreter + DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_variant_consistency_jit'), + # skip these tests since we have non tensor input + DecorateInfo(unittest.skip('Skipped!'), "TestCommon", "test_noncontiguous_samples"), + DecorateInfo(unittest.skip('Skipped!'), 'TestCommon', 'test_variant_consistency_eager'), + DecorateInfo(unittest.skip("Skipped!"), 'TestMathBits', 'test_conj_view'), + DecorateInfo(unittest.skip("Skipped!"), 'TestMathBits', 'test_neg_conj_view'), + DecorateInfo(unittest.skip("Skipped!"), 'TestMathBits', 'test_neg_view'), + # UserWarning not triggered : Resized a non-empty tensor but did not warn about it. + DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_out_warning'), + )), + OpInfo('empty_permuted', + dtypes=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16, torch.chalf), + sample_inputs_func=sample_inputs_empty_permuted, + error_inputs_func=error_inputs_empty_permuted, + supports_out=False, + supports_autograd=False, + skips=( + DecorateInfo(unittest.expectedFailure, "TestNormalizeOperators", "test_normalize_operator_exhaustive"), + # Empty tensor data is garbage so it's hard to make comparisons with it. + DecorateInfo(unittest.skip("Skipped!"), 'TestJit', 'test_variant_consistency_jit'), + # Empty tensor data is garbage so it's hard to make comparisons with it. + DecorateInfo(unittest.skip("Skipped!"), 'TestCommon', 'test_variant_consistency_eager'), + # Empty tensor data is garbage so it's hard to make comparisons with it. + DecorateInfo(unittest.skip("Skipped!"), 'TestCommon', 'test_noncontiguous_samples'), + # Empty tensor data is garbage so it's hard to make comparisons with it. 
+ DecorateInfo(unittest.skip("Skipped!"), 'TestMathBits', 'test_conj_view'), + # Empty tensor data is garbage so it's hard to make comparisons with it. + DecorateInfo(unittest.skip("Skipped!"), 'TestMathBits', 'test_neg_view'), + # Empty tensor data is garbage so it's hard to make comparisons with it. + DecorateInfo(unittest.skip("Skipped!"), 'TestMathBits', 'test_neg_conj_view'), + # Empty tensor data is garbage so it's hard to make comparisons with it. + DecorateInfo(unittest.skip("Skipped!"), 'TestNNCOpInfo', 'test_nnc_correctness'), + # Empty tensor data is garbage so it's hard to make comparisons with it. + DecorateInfo(unittest.skip("Skipped!"), 'TestCudaFuserOpInfo'), + # Empty tensor data is garbage so it's hard to make comparisons with it. + DecorateInfo(unittest.skip("Skipped!"), 'TestCommon', 'test_non_standard_bool_values'), + DecorateInfo(unittest.skip("Expected: empty_permuted is not comparable"), 'TestCompositeCompliance', + 'test_operator'), + # requires_grad doesn't exist in the jit schema + DecorateInfo(unittest.expectedFailure, 'TestOperatorSignatures', 'test_get_torch_func_signature_exhaustive'), + DecorateInfo(unittest.skip("Expected: empty_permuted is not comparable"), + 'TestCommon', + 'test_out'), + DecorateInfo(unittest.skip("Expected: empty_permuted is not comparable"), + 'TestCommon', + 'test_out_warning'), + DecorateInfo(unittest.skip("Expected: empty_permuted is not comparable"), + 'TestLazyOpInfo'), + DecorateInfo(unittest.skip("Expected: empty_permuted is not comparable"), + 'TestCommon', 'test_complex_half_reference_testing'), + DecorateInfo(unittest.skip('output is non-deterministic'), 'TestCommon', 'test_compare_cpu'), + )), + OpInfo('scalar_tensor', + dtypes=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16, torch.chalf), + sample_inputs_func=sample_inputs_scalar_tensor, + supports_autograd=False, + supports_out=False, + skips=( + DecorateInfo(unittest.expectedFailure, 'TestNormalizeOperators', 'test_normalize_operator_exhaustive'), + # fails to match any schemas despite working in the interpreter + DecorateInfo(unittest.expectedFailure, 'TestOperatorSignatures', 'test_get_torch_func_signature_exhaustive'), + # fails to match any schemas despite working in the interpreter + DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_variant_consistency_jit'), + # skip these tests since we have non tensor input + DecorateInfo(unittest.skip('Skipped!'), "TestCommon", "test_noncontiguous_samples"), + DecorateInfo(unittest.skip('Skipped!'), 'TestCommon', 'test_variant_consistency_eager'), + DecorateInfo(unittest.skip("Skipped!"), 'TestMathBits', 'test_conj_view'), + DecorateInfo(unittest.skip("Skipped!"), 'TestMathBits', 'test_neg_conj_view'), + DecorateInfo(unittest.skip("Skipped!"), 'TestMathBits', 'test_neg_view'), + )), + OpInfo('new_full', + op=lambda x, *args, **kwargs: x.new_full(*args, **kwargs), + dtypes=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16, torch.chalf), + supports_out=False, + sample_inputs_func=sample_inputs_new_full, + skips=( + DecorateInfo(unittest.expectedFailure, "TestNormalizeOperators", "test_normalize_operator_exhaustive"), + ), + supports_autograd=False), + OpInfo('multinomial', + op=lambda inp, *args, **kwargs: + wrapper_set_seed(torch.multinomial, inp, *args, **kwargs), + method_variant=lambda inp, *args, **kwargs: + wrapper_set_seed(torch.Tensor.multinomial, inp, *args, **kwargs), + dtypes=floating_types_and(torch.bfloat16, torch.half), + dtypesIfCUDA=floating_types_and(torch.half), + 
supports_out=True, + sample_inputs_func=sample_inputs_multinomial, + error_inputs_func=error_inputs_multinomial, + skips=( + DecorateInfo(unittest.expectedFailure, "TestNormalizeOperators", "test_normalize_operator_exhaustive"), + # Strides are not the same! + # This may not be reproducible in CI + DecorateInfo(unittest.skip("Skipped!"), 'TestCommon', 'test_out'), + # AssertionError: JIT Test does not execute any logic + DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_variant_consistency_jit'), + # UserWarning not triggered : Resized a non-empty tensor but did not warn about it. + DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_out_warning'), + DecorateInfo(unittest.skip('output is non-deterministic'), 'TestCommon', 'test_compare_cpu')), + supports_autograd=False), + OpInfo('normal', + op=lambda inp, *args, **kwargs: + wrapper_set_seed(torch.normal, inp, *args, **kwargs), + # The inplace variant (Tensor.normal_) is different from torch.normal + inplace_variant=None, + dtypes=floating_types_and(torch.bfloat16, torch.half), + dtypesIfCUDA=floating_types_and(torch.bfloat16, torch.half), + supports_out=True, + sample_inputs_func=sample_inputs_normal_tensor_first, + skips=( + DecorateInfo(unittest.expectedFailure, "TestNormalizeOperators", "test_normalize_operator_exhaustive"), + # Tensor-likes are not close! + DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_out'), + # AssertionError: JIT Test does not execute any logic + DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_variant_consistency_jit'), + # UserWarning not triggered : Resized a non-empty tensor but did not warn about it. + DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_out_warning'), + # Computed gradient is incorrect -- would be an exfail but gradgrad somehow passes + DecorateInfo(unittest.skip("Gradients are incorrect!"), 'TestFwdGradients'), + DecorateInfo(unittest.skip("Gradients are incorrect!"), 'TestBwdGradients'), + DecorateInfo(unittest.skip('output is non-deterministic'), 'TestCommon', 'test_compare_cpu'), + # RuntimeError: Difference from {dtype} is larger with decomposition + DecorateInfo(unittest.skip("Skipped!"), 'TestDecomp', 'test_comprehensive'), + DecorateInfo(unittest.skip("Skipped!"), 'TestDecomp', 'test_quick'), + # The inplace variant (Tensor.normal_) is different from torch.normal + # inplace varaint Tensor.normal_ is decomposed using randn_like() + DecorateInfo(unittest.skip("Skipped!"), 'TestMeta', 'test_dispatch_symbolic_meta_outplace_all_strides'))), + OpInfo('normal', + # This has its own variant b/c OpInfos assume the first arg is a Tensor but it is not here + variant_test_name='number_mean', + op=lambda std, mean, *args, **kwargs: + wrapper_set_seed(torch.normal, mean, std, *args, **kwargs), + # The inplace variant (Tensor.normal_) is different from torch.normal + inplace_variant=None, + dtypes=floating_types_and(torch.bfloat16, torch.half), + dtypesIfCUDA=floating_types_and(torch.bfloat16, torch.half), + supports_out=True, + sample_inputs_func=sample_inputs_normal_tensor_second, + skips=( + DecorateInfo(unittest.expectedFailure, "TestNormalizeOperators", "test_normalize_operator_exhaustive"), + # AssertionError: JIT Test does not execute any logic + DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_variant_consistency_jit'), + DecorateInfo(unittest.skip("Skipped!"), 'TestCommon', 'test_noncontiguous_samples'), + DecorateInfo(unittest.skip("Skipped!"), 'TestCommon', 'test_variant_consistency_eager'), + DecorateInfo(unittest.skip("Skipped!"), 
'TestCommon', 'test_out'), + DecorateInfo(unittest.skip("Skipped!"), 'TestCommon', 'test_out_warning'), + DecorateInfo(unittest.skip("Skipped!"), 'TestCompositeCompliance', 'test_backward'), + DecorateInfo(unittest.skip("Skipped!"), 'TestMathBits', 'test_neg_view'), + DecorateInfo(unittest.skip("Skipped!"), 'TestFwdGradients'), + DecorateInfo(unittest.skip("Skipped!"), 'TestBwdGradients'), + DecorateInfo(unittest.skip("Skipped!"), 'TestCommon', 'test_compare_cpu'), + DecorateInfo(unittest.skip("Skipped!"), 'TestEagerFusionOpInfo'), + DecorateInfo(unittest.skip("Skipped!"), 'TestOperators'), + # AssertionError + DecorateInfo(unittest.skip("Skipped!"), 'TestDecomp', 'test_comprehensive'), + # AssertionError + DecorateInfo(unittest.skip("Skipped!"), 'TestDecomp', 'test_quick'), + # AssertionError in CUDA variant + DecorateInfo(unittest.skip("Skipped!"), 'TestFakeTensor', device_type='cuda'), + DecorateInfo(unittest.skip("Skipped!"), 'TestDeviceUtils', 'test_device_mode_ops'))), + OpInfo('bernoulli', + op=lambda inp, *args, **kwargs: + wrapper_set_seed(torch.bernoulli, inp, *args, **kwargs), + # The inplace variant (Tensor.bernoulli_) is different from torch.bernoulli + inplace_variant=None, + method_variant=lambda inp, *args, **kwargs: + wrapper_set_seed(torch.Tensor.bernoulli, inp, *args, **kwargs), + dtypes=floating_types_and(torch.bfloat16, torch.half), + supports_out=True, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + sample_inputs_func=sample_inputs_bernoulli, + error_inputs_func=error_inputs_bernoulli, + skips=( + # vmap: We do not yet support calling random operations inside of vmap + DecorateInfo(unittest.expectedFailure, 'TestFwdGradients', 'test_forward_mode_AD'), + DecorateInfo(unittest.expectedFailure, "TestNormalizeOperators", "test_normalize_operator_exhaustive"), + # AssertionError: JIT Test does not execute any logic + DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_variant_consistency_jit'), + # Expected RuntimeError when doing an unsafe cast from a result of + # dtype torch.float32 into an out= with dtype torch.long + DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_out'), + # UserWarning not triggered : Resized a non-empty tensor but did not warn about it. 
+ DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_out_warning'), + DecorateInfo(unittest.skip('output is non-deterministic'), 'TestCommon', 'test_compare_cpu'))), + OpInfo('scatter_add', + dtypes=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16), + sample_inputs_func=sample_inputs_scatter_add, + error_inputs_func=error_inputs_scatter_and_scatter_add, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + ), + OpInfo('stack', + dtypes=all_types_and_complex_and(torch.complex32, torch.bool, torch.float16, torch.bfloat16), + sample_inputs_func=sample_inputs_stack, + assert_autodiffed=True, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + skips=( + # https://github.com/pytorch/pytorch/issues/77046 + DecorateInfo(unittest.expectedFailure, 'TestMathBits', 'test_conj_view'), + DecorateInfo(unittest.expectedFailure, 'TestMathBits', 'test_neg_view'), + ), + ), + OpInfo('hstack', + dtypes=all_types_and_complex_and(torch.complex32, torch.bool, torch.float16, torch.bfloat16), + sample_inputs_func=sample_inputs_hstack_dstack_vstack, + error_inputs_func=error_inputs_hstack_dstack_vstack, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + ), + BinaryUfuncInfo('hypot', + dtypes=floating_types_and(torch.bfloat16, torch.half), + dtypesIfCUDA=floating_types_and(torch.half, torch.bfloat16), + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + supports_rhs_python_scalar=False), + OpInfo('histogram', + dtypes=floating_types(), + dtypesIfCUDA=_dispatch_dtypes(), # histogram is only implemented on CPU + sample_inputs_func=sample_inputs_histogram, + supports_autograd=False, + skips=( + # JIT tests don't work with Tensor keyword arguments + # https://github.com/pytorch/pytorch/issues/58507 + # RuntimeError: + # undefined value tensor: + # File "<string>", line 3 + # def the_method(i0): + # return torch.histogram(i0, 1, weight=tensor(-0.5735, dtype=torch.float32), density=False) + # ~~~~~~ <--- HERE + DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_variant_consistency_jit'), + # Not Implemented on XLA. 
+ DecorateInfo(unittest.skip("Skipped!"), 'TestOpInfo', device_type='xla'), + )), + OpInfo('histogramdd', + dtypes=floating_types(), + dtypesIfCUDA=_dispatch_dtypes(), # histogramdd is only implemented on CPU + sample_inputs_func=sample_inputs_histogramdd, + error_inputs_func=error_inputs_histogramdd, + supports_autograd=False, + skips=( + # Not implemented on CUDA + DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_errors', device_type='cuda'), + DecorateInfo(unittest.expectedFailure, 'TestNormalizeOperators', 'test_normalize_operator_exhaustive'), + # JIT tests don't work with Tensor keyword arguments + # https://github.com/pytorch/pytorch/issues/58507 + DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_variant_consistency_jit'), + )), + OpInfo('histc', + dtypes=floating_types_and(torch.bfloat16, torch.float16), + dtypesIfCUDA=floating_types_and(torch.int8, torch.int16, torch.int32, torch.int64), + sample_inputs_func=sample_inputs_histc, + supports_out=True, + supports_autograd=False, + skips=( + # CUDA histc returns a float tensor but does not correctly warn when passed an integral out tensor + # "AssertionError: RuntimeError not raised : Expected RuntimeError when doing an unsafe cast + # from a result of dtype torch.float32 into an out= with dtype torch.long" + DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_out', device_type='cuda'), + )), + OpInfo('bincount', + dtypes=integral_types_and(), + sample_inputs_func=sample_inputs_bincount, + supports_out=False, + supports_autograd=False, + skips=( + # JIT tests don't work with Tensor keyword arguments + # https://github.com/pytorch/pytorch/issues/58507 + DecorateInfo(unittest.skip("Skipped!"), 'TestJit', 'test_variant_consistency_jit'), + )), + OpInfo('bucketize', + dtypes=all_types_and(torch.float16, torch.bfloat16), + dtypesIfCUDA=all_types_and(torch.bfloat16, torch.float16), + sample_inputs_func=sample_inputs_bucketize, + reference_inputs_func=reference_inputs_bucketize, + error_inputs_func=error_inputs_bucketize, + supports_autograd=False, + skips=( + # JIT tests don't work with Tensor keyword arguments + DecorateInfo(unittest.skip("Expected failure!"), 'TestJit', 'test_variant_consistency_jit'), + )), + OpInfo('searchsorted', + dtypes=all_types_and(torch.bfloat16, torch.float16), + dtypesIfCUDA=all_types_and(torch.bfloat16, torch.float16), + sample_inputs_func=sample_inputs_searchsorted, + supports_autograd=False, + ref=reference_searchsorted, + skips=( + # JIT tests don't work with Tensor keyword arguments + # https://github.com/pytorch/pytorch/issues/58507 + DecorateInfo(unittest.skip("Expected failure!"), 'TestJit', 'test_variant_consistency_jit'), + )), + OpInfo('cat', + ref=_cat_np, + aliases=('concat', 'concatenate'), + dtypes=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16, torch.complex32), + sample_inputs_func=sample_inputs_cat_concat, + reference_inputs_func=reference_inputs_cat, + error_inputs_func=error_inputs_cat, + # https://github.com/pytorch/pytorch/issues/80411 + gradcheck_fast_mode=True, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + # See https://github.com/pytorch/pytorch/issues/66357 + check_batched_forward_grad=False, + assert_autodiffed=True, + skips=( + # https://github.com/pytorch/pytorch/issues/89353 + DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_numpy_ref_mps'), + # RuntimeError: Arguments for call not valid. + # Expected a value of type 'List[Tensor]' for argument + # 'tensors' but instead found type 'Tensor (inferred)'. 
+ DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_jit_alias_remapping'), + # see https://github.com/pytorch/pytorch/issues/71286 + DecorateInfo(unittest.expectedFailure, 'TestNNCOpInfo', 'test_nnc_correctness'), + # see https://github.com/pytorch/pytorch/issues/99806 + # RuntimeError: The size of tensor a (25) must match the size of tensor b (0) at non-singleton dimension 0. + DecorateInfo(unittest.expectedFailure, 'TestBwdGradients', 'test_fn_gradgrad'), + )), + OpInfo('unbind', + dtypes=all_types_and_complex_and(torch.complex32, torch.bool, torch.float16, torch.bfloat16), + ref=reference_unbind, + sample_inputs_func=sample_inputs_unbind, + error_inputs_func=error_inputs_unbind, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + supports_gradgrad=True, + supports_out=False, + ), + OpInfo('vstack', + aliases=('row_stack',), + dtypes=all_types_and_complex_and(torch.complex32, torch.bool, torch.float16, torch.bfloat16), + sample_inputs_func=sample_inputs_hstack_dstack_vstack, + error_inputs_func=error_inputs_hstack_dstack_vstack, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + skips=( + # RuntimeError: _fn() Expected a value of type + # 'Tensor (inferred)' for argument 't0' but instead found type 'tuple'. + DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_jit_alias_remapping'),)), + OpInfo('dstack', + dtypes=all_types_and_complex_and(torch.complex32, torch.bool, torch.float16, torch.bfloat16), + sample_inputs_func=sample_inputs_hstack_dstack_vstack, + error_inputs_func=error_inputs_hstack_dstack_vstack, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + # See https://github.com/pytorch/pytorch/pull/78358 + check_batched_forward_grad=False, + ), + OpInfo('unfold', + op=lambda x, *args: x.unfold(*args), + dtypes=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16, torch.chalf), + backward_dtypes=floating_and_complex_types_and(torch.float16, torch.bfloat16), + # Runs very slowly on slow gradcheck - alternatively reduce input sizes + gradcheck_fast_mode=True, + supports_out=False, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + check_batched_gradgrad=False, + # See https://github.com/pytorch/pytorch/issues/66357 + check_batched_forward_grad=False, + skips=( + DecorateInfo(unittest.expectedFailure, "TestNormalizeOperators", "test_normalize_operator_exhaustive"), + # Skip operator schema test because this is a functional and not an operator + DecorateInfo(unittest.expectedFailure, 'TestOperatorSignatures', 'test_get_torch_func_signature_exhaustive'), + ), + sample_inputs_func=sample_inputs_unfold), + OpInfo('unfold_copy', + dtypes=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16, torch.chalf), + backward_dtypes=floating_and_complex_types_and(torch.float16, torch.bfloat16), + # Runs very slowly on slow gradcheck - alternatively reduce input sizes + gradcheck_fast_mode=True, + supports_out=True, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + check_batched_gradgrad=False, + # See https://github.com/pytorch/pytorch/issues/66357 + check_batched_forward_grad=False, + sample_inputs_func=sample_inputs_unfold), + OpInfo('msort', + dtypes=all_types_and(torch.bool, torch.float16, torch.bfloat16), + dtypesIfCUDA=all_types_and(torch.float16, torch.bfloat16), + check_batched_gradgrad=False, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + sample_inputs_func=sample_inputs_msort, + skips=( + )), + OpInfo('movedim', + aliases=('moveaxis',), + dtypes=all_types_and_complex_and(torch.bool, 
torch.float16, torch.bfloat16, torch.chalf), + supports_out=False, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + # See https://github.com/pytorch/pytorch/pull/78358 + check_batched_forward_grad=False, + sample_inputs_func=sample_movedim_moveaxis, + reference_inputs_func=reference_movedim_moveaxis, + error_inputs_func=error_movedim_moveaxis), + OpInfo('renorm', + dtypes=floating_and_complex_types_and(torch.float16, torch.bfloat16), + sample_inputs_func=sample_inputs_renorm, + error_inputs_func=error_inputs_renorm, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + skips=( + # RuntimeError: Difference from float64 is larger with decomposition + # linalg_vector_norm.default than original on output 0. + # Original max diff: 2.560596747969157e-07, + # Decomp max diff: 1.8187482915266173e-06 + DecorateInfo(unittest.skip("Inconsistent accuracy"), 'TestDecomp', 'test_comprehensive', + device_type='cpu', dtypes=(torch.float16,)), + )), + ShapeFuncInfo('repeat', + op=lambda x, dims: x.repeat(dims), + ref=np.tile, + dtypes=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16), + # https://github.com/pytorch/pytorch/issues/80411 + gradcheck_fast_mode=True, + supports_out=False, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + sample_inputs_func=sample_repeat_tile, + skips=( + DecorateInfo(unittest.expectedFailure, "TestNormalizeOperators", "test_normalize_operator_exhaustive"), + )), + OpInfo('squeeze', + ref=_squeeze_ref, + dtypes=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16, torch.chalf), + supports_out=False, + assert_autodiffed=True, + autodiff_fusible_nodes=[], # aliases inputs, shouldn't be fused + autodiff_nonfusible_nodes=[], # aliases inputs, shouldn't be fused + assert_jit_shape_analysis=True, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + # vmap does not support inplace views + check_inplace_batched_forward_grad=False, + # https://github.com/pytorch/pytorch/issues/66357 + check_batched_forward_grad=False, + sample_inputs_func=sample_inputs_squeeze), + OpInfo('squeeze', + ref=_squeeze_ref, + variant_test_name="multiple", + dtypes=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16, torch.chalf), + supports_out=False, + assert_autodiffed=True, + autodiff_fusible_nodes=[], # aliases inputs, shouldn't be fused + autodiff_nonfusible_nodes=[], # aliases inputs, shouldn't be fused + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + # vmap does not support inplace views + check_inplace_batched_forward_grad=False, + # https://github.com/pytorch/pytorch/issues/66357 + check_batched_forward_grad=False, + sample_inputs_func=sample_inputs_squeeze_multiple), + UnaryUfuncInfo( + 'fill', + ref=_fill_np, + method_variant=None, + sample_kwargs=_fill_sample_kwargs, + sample_inputs_func=partial(sample_inputs_elementwise_unary, op_kwargs={'value': True}), + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + # https://github.com/pytorch/pytorch/issues/66357 + check_batched_forward_grad=False, + dtypes=all_types_and_complex_and(torch.complex32, torch.bool, torch.float16, torch.bfloat16), + supports_out=False, + skips=( + # JIT has issue when op is passed as lambda + # AssertionError: JIT Test does not execute any logic + DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_variant_consistency_jit'), + DecorateInfo(unittest.skip("No fill_ op"), 'TestCudaFuserOpInfo'), + DecorateInfo(unittest.skip("No fill_ op"), 'TestNNCOpInfo'), + )), + OpInfo('resize_', + op=lambda x, shape: 
x.clone().resize_(shape), + method_variant=None, + inplace_variant=torch.Tensor.resize_, + # the test fails because resize_ doesn't work with imag views as expected by the test + # https://github.com/pytorch/pytorch/issues/65945 + test_neg_view=False, + dtypes=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16), + supports_out=False, + supports_autograd=False, + skips=( + # Cannot resize variables that require grad + DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_dtypes'), + DecorateInfo(unittest.expectedFailure, "TestNormalizeOperators", "test_normalize_operator_exhaustive"), + DecorateInfo(unittest.skip("Allowed exception"), 'TestCompositeCompliance', 'test_operator'), + ), + sample_inputs_func=sample_inputs_resize_ops), + OpInfo('resize_as_', + op=lambda x, other: torch.resize_as_(x.clone(), other), + method_variant=None, + inplace_variant=torch.Tensor.resize_as_, + dtypes=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16), + supports_out=False, + supports_autograd=False, + skips=( + # Cannot resize variables that require grad + DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_dtypes'), + DecorateInfo(unittest.skip('Allowed exemption'), 'TestCompositeCompliance', 'test_operator'), + ), + sample_inputs_func=sample_inputs_resize_ops), + OpInfo('take_along_dim', + dtypes=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16), + dtypesIfCUDA=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16), + supports_inplace_autograd=False, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + # See https://github.com/pytorch/pytorch/pull/78358 + check_batched_forward_grad=False, + sample_inputs_func=sample_inputs_take_along_dim, + gradcheck_nondet_tol=GRADCHECK_NONDET_TOL, + decorators=( + # RuntimeError: view size is not compatible with input tensor's size and stride + DecorateInfo(unittest.expectedFailure, "TestMeta", "test_dispatch_symbolic_meta_outplace_all_strides"), + )), + ShapeFuncInfo('tile', + ref=np.tile, + dtypes=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16), + # https://github.com/pytorch/pytorch/issues/80411 + gradcheck_fast_mode=True, + supports_out=False, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + sample_inputs_func=sample_repeat_tile), + OpInfo('trapz', # TODO: in the future, 'trapz' should be made a proper alias of 'trapezoid' + dtypes=all_types_and_complex_and(torch.float16, torch.bfloat16), + supports_out=False, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + # See https://github.com/pytorch/pytorch/pull/78358 + check_batched_forward_grad=False, + sample_inputs_func=sample_trapezoid), + OpInfo('trapezoid', + dtypes=all_types_and_complex_and(torch.float16, torch.bfloat16), + supports_out=False, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + # See https://github.com/pytorch/pytorch/pull/78358 + check_batched_forward_grad=False, + sample_inputs_func=sample_trapezoid), + OpInfo('cumulative_trapezoid', + dtypes=all_types_and_complex_and(torch.bfloat16, torch.float16), + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + # See https://github.com/pytorch/pytorch/pull/78358 + check_batched_forward_grad=False, + supports_out=False, + sample_inputs_func=sample_cumulative_trapezoid,), + OpInfo('unsqueeze', + dtypes=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16, torch.chalf), + supports_out=False, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + # See 
https://github.com/pytorch/pytorch/pull/78358 + check_batched_forward_grad=False, + # vmap does not support inplace views + check_inplace_batched_forward_grad=False, + assert_jit_shape_analysis=True, + assert_autodiffed=True, + autodiff_fusible_nodes=[], # aliases inputs, shouldn't be fused + autodiff_nonfusible_nodes=[], # aliases inputs, shouldn't be fused + sample_inputs_func=sample_unsqueeze), + BinaryUfuncInfo('xlogy', + aliases=('special.xlogy',), + dtypes=all_types_and(torch.bool, torch.half, torch.bfloat16), + promotes_int_to_float=True, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + supports_one_python_scalar=True, + # We don't test 0 as the gradient will be NaN and it'll break + rhs_make_tensor_kwargs=dict(low=0.01)), + OpInfo('zero_', + op=lambda x: torch.zero_(x.clone()), + method_variant=None, + inplace_variant=torch.Tensor.zero_, + dtypes=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16), + # https://github.com/pytorch/pytorch/issues/80411 + gradcheck_fast_mode=True, + supports_out=False, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + supports_gradgrad=True, + skips=( + DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_variant_consistency_jit'), + ), + sample_inputs_func=sample_inputs_zero_), + OpInfo('logsumexp', + aliases=('special.logsumexp',), + dtypes=all_types_and(torch.bool, torch.half, torch.bfloat16), + assert_autodiffed=True, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + gradcheck_fast_mode=False, + sample_inputs_func=sample_inputs_logsumexp, + reference_inputs_func=reference_inputs_logsumexp), + OpInfo('trace', + dtypes=all_types_and_complex(), + dtypesIfCUDA=all_types_and_complex_and(torch.chalf, torch.bool, torch.half, torch.bfloat16), + error_inputs_func=error_inputs_trace, + supports_inplace_autograd=False, + supports_out=False, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + sample_inputs_func=sample_inputs_trace), + OpInfo('transpose', + ref=_numpy_ref_transpose, + aliases=('swapdims', 'swapaxes'), + assert_jit_shape_analysis=True, + dtypes=all_types_and_complex_and(torch.bool, torch.bfloat16, torch.half, torch.chalf), + supports_out=False, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + # vmap does not support inplace views + check_inplace_batched_forward_grad=False, + sample_inputs_func=sample_inputs_transpose_swapdims), + OpInfo('T', + op=lambda x: x.T, + dtypes=all_types_and_complex_and(torch.bool, torch.bfloat16, torch.half, torch.chalf), + supports_out=False, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + skips=( + # lambda impl + DecorateInfo(unittest.expectedFailure, 'TestNormalizeOperators', 'test_normalize_operator_exhaustive'), + DecorateInfo(unittest.expectedFailure, "TestJit", "test_variant_consistency_jit"),), + sample_inputs_func=sample_inputs_T, + error_inputs_func=error_inputs_T), + OpInfo('H', + op=lambda x: x.H, + dtypes=all_types_and_complex_and(torch.bool, torch.bfloat16, torch.half, torch.chalf), + supports_out=False, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + # See https://github.com/pytorch/pytorch/pull/78358 + check_batched_forward_grad=False, + skips=( + # lambda impl + DecorateInfo(unittest.expectedFailure, 'TestNormalizeOperators', 'test_normalize_operator_exhaustive'), + DecorateInfo(unittest.expectedFailure, "TestJit", "test_variant_consistency_jit"),), + sample_inputs_func=sample_inputs_T), + OpInfo('mT', + op=lambda x: x.mT, + dtypes=all_types_and_complex_and(torch.bool, torch.bfloat16, torch.half, 
torch.chalf), + # Runs very slowly on slow gradcheck - alternatively reduce input sizes + gradcheck_fast_mode=True, + supports_out=False, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + skips=( + # lambda impl + DecorateInfo(unittest.expectedFailure, 'TestNormalizeOperators', 'test_normalize_operator_exhaustive'), + DecorateInfo(unittest.expectedFailure, "TestJit", "test_variant_consistency_jit"),), + sample_inputs_func=sample_inputs_adjoint), + OpInfo('mH', + op=lambda x: x.mH, + aliases=('adjoint',), + dtypes=all_types_and_complex_and(torch.bool, torch.bfloat16, torch.half, torch.chalf), + # Runs very slowly on slow gradcheck - alternatively reduce input sizes + gradcheck_fast_mode=True, + supports_out=False, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + # See https://github.com/pytorch/pytorch/pull/78358 + check_batched_forward_grad=False, + skips=( + # lambda impl + DecorateInfo(unittest.expectedFailure, 'TestNormalizeOperators', 'test_normalize_operator_exhaustive'), + DecorateInfo(unittest.expectedFailure, "TestJit", "test_variant_consistency_jit"),), + sample_inputs_func=sample_inputs_adjoint), + OpInfo('tril', + dtypes=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16, torch.chalf), + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + error_inputs_func=error_inputs_tril_triu, + sample_inputs_func=sample_inputs_tril_triu), + OpInfo('triu', + dtypes=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16, torch.chalf), + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + error_inputs_func=error_inputs_tril_triu, + sample_inputs_func=sample_inputs_tril_triu), + OpInfo('triu_indices', + dtypes=_dispatch_dtypes((torch.int32, torch.int64)), + sample_inputs_func=sample_inputs_trilu_indices, + ref=lambda h, w, ofs=0, dtype=torch.long, device='cpu' : np.array(np.triu_indices(h, ofs, w), dtype=dtype), + supports_out=False, + supports_autograd=False, + skips=( + # skip these tests since we have non tensor input + DecorateInfo(unittest.skip('Skipped!'), 'TestCommon', 'test_noncontiguous_samples'), + DecorateInfo(unittest.skip('Skipped!'), 'TestCommon', 'test_variant_consistency_eager'), + DecorateInfo(unittest.skip('Skipped!'), 'TestJit', 'test_variant_consistency_jit'), + DecorateInfo(unittest.skip('Skipped!'), 'TestMathBits', 'test_neg_view'), + )), + OpInfo('tril_indices', + dtypes=_dispatch_dtypes((torch.int32, torch.int64)), + sample_inputs_func=sample_inputs_trilu_indices, + ref=lambda h, w, ofs=0, dtype=torch.long, device='cpu' : np.array(np.tril_indices(h, ofs, w), dtype=dtype), + supports_out=False, + supports_autograd=False, + skips=( + # skip these tests since we have non tensor input + DecorateInfo(unittest.skip('Skipped!'), 'TestCommon', 'test_noncontiguous_samples'), + DecorateInfo(unittest.skip('Skipped!'), 'TestCommon', 'test_variant_consistency_eager'), + DecorateInfo(unittest.skip('Skipped!'), 'TestJit', 'test_variant_consistency_jit'), + DecorateInfo(unittest.skip('Skipped!'), 'TestMathBits', 'test_neg_view'), + )), + OpInfo('kron', + dtypes=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16), + dtypesIfCUDA=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16), + # Runs very slowly on slow gradcheck - alternatively reduce input sizes + gradcheck_fast_mode=True, + supports_inplace_autograd=False, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + sample_inputs_func=sample_inputs_kron, + decorators=( + # RuntimeError: view size is not compatible with input tensor's size and 
stride + DecorateInfo(unittest.expectedFailure, "TestMeta", "test_dispatch_symbolic_meta_outplace_all_strides"), + )), + OpInfo('inner', + dtypes=all_types_and_complex_and(torch.float16, torch.bfloat16), + dtypesIfCUDA=floating_and_complex_types_and(torch.float16, torch.bfloat16), + dtypesIfROCM=floating_and_complex_types_and(torch.half, torch.bfloat16), + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + # See https://github.com/pytorch/pytorch/pull/78358 + check_batched_forward_grad=False, + sample_inputs_func=sample_inputs_inner, + ), + OpInfo('tensordot', + dtypes=all_types_and_complex_and(torch.float16, torch.bfloat16), + dtypesIfCUDA=floating_and_complex_types_and(torch.float16, torch.bfloat16), + dtypesIfROCM=floating_and_complex_types_and(torch.half, torch.bfloat16), + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + # See https://github.com/pytorch/pytorch/pull/78358 + check_batched_forward_grad=False, + sample_inputs_func=sample_inputs_tensordot, + skips=( + # Skip operator schema test because this is a functional and not an operator. + # Reference: https://github.com/pytorch/pytorch/issues/54574 + DecorateInfo(unittest.skip("Skipped!"), 'TestOperatorSignatures', 'test_get_torch_func_signature_exhaustive'), + ) + ), + OpInfo('to_sparse', + op=lambda x, *args: x.to_sparse(*args), + sample_inputs_func=sample_inputs_to_sparse, + dtypes=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16), + backward_dtypes=floating_types(), + backward_dtypesIfCUDA=floating_types_and(torch.float16, torch.bfloat16), + supports_out=False, + supports_sparse_csr=True, + supports_sparse_csc=True, + check_batched_grad=False, + check_batched_gradgrad=False, + skips=( + # NotImplementedError: Could not run 'aten::normal_' with arguments from the 'SparseCPU' backend + DecorateInfo(unittest.skip(""), 'TestCommon', 'test_noncontiguous_samples'), + # TODO: FIXME: complex inputs requiring grad error in forward + DecorateInfo(unittest.skip("Skipped!"), 'TestCommon', 'test_dtypes'), + # lambda impl + DecorateInfo(unittest.expectedFailure, "TestNormalizeOperators", "test_normalize_operator_exhaustive"), + # Allowed exception: sparse tensors don't have strides + DecorateInfo(unittest.skip("Allowed exception"), 'TestCompositeCompliance', 'test_operator'), + DecorateInfo(unittest.skip("Allowed exception"), 'TestCompositeCompliance', 'test_backward'), + DecorateInfo(unittest.skip("Allowed exception"), 'TestTags', 'test_tags'), + # TODO: implement csr.to_sparse(sample_dim) where sampled_dim is 1. + DecorateInfo(unittest.skip("csr.to_sparse(1) not implemented. Skipped!"), + 'TestSparseCSR', 'test_sparse_csr_consistency'), + # Compiler issue on ROCm. Might need to skip until ROCm5.5 + DecorateInfo(unittest.skip("Skipped!"), 'TestCommon', 'test_non_standard_bool_values', + dtypes=[torch.bool], active_if=TEST_WITH_ROCM), + ) + ), + OpInfo('logcumsumexp', + dtypes=floating_and_complex_types_and(torch.bfloat16, torch.half), + backward_dtypes=floating_and_complex_types_and(torch.bfloat16), + backward_dtypesIfCUDA=floating_and_complex_types_and(torch.bfloat16), + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + skips=( + # AssertionError: UserWarning not triggered : Resized a non-empty tensor but did not warn about it. + DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_out_warning', device_type='cuda'), + # RuntimeError: "max_values_cpu" not implemented for 'ComplexDouble' + # Falling back to non-numerically stablized exp, causing nan in the results. 
+ DecorateInfo(unittest.expectedFailure, 'TestFwdGradients', 'test_forward_mode_AD', dtypes=[torch.complex128]), + DecorateInfo(unittest.expectedFailure, 'TestFwdGradients', 'test_fn_fwgrad_bwgrad', dtypes=[torch.complex128]), + ), + sample_inputs_func=sample_inputs_logcumsumexp, + error_inputs_func=error_inputs_logcumsumexp), + UnaryUfuncInfo('sigmoid', + aliases=('special.expit', 'nn.functional.sigmoid'), + aten_backward_name='sigmoid_backward', + ref=reference_sigmoid if TEST_SCIPY else None, + decorators=(precisionOverride({torch.float16: 1e-2, + torch.complex64: 1e-1, + torch.bfloat16: 1e-2}),), + skips=( + # Reference: https://github.com/pytorch/pytorch/issues/56012 + DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_extremal', + dtypes=[torch.complex64, torch.cdouble]), + DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_large', + dtypes=[torch.chalf, torch.complex64, torch.cdouble])), + dtypes=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16), + dtypesIfCUDA=all_types_and_complex_and(torch.complex32, torch.bool, torch.half, torch.bfloat16), + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + promotes_int_to_float=True, + assert_autodiffed=True, + # sigmoid(z) = 1 / (1 + exp(-z)), at z = j * pi * odd_number, the denominator is zero + reference_numerics_filter=NumericsFilter( + condition=lambda x: (close_to_int(x / (math.pi * 1j)) + if x.is_complex() else x.new_tensor(False, dtype=torch.bool)), + safe_val=0)), + UnaryUfuncInfo('digamma', + ref=scipy.special.digamma if TEST_SCIPY else None, + aliases=('special.psi', 'special.digamma',), + decorators=(precisionOverride({torch.float16: 5e-1}),), + dtypes=all_types_and(torch.bool, torch.half, torch.bfloat16), + dtypesIfCUDA=all_types_and(torch.bool, torch.half), + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + promotes_int_to_float=True), + UnaryUfuncInfo('erf', + ref=scipy.special.erf if TEST_SCIPY else None, + aliases=('special.erf', ), + decorators=(precisionOverride({torch.float16: 1e-2, + torch.bfloat16: 1e-2}),), + skips=( + DecorateInfo(unittest.skip("Skipped! 
sparse backward not supported"), + 'TestSparseUnaryUfuncs', 'test_sparse_fn_grad'), + + ), + dtypes=all_types_and(torch.bool, torch.half, torch.bfloat16), + assert_autodiffed=True, + assert_jit_shape_analysis=True, + supports_sparse=True, + supports_sparse_csr=True, + supports_sparse_csc=True, + supports_sparse_bsr=True, + supports_sparse_bsc=True, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + promotes_int_to_float=True), + UnaryUfuncInfo('erfc', + ref=scipy.special.erfc if TEST_SCIPY else None, + aliases=('special.erfc', ), + decorators=(precisionOverride({torch.float16: 1e-2, + torch.bfloat16: 1e-2}),), + dtypes=all_types_and(torch.bool, torch.half, torch.bfloat16), + assert_autodiffed=True, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + promotes_int_to_float=True), + UnaryUfuncInfo('erfinv', + ref=scipy.special.erfinv if TEST_SCIPY else None, + aliases=('special.erfinv', ), + decorators=(precisionOverride({torch.float16: 1e-2, + torch.bfloat16: 1e-2, + torch.float32: 1e-4}),), + dtypes=all_types_and(torch.bool, torch.half, torch.bfloat16), + dtypesIfCUDA=all_types_and(torch.bool, torch.half, torch.bfloat16), + supports_sparse_csr=True, + supports_sparse_csc=True, + supports_sparse_bsr=True, + supports_sparse_bsc=True, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + promotes_int_to_float=True, + domain=(-1, 1), + skips=( + # Reference: https://github.com/pytorch/pytorch/pull/49155#issuecomment-742664611 + DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_extremal', + active_if=TEST_SCIPY and version.parse(scipy.__version__) < version.parse("1.4.0")), + DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_large', + active_if=TEST_SCIPY and version.parse(scipy.__version__) < version.parse("1.4.0")), + DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_small', + active_if=TEST_SCIPY and version.parse(scipy.__version__) < version.parse("1.4.0")), + )), + OpInfo("nn.functional.smooth_l1_loss", + ref=reference_smooth_l1_loss, + sample_inputs_func=sample_inputs_smooth_l1_loss, + dtypes=floating_types_and(torch.float16, torch.bfloat16), + backward_dtypes=floating_types_and(torch.bfloat16), + dtypesIfCUDA=floating_types_and(torch.float16), + backward_dtypesIfCUDA=floating_types_and(torch.float16), + supports_out=False, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + skips=( + # RuntimeError: input->type()->kind() == TypeKind::OptionalTypeINTERNAL ASSERT FAILED + # at "../torch/csrc/jit/passes/utils/check_alias_annotation.cpp":270, please report a bug to PyTorch. + DecorateInfo(unittest.expectedFailure, "TestJit", "test_variant_consistency_jit"),)), + OpInfo( + "nn.functional.l1_loss", + ref=loss_reference_reduction_wrapper(lambda input, target: np.abs(input - target)), + sample_inputs_func=sample_inputs_l1_loss, + error_inputs_func=error_inputs_l1_loss, + dtypes=floating_and_complex_types_and(torch.float16, torch.bfloat16), + supports_out=False, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + skips=( + # RuntimeError: input->type()->kind() == TypeKind::OptionalTypeINTERNAL ASSERT FAILED + # at "../torch/csrc/jit/passes/utils/check_alias_annotation.cpp":270, please report a bug to PyTorch. 
+ DecorateInfo( + unittest.expectedFailure, + "TestJit", + "test_variant_consistency_jit", + dtypes=(torch.float32,), + ), + ), + ), + UnaryUfuncInfo('lgamma', + ref=reference_lgamma if TEST_SCIPY else None, + aliases=('special.gammaln', ), + decorators=(precisionOverride({torch.float16: 7e-1}),), + dtypes=all_types_and(torch.bool, torch.half, torch.bfloat16), + dtypesIfCUDA=all_types_and(torch.bool, torch.half), + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + promotes_int_to_float=True, + skips=( + # Reference: https://github.com/pytorch/pytorch/pull/50140#issuecomment-756150214 + DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_extremal', + dtypes=[torch.float32, torch.float64], active_if=IS_WINDOWS), + DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_large', + dtypes=[torch.float32, torch.float64], active_if=IS_WINDOWS), + ), + # lgamma have multiple singularities at x <= 0 + reference_numerics_filter=NumericsFilter(condition=lambda x: x < 0.1, safe_val=1)), + OpInfo( + 'logdet', + dtypes=floating_and_complex_types(), + supports_out=False, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + sample_inputs_func=sample_inputs_linalg_det_logdet_slogdet, + decorators=[skipCUDAIfNoMagma, skipCPUIfNoLapack]), + # `log_softmax` supports different dtypes based on whether `dtype` argument, + # is passed or not. Hence two OpInfo entries, one with dtype and other without. + OpInfo( + 'log_softmax', + aliases=('special.log_softmax', 'nn.functional.log_softmax'), + supports_out=True, + aten_backward_name='_log_softmax_backward_data', + dtypes=floating_types_and(torch.float16, torch.bfloat16), + sample_inputs_func=sample_inputs_softmax_variant, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + assert_autodiffed=True), + OpInfo( + 'log_softmax', + variant_test_name='with_dtype', + aliases=('special.log_softmax', 'nn.functional.log_softmax'), + supports_out=True, + dtypes=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16, torch.chalf), + sample_inputs_func=partial(sample_inputs_softmax_variant, with_dtype=True), + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + assert_autodiffed=True), + UnaryUfuncInfo('logit', + aten_backward_name='logit_backward', + ref=scipy.special.logit if TEST_SCIPY else None, + domain=(0, 1), + aliases=('special.logit', ), + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + promotes_int_to_float=True, + decorators=(precisionOverride({torch.bfloat16: 5e-1, + torch.float16: 5e-1}),), + dtypes=all_types_and(torch.bool, torch.half, torch.bfloat16), + sample_inputs_func=sample_inputs_logit), + OpInfo('where', + # Currently only the `input` is tested in gradcheck. + # If we pass `condition` first, none of the input which supports + # autograd will be tested. Hence the following lambda. 
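+            # A rough illustration of the reshuffle (illustrative only, not part of the suite):
+            # the OpInfo harness invokes roughly op(sample.input, *sample.args, **sample.kwargs),
+            # so with the lambda below a sample built as (self, condition, other) runs as
+            #     op(self, condition, other) -> torch.where(condition, self, other)
+            # keeping the differentiable tensor in the `input` slot that gradcheck exercises.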
+ op=lambda self, condition, other: torch.where(condition, self, other), + ref=lambda self, condition, other: np.where(condition, self, other), + sample_inputs_func=sample_inputs_where, + reference_inputs_func=reference_inputs_where, + error_inputs_func=error_inputs_where, + supports_out=False, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + decorators=( + DecorateInfo(onlyCUDA, "TestCommon", 'test_errors'),), + skips=( + # lambda impl + DecorateInfo(unittest.expectedFailure, "TestNormalizeOperators", "test_normalize_operator_exhaustive"), + DecorateInfo(unittest.skip("Skipped!"), 'TestJit', 'test_variant_consistency_jit'), + ), + dtypes=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16, torch.chalf)), + OpInfo('nonzero', + dtypes=all_types_and_complex_and(torch.bool, torch.bfloat16, torch.float16, torch.chalf), + sample_inputs_func=sample_inputs_nonzero, + supports_autograd=False, + skips=( + DecorateInfo(unittest.expectedFailure, 'TestNormalizeOperators', 'test_normalize_operator_exhaustive'), + # nonzero(): argument 'out' must be Tensor, not tuple + DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_out'), + # https://github.com/pytorch/pytorch/issues/67458 + DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_variant_consistency_jit'), + # nonzero is not raising a warning when the out is resized + DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_out_warning'), + # Can't find schemas for this operator for some reason + DecorateInfo(unittest.expectedFailure, 'TestOperatorSignatures', 'test_get_torch_func_signature_exhaustive'), + # Compiler issue on ROCm. Might need to skip until ROCm5.5 + DecorateInfo(unittest.skip("Skipped!"), 'TestCommon', 'test_non_standard_bool_values', + dtypes=[torch.bool], active_if=TEST_WITH_ROCM), + )), + OpInfo('nonzero_static', + dtypes=all_types_and_complex_and(torch.bool, torch.bfloat16, torch.float16, torch.chalf), + sample_inputs_func=sample_inputs_nonzero_static, + supports_out=False, + supports_autograd=False, + decorators=[onlyCPU], + skips=( + DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_out'), + DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_out_warning'), + DecorateInfo(unittest.expectedFailure, 'TestDTensorOps', 'test_dtensor_op_db'), + DecorateInfo(unittest.expectedFailure, 'TestInductorOpInfo', 'test_comprehensive'), + DecorateInfo(unittest.expectedFailure, 'TestVmapOperatorsOpInfo', 'test_op_has_batch_rule'), + DecorateInfo(unittest.skip("Skipped!"), 'TestCommon', 'test_non_standard_bool_values', + dtypes=[torch.bool], active_if=TEST_WITH_ROCM), + )), + # Following tests are for jiterator's python interface + # Jiterator can be used to author elementwise CUDA kernel + # jiterator._create_jit_fn returns a callable that behaves like a regular pytorch op + # See create_jit_fn in jiterator.py for more information + UnaryUfuncInfo( + 'jiterator_unary', + op=torch.cuda.jiterator._create_jit_fn("template T unary(T x) { return x * x + x; }"), + ref=lambda x: x * x + x, + dtypes=all_types_and_complex_and(torch.bfloat16, torch.float16, torch.bool), + supports_out=False, + supports_autograd=False, # jiterator ops doesn't have backward defined + decorators=[ + onlyCUDA, + DecorateInfo(toleranceOverride({torch.float16: tol(atol=1e-02, rtol=1e-02)}), + 'TestUnaryUfuncs', 'test_reference_numerics_extremal'), + DecorateInfo(toleranceOverride({torch.float16: tol(atol=1e-02, rtol=1e-02)}), + 'TestUnaryUfuncs', 'test_reference_numerics_hard'), + 
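+                       # A minimal usage sketch of the jiterator interface these entries exercise
+                       # (assumes a CUDA device; variable names are illustrative). The code string
+                       # is a C++ template, i.e. `template <typename T> T unary(T x) {...}`:
+                       #
+                       #     fused = torch.cuda.jiterator._create_jit_fn(
+                       #         "template <typename T> T unary(T x) { return x * x + x; }")
+                       #     x = torch.rand(8, device="cuda")
+                       #     y = fused(x)  # elementwise on CUDA, same values as x * x + x
+                       #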
DecorateInfo(toleranceOverride({torch.float16: tol(atol=1e-02, rtol=1e-02)}), + 'TestUnaryUfuncs', 'test_reference_numerics_normal'), + DecorateInfo(toleranceOverride({torch.float16: tol(atol=1e-02, rtol=1e-02)}), + 'TestUnaryUfuncs', 'test_reference_numerics_small'), + ], + skips=( + # Jiterator ops doesn't support neg or conj view + DecorateInfo(unittest.expectedFailure, 'TestMathBits', 'test_neg_view'), + DecorateInfo(unittest.expectedFailure, 'TestMathBits', 'test_conj_view'), + DecorateInfo(unittest.expectedFailure, 'TestMathBits', 'test_neg_conj_view'), + # Jiterator ops doesn't support CompositeCompliantTensor + # Following test should expectedFailure, but it's causing cascading failures in CUDA, thus skipped + DecorateInfo(unittest.skip("skip"), 'TestCompositeCompliance', 'test_operator'), + # Skip reference_numerics tests for bool type, as the defined function doesn't work for bool + DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_extremal', + dtypes=[torch.bool]), + DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_hard', + dtypes=[torch.bool]), + DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_normal', + dtypes=[torch.bool]), + # ROCm generates -inf+infj instead of nan+infj for complex64 for some of the results + DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_large', + dtypes=[torch.complex64], active_if=TEST_WITH_ROCM), + # Expected failure: torch.jiterator_unary is not a valid op + DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_variant_consistency_jit'), + # Skip Nvfuser + DecorateInfo(unittest.skip('Skipped!'), 'TestCudaFuserOpInfo'), + ) + ), + BinaryUfuncInfo( + 'jiterator_binary', + op=torch.cuda.jiterator._create_jit_fn( + "template T binary(T x, T y, T alpha) { return x + alpha * y; }", alpha=1), + ref=lambda input, other, *, alpha=1: np.add(input, other) if alpha == 1 \ + else np.add(input, np.multiply(alpha, other)), + dtypes=all_types_and_complex_and(torch.bfloat16, torch.float16, torch.bool), + sample_inputs_func=partial(sample_inputs_jiterator, num_inputs=2, alpha=-3.14), + supports_out=False, + supports_autograd=False, # jiterator ops doesn't have backward defined + supports_rhs_python_scalar=False, + decorators=[onlyCUDA], + skips=( + # Jiterator ops doesn't support neg or conj view + DecorateInfo(unittest.expectedFailure, 'TestMathBits', 'test_neg_view'), + DecorateInfo(unittest.expectedFailure, 'TestMathBits', 'test_conj_view'), + DecorateInfo(unittest.expectedFailure, 'TestMathBits', 'test_neg_conj_view'), + # Jiterator ops doesn't support CompositeCompliantTensor + # Following test should expectedFailure, but it's causing cascading failures in CUDA, thus skipped + DecorateInfo(unittest.skip("skip"), 'TestCompositeCompliance', 'test_operator'), + # Expected failure: torch.jiterator_binary is not a valid op + DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_variant_consistency_jit'), + # Skip Nvfuser + DecorateInfo(unittest.skip('Skipped!'), 'TestCudaFuserOpInfo'), + ) + ), + OpInfo( + 'jiterator_4inputs_with_extra_args', + op=torch.cuda.jiterator._create_jit_fn( + "template T binary(T i0, T i1, T i2, T i3, T alpha, T beta) { return alpha * i0 + beta * i1 + i2 + i3; }", + alpha=1, beta=1), + ref=lambda i0, i1, i2, i3, *, alpha=1, beta=1: alpha * i0 + beta * i1 + i2 + i3, + dtypes=all_types_and_complex_and(torch.bfloat16, torch.float16, torch.bool), + sample_inputs_func=partial(sample_inputs_jiterator, 
num_inputs=4, alpha=3.14, beta=-4.20), + supports_out=False, + supports_autograd=False, # jiterator ops doesn't have backward defined + decorators=[onlyCUDA], + skips=( + # Jiterator ops doesn't support neg or conj view + DecorateInfo(unittest.expectedFailure, 'TestMathBits', 'test_neg_view'), + DecorateInfo(unittest.expectedFailure, 'TestMathBits', 'test_conj_view'), + DecorateInfo(unittest.expectedFailure, 'TestMathBits', 'test_neg_conj_view'), + # Jiterator ops doesn't support CompositeCompliantTensor + # Following test should expectedFailure, but it's causing cascading failures in CUDA, thus skipped + DecorateInfo(unittest.skip("skip"), 'TestCompositeCompliance', 'test_operator'), + # Expected failure: torch.jiterator_4inputs_with_extra_args is not a valid op + DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_variant_consistency_jit'), + # Skip Nvfuser + DecorateInfo(unittest.skip('Skipped!'), 'TestCudaFuserOpInfo'), + ) + ), + BinaryUfuncInfo( + 'jiterator_binary_return_by_ref', + op=torch.cuda.jiterator._create_multi_output_jit_fn( + """ + template + void binary_return_by_ref(T i0, T i1, T& out0) { + out0 = i0 + i1; + } + """, + num_outputs=1), + ref=lambda i0, i1: i0 + i1, + dtypes=all_types_and_complex_and(torch.bfloat16, torch.float16, torch.bool), + sample_inputs_func=partial(sample_inputs_jiterator, num_inputs=2, alpha=-0.42), + supports_out=False, + supports_autograd=False, # jiterator ops doesn't have backward defined + supports_rhs_python_scalar=False, + decorators=[onlyCUDA], + skips=( + # Jiterator ops doesn't support neg or conj view + DecorateInfo(unittest.expectedFailure, 'TestMathBits', 'test_neg_view'), + DecorateInfo(unittest.expectedFailure, 'TestMathBits', 'test_conj_view'), + DecorateInfo(unittest.expectedFailure, 'TestMathBits', 'test_neg_conj_view'), + # Jiterator ops doesn't support CompositeCompliantTensor + # Following test should expectedFailure, but it's causing cascading failures in CUDA, thus skipped + DecorateInfo(unittest.skip("skip"), 'TestCompositeCompliance', 'test_operator'), + # Expected failure: torch.jiterator_4inputs_with_extra_args is not a valid op + DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_variant_consistency_jit'), + # Skip Nvfuser + DecorateInfo(unittest.skip('Skipped!'), 'TestCudaFuserOpInfo'), + ) + ), + OpInfo( + 'jiterator_2inputs_2outputs', + op=torch.cuda.jiterator._create_multi_output_jit_fn( + """ + template + void binary_2outputs(T i0, T i1, T& out0, T& out1) { + out0 = i0 + i1; + out1 = i0 - i1; + } + """, + num_outputs=2), + ref=lambda i0, i1, *, alpha=1: (i0 + i1, i0 - i1), + dtypes=all_types_and_complex_and(torch.bfloat16, torch.float16, torch.bool), + sample_inputs_func=partial(sample_inputs_jiterator, num_inputs=2), + supports_out=False, + supports_autograd=False, # jiterator ops doesn't have backward defined + decorators=[onlyCUDA], + skips=( + # Jiterator ops doesn't support neg or conj view + DecorateInfo(unittest.expectedFailure, 'TestMathBits', 'test_neg_view'), + DecorateInfo(unittest.expectedFailure, 'TestMathBits', 'test_conj_view'), + DecorateInfo(unittest.expectedFailure, 'TestMathBits', 'test_neg_conj_view'), + # Jiterator ops doesn't support CompositeCompliantTensor + # Following test should expectedFailure, but it's causing cascading failures in CUDA, thus skipped + DecorateInfo(unittest.skip("skip"), 'TestCompositeCompliance', 'test_operator'), + # Expected failure: torch.jiterator_4inputs_with_extra_args is not a valid op + DecorateInfo(unittest.expectedFailure, 'TestJit', 
'test_variant_consistency_jit'), + # Skip Nvfuser + DecorateInfo(unittest.skip('Skipped!'), 'TestCudaFuserOpInfo'), + ) + ), + # `torch.norm` has multiple code paths depending on the value of `p`. + # These paths have different dtype support. Also JIT supports, + # most variants but not all of them. So we split the OpInfo entries, + # for `norm` based on the code-paths and JIT support. + OpInfo( + "norm", + sample_inputs_func=sample_inputs_norm, + dtypes=floating_and_complex_types_and(torch.float16, torch.bfloat16), + # TODO Benchmark again with the new implementation + # Runs very slowly on slow gradcheck - alternatively reduce input sizes + gradcheck_fast_mode=True, + check_batched_forward_grad=False, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + skips=( + # Dispatches in Python to vector_norm. Not sure how to make this test happy + # Happens to pass on complex64. Also a mystery + DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_variant_consistency_jit', + dtypes=(torch.float32,)),) + ), + OpInfo('norm', + variant_test_name='nuc', + sample_inputs_func=sample_inputs_norm_nuc, + decorators=[skipCUDAIfNoMagmaAndNoCusolver, skipCPUIfNoLapack], + check_batched_gradgrad=False, + # torch.autograd.gradcheck.GradcheckError: While computing batched gradients + # got: Could not allocate memory to change Tensor SizesAndStrides! + check_batched_forward_grad=False, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + dtypes=floating_and_complex_types(), + dtypesIfCUDA=floating_and_complex_types(), + skips=( + # Dispatches in Python to matrix_norm. Not sure how to make this test happy + DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_variant_consistency_jit', + dtypes=(torch.complex64, torch.float32,)), + DecorateInfo(unittest.skip("Skipped!"), 'TestFakeTensor', 'test_fake_crossref_backward_amp', + device_type='cuda', dtypes=[torch.float32], active_if=TEST_WITH_ROCM), + DecorateInfo(unittest.skip("Skipped!"), 'TestFakeTensor', 'test_fake_crossref_backward_no_amp', + device_type='cuda', dtypes=[torch.float32], active_if=TEST_WITH_ROCM),) + ), + OpInfo('norm', + variant_test_name='fro', + sample_inputs_func=sample_inputs_norm_fro, + dtypes=floating_and_complex_types_and(torch.bfloat16, torch.float16), + dtypesIfCUDA=floating_and_complex_types_and(torch.float16, torch.bfloat16), + supports_forward_ad=True, + # torch.autograd.gradcheck.GradcheckError: While computing batched gradients + # got: Could not allocate memory to change Tensor SizesAndStrides! + check_batched_forward_grad=False, + supports_fwgrad_bwgrad=True, + skips=( + # MPS has some mild accuracy issues for float16. We divide the tolerances by 10 + DecorateInfo( + toleranceOverride({torch.float16: tol(atol=1e-4, rtol=0.01)}), + 'TestConsistency', + 'test_output_match', + + ), + # Issue with conj and torch dispatch, see https://github.com/pytorch/pytorch/issues/82479 + DecorateInfo( + unittest.skip("Skipped!"), + 'TestSchemaCheckModeOpInfo', + 'test_schema_correctness', + dtypes=(torch.complex64, torch.complex128)), + # Dispatches in Python to vector_norm. 
Not sure how to make this test happy + DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_variant_consistency_jit', + dtypes=(torch.complex64, torch.float32,)),) + ), + OpInfo( + "norm", + variant_test_name="inf", + sample_inputs_func=sample_inputs_norm_inf, + dtypes=floating_and_complex_types_and(torch.float16, torch.bfloat16), + supports_forward_ad=True, + check_batched_forward_grad=False, + supports_fwgrad_bwgrad=True, + # fast gradcheck produces NaNs + gradcheck_fast_mode=False, + skips=( + DecorateInfo( + toleranceOverride({torch.float16: tol(atol=2e-3, rtol=1e-3)}), + 'TestInductorOpInfo', 'test_comprehensive', device_type='cuda', + ), + # Dispatches in Python to vector_norm. Not sure how to make this test happy + # Happens to pass on complex64. Also a mystery + DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_variant_consistency_jit', + dtypes=(torch.float32,)) + ), + ), + OpInfo('t', + sample_inputs_func=sample_inputs_t, + supports_out=False, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + # See https://github.com/pytorch/pytorch/pull/78358 + check_batched_forward_grad=False, + # vmap does not support inplace views + check_inplace_batched_forward_grad=False, + autodiff_fusible_nodes=[], # aliases inputs, shouldn't be fused + autodiff_nonfusible_nodes=[], # aliases inputs, shouldn't be fused + dtypes=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16), + assert_autodiffed=True, + error_inputs_func=error_inputs_t), + OpInfo( + "nn.functional.dropout", + op=lambda input, *args, **kwargs: + wrapper_set_seed(torch.nn.functional.dropout, input, *args, **kwargs), + dtypes=floating_types_and(torch.float16, torch.bfloat16), + skips=( + DecorateInfo(unittest.expectedFailure, 'TestNormalizeOperators', 'test_normalize_operator_exhaustive'), + # Probably because we have used lambda for the op here + # AssertionError: JIT Test does not execute any logic + DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_variant_consistency_jit'), + # inplace variant dispatches to dropout kernel, while on CUDA + # the op dispatches to _fused_dropout (with a few more conditions) + # hence, different values and this skip here + DecorateInfo(unittest.skip("Skipped!"), 'TestMathBits', 'test_neg_view', device_type='cuda'), + DecorateInfo(unittest.skip('output is non-deterministic'), 'TestCommon', 'test_compare_cpu')), + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + # https://github.com/pytorch/pytorch/issues/66357 + check_batched_forward_grad=False, + supports_out=False, + sample_inputs_func=sample_inputs_dropout, + inplace_variant=lambda input, *args, **kwargs: + wrapper_set_seed(torch.nn.functional.dropout, input, *args, **kwargs, inplace=True)), + OpInfo( + "native_dropout_backward", + op=torch.ops.aten.native_dropout_backward.default, + aten_name="native_dropout_backward", + dtypes=all_types_and(torch.float16, torch.bfloat16, torch.bool), + dtypesIfCUDA=floating_types_and(torch.float16, torch.bfloat16), + supports_out=False, + sample_inputs_func=sample_inputs_dropout_backward, + skips=( + DecorateInfo(unittest.skip('Skipped!'), 'TestJit', 'test_variant_consistency_jit'), + # Lazy tensor failures + DecorateInfo(unittest.skip('Skipped!'), 'TestLazyOpInfo', 'test_dispatched_to_lazy'), + # These tests fail only when built with ASAN + DecorateInfo(unittest.skip("Fails with ASAN"), 'TestLazyOpInfo', 'test_correctness', active_if=TEST_WITH_ASAN), + DecorateInfo( + unittest.skip("Fails with ASAN"), + 'TestLazyOpInfo', + 'test_correctness_with_reusing_ir', + 
active_if=TEST_WITH_ASAN + ), + ), + ), + OpInfo( + "nn.functional.dropout2d", + op=lambda input, *args, **kwargs: + wrapper_set_seed(torch.nn.functional.dropout2d, input, *args, **kwargs), + dtypes=floating_types_and(torch.float16, torch.bfloat16), + skips=( + # lambda impl + DecorateInfo(unittest.expectedFailure, 'TestNormalizeOperators', 'test_normalize_operator_exhaustive'), + DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_variant_consistency_jit'), + DecorateInfo(unittest.skip('output is non-deterministic'), 'TestCommon', 'test_compare_cpu')), + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + supports_out=False, + check_batched_forward_grad=False, + # As per the docs, valid input dims are (3, 4) + sample_inputs_func=partial(sample_inputs_dropout, valid_input_dim=(3, 4)), + inplace_variant=lambda input, *args, **kwargs: + wrapper_set_seed(torch.nn.functional.dropout2d, input, *args, **kwargs, inplace=True)), + OpInfo( + "nn.functional.dropout3d", + op=lambda input, *args, **kwargs: + wrapper_set_seed(torch.nn.functional.dropout3d, input, *args, **kwargs), + dtypes=floating_types_and(torch.float16, torch.bfloat16), + skips=( + # lambda impl + DecorateInfo(unittest.expectedFailure, 'TestNormalizeOperators', 'test_normalize_operator_exhaustive'), + DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_variant_consistency_jit'), + DecorateInfo(unittest.skip('output is non-deterministic'), 'TestCommon', 'test_compare_cpu')), + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + supports_out=False, + check_batched_forward_grad=False, + # As per the docs, valid input dims are (4, 5) + sample_inputs_func=partial(sample_inputs_dropout, valid_input_dim=(4, 5)), + inplace_variant=lambda input, *args, **kwargs: + wrapper_set_seed(torch.nn.functional.dropout3d, input, *args, **kwargs, inplace=True)), + OpInfo( + "nn.functional.alpha_dropout", + op=lambda input, *args, **kwargs: + wrapper_set_seed(torch.nn.functional.alpha_dropout, input, *args, **kwargs), + dtypes=floating_types_and(torch.float16, torch.bfloat16), + gradcheck_wrapper=wrapper_set_seed, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + supports_out=False, + sample_inputs_func=sample_inputs_dropout, + check_batched_forward_grad=False, + inplace_variant=lambda input, *args, **kwargs: + wrapper_set_seed(torch.nn.functional.alpha_dropout, input, *args, **kwargs, inplace=True), + skips=( + # lambda impl + DecorateInfo(unittest.expectedFailure, 'TestNormalizeOperators', 'test_normalize_operator_exhaustive'), + # AssertionError: Tensor-likes are not close! 
+ # Fails in cuda11.7 + # Error Log: https://github.com/pytorch/pytorch/actions/runs/3440108478/jobs/5738475757 + DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_compare_cpu', device_type='cuda'), + DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_variant_consistency_jit'),),), + # In training mode, feature_alpha_dropout currently doesn't support inputs of complex dtype + # unlike when `train=False`, it supports complex inputs, hence 2 OpInfos to cover all cases + OpInfo( + "nn.functional.feature_alpha_dropout", + op=lambda input, *args, **kwargs: + wrapper_set_seed(torch.nn.functional.feature_alpha_dropout, input, *args, **kwargs), + variant_test_name="with_train", + dtypes=floating_types_and(torch.float16, torch.bfloat16), + skips=( + # lambda impl + DecorateInfo(unittest.expectedFailure, 'TestNormalizeOperators', 'test_normalize_operator_exhaustive'), + DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_variant_consistency_jit'), + # torch.autograd.gradcheck.GradcheckError: While computing batched gradients, got: + # vmap: We do not yet support calling random operations inside of vmap. + # Please perform random operations outside of vmap as a workaround + DecorateInfo(unittest.expectedFailure, 'TestFwdGradients', "test_forward_mode_AD"), + DecorateInfo(unittest.expectedFailure, 'TestFwdGradients', "test_inplace_forward_mode_AD"), + DecorateInfo(unittest.skip('output is non-deterministic'), 'TestCommon', 'test_compare_cpu')), + # Runs very slowly on slow gradcheck - alternatively reduce input sizes + gradcheck_fast_mode=True, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + supports_out=False, + # As per the docs, valid input dims are (4, 5) + sample_inputs_func=partial(sample_inputs_dropout, train=True, valid_input_dim=(4, 5)), + inplace_variant=lambda input, *args, **kwargs: + wrapper_set_seed(torch.nn.functional.feature_alpha_dropout, input, *args, **kwargs, inplace=True)), + OpInfo( + "nn.functional.feature_alpha_dropout", + op=lambda input, *args, **kwargs: + wrapper_set_seed(torch.nn.functional.feature_alpha_dropout, input, *args, **kwargs), + variant_test_name="without_train", + dtypes=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16), + skips=( + # lambda impl + DecorateInfo(unittest.expectedFailure, 'TestNormalizeOperators', 'test_normalize_operator_exhaustive'), + DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_variant_consistency_jit'),), + gradcheck_wrapper=wrapper_set_seed, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + supports_out=False, + sample_inputs_func=partial(sample_inputs_dropout, train=False), + inplace_variant=lambda input, *args, **kwargs: + wrapper_set_seed(torch.nn.functional.feature_alpha_dropout, input, *args, **kwargs, inplace=True)), + OpInfo( + "nn.functional.one_hot", + ref=reference_one_hot, + supports_out=False, + dtypes=_dispatch_dtypes((torch.int64,)), + sample_inputs_func=sample_inputs_one_hot, + ), + OpInfo( + "nn.functional.embedding", + aten_backward_name="embedding_dense_backward", + # We use lambda to reshuffle the positional arguments. + # This is because currently only the `input` field of SampleInput + # is tested in gradient tests. 
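+            # Illustrative sketch only: gradcheck differentiates sample.input, so the weight
+            # (the tensor that requires grad) is passed first and the lambda restores the
+            # usual argument order for the functional call:
+            #     op(weight, idx) -> torch.nn.functional.embedding(idx, weight)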
+ op=lambda weight, idx, **kwargs: torch.nn.functional.embedding(idx, weight, **kwargs), + dtypes=floating_types_and(torch.bfloat16, torch.float16), + sample_inputs_func=sample_inputs_embedding, + error_inputs_func=error_inputs_embedding, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + skips=( + # lambda impl + DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_variant_consistency_jit'), + DecorateInfo(unittest.expectedFailure, 'TestNormalizeOperators', 'test_normalize_operator_exhaustive'), + # Fails on CI https://github.com/pytorch/pytorch/issues/85377 + DecorateInfo(unittest.skip('Skipped!'), 'TestCommon', 'test_compare_cpu'), + # Reference: https://github.com/pytorch/pytorch/issues/67084 + DecorateInfo(unittest.skip("Skipped!"), 'TestMathBits', 'test_neg_view', device_type='cuda'), + # Not a problem: embedding does weird stuff to its input (it renormalizes) + DecorateInfo(unittest.skip('Allowed exemption'), 'TestCompositeCompliance', 'test_operator'), + ), + supports_expanded_weight=True, + supports_out=False, + ), + OpInfo( + "nn.functional.embedding_bag", + # We use lambda to reshuffle the positional arguments. + # This is because currently only the `input` field of SampleInput + # is tested in gradient tests. + op=lambda weight, idx, **kwargs: torch.nn.functional.embedding_bag(idx, weight, **kwargs), + dtypes=floating_types_and(torch.bfloat16, torch.float16), + dtypesIfCUDA=floating_types_and(torch.bfloat16, torch.float16), + # backward is not supported for mode `max` and dtype `bfloat16` + backward_dtypesIfCUDA=floating_types_and(torch.float16), + sample_inputs_func=sample_inputs_embedding_bag, + skips=( + # lambda impl + DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_variant_consistency_jit'), + DecorateInfo(unittest.expectedFailure, 'TestNormalizeOperators', 'test_normalize_operator_exhaustive'), + # Not a problem: embedding_bag does weird stuff to its input (it renormalizes) + DecorateInfo(unittest.skip('Allowed exemption'), 'TestCompositeCompliance', 'test_operator'), + ), + gradcheck_nondet_tol=GRADCHECK_NONDET_TOL, + supports_out=False, + supports_gradgrad=False, + ), + OpInfo( + "nn.functional.multi_head_attention_forward", + op=lambda input, *args, **kwargs: + wrapper_set_seed(torch.nn.functional.multi_head_attention_forward, input, *args, **kwargs), + dtypes=floating_types_and(torch.bfloat16, torch.float16), + sample_inputs_func=sample_inputs_multi_head_attention_forward, + skips=( + # Tensor-likes are not close + DecorateInfo(unittest.skip("Skipped!"), 'TestCommon', 'test_noncontiguous_samples', dtypes=(torch.float32,)), + DecorateInfo(toleranceOverride({torch.float32: tol(atol=5e-3, rtol=0)}), 'TestDecomp', 'test_comprehensive'), + + # TODO skip this for now since we can't skip on runtime arch support (taken from scaled_dot_product_attention) + DecorateInfo(unittest.skip("Skipped!"), 'TestInductorOpInfo', 'test_comprehensive'), + # randomness + DecorateInfo(unittest.skip("Skipped!"), 'TestFwdGradients', 'test_forward_mode_AD'), + DecorateInfo(unittest.skip('output is non-deterministic'), 'TestCommon', 'test_compare_cpu'), + # lambda impl + # AssertionError: JIT Test does not execute any logic + DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_variant_consistency_jit'), + DecorateInfo(unittest.expectedFailure, "TestNormalizeOperators", "test_normalize_operator_exhaustive"), + # tests running very slowly break slow tests, so we skip them instead of using `slowTest`. 
+ DecorateInfo(unittest.skip("Skipped!"), 'TestCompositeCompliance', 'test_forward_ad'), + DecorateInfo(unittest.skip("Skipped!"), 'TestCompositeCompliance', 'test_operator'), + DecorateInfo( + unittest.skip("Skipped - baddbmm decomp does not have enough precision for 16-bit float"), + 'TestDecomp', + 'test_comprehensive', + dtypes=(torch.bfloat16, torch.float16), + ), + DecorateInfo( + unittest.skip("Skipped - baddbmm decomp does not have enough precision for 16-bit float"), + 'TestDecomp', + 'test_quick', + dtypes=(torch.bfloat16, torch.float16))), + supports_out=False, + supports_gradgrad=True, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + # Runs very slowly on slow gradcheck - alternatively reduce input sizes + gradcheck_fast_mode=True, + ), + UnaryUfuncInfo( + "nn.functional.softplus", + aten_backward_name='softplus_backward', + ref=reference_softplus, + sample_kwargs=lambda device, dtype, input: ({'beta': 3, 'threshold': .2}, {'beta': 3, 'threshold': .2}), + sample_inputs_func=partial(sample_inputs_elementwise_unary, op_kwargs={'beta': 3, 'threshold': .2}), + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + dtypes=floating_types_and(torch.bfloat16, torch.float16), + decorators=( + DecorateInfo( + toleranceOverride + ({ + torch.half: tol(atol=1e-2, rtol=1e-2), + torch.bfloat16: tol(atol=1e-2, rtol=1e-2), + }), + 'TestUnaryUfuncs'), + ), + ), + OpInfo( + "nn.functional.mse_loss", + aten_backward_name='mse_loss_backward', + ref=loss_reference_reduction_wrapper(lambda input, target: (input - target) ** 2), + sample_inputs_func=sample_inputs_loss, + supports_out=False, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + dtypes=floating_types_and(torch.float16), + backward_dtypes=floating_types(), + dtypesIfCUDA=floating_types_and(torch.bfloat16, torch.float16), + backward_dtypesIfCUDA=floating_types_and(torch.bfloat16, torch.float16), + skips=( + # RuntimeError: input->type()->kind() == TypeKind::OptionalType + # INTERNAL ASSERT FAILED at "../torch/csrc/jit/passes/utils/check_alias_annotation.cpp":252, + # please report a bug to PyTorch. + DecorateInfo(unittest.expectedFailure, "TestJit", "test_variant_consistency_jit", dtypes=(torch.float32,),), + ), + ), + OpInfo( + "nn.functional.grid_sample", + dtypes=floating_types(), + dtypesIfCUDA=floating_types_and(torch.float16, torch.bfloat16), + supports_out=False, + sample_inputs_func=sample_inputs_grid_sample, + reference_inputs_func=reference_inputs_grid_sample, + supports_gradgrad=False, + gradcheck_nondet_tol=1e-15), + # TODO: delete this OpInfo once we add meta support for grid_sampler_3d + OpInfo( + "grid_sampler_2d", + dtypes=floating_types(), + dtypesIfCUDA=floating_types_and(torch.float16, torch.bfloat16), + supports_out=False, + sample_inputs_func=sample_inputs_grid_sampler_2d, + supports_gradgrad=False, + gradcheck_nondet_tol=1e-15), + OpInfo( + "argwhere", + ref=np.argwhere, + dtypes=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16), + supports_out=False, + supports_autograd=False, + sample_inputs_func=sample_inputs_argwhere, + skips=( + # Compiler issue on ROCm. 
Might need to skip until ROCm5.5 + DecorateInfo(unittest.skip('Skipped!'), 'TestCommon', 'test_non_standard_bool_values', + dtypes=[torch.bool], active_if=TEST_WITH_ROCM), + ), + ), + ReductionOpInfo( + 'all', + identity=True, + supports_autograd=False, + result_dtype=torch.bool, + dtypes=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16), + ref=reference_reduction_numpy(np.all), + skips=( + # FIXME: uint8 input returns uint8 instead of bool + DecorateInfo(unittest.expectedFailure, 'TestReductions', 'test_result_dtype', dtypes=[torch.uint8]), + ), + ), + ReductionOpInfo( + 'any', + identity=False, + supports_autograd=False, + result_dtype=torch.bool, + dtypes=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16), + ref=reference_reduction_numpy(np.any), + skips=( + # FIXME: uint8 input returns uint8 instead of bool + DecorateInfo(unittest.expectedFailure, 'TestReductions', 'test_result_dtype', dtypes=[torch.uint8]), + ), + ), + ReductionOpInfo( + 'amax', + nan_policy='propagate', + supports_forward_ad=True, + check_batched_forward_grad=False, + supports_fwgrad_bwgrad=True, + dtypes=all_types_and(torch.float16, torch.bfloat16, torch.bool), + ref=reference_reduction_numpy(np.amax), + skips=( + # FIXME: reduces all dimensions when dim=[] + DecorateInfo(unittest.expectedFailure, 'TestReductions', 'test_dim_empty'), + DecorateInfo(unittest.expectedFailure, 'TestReductions', 'test_dim_empty_keepdim'), + ), + error_inputs_func=error_inputs_aminmax_amax_amin, + ), + ReductionOpInfo( + 'amin', + nan_policy='propagate', + supports_forward_ad=True, + check_batched_forward_grad=False, + supports_fwgrad_bwgrad=True, + dtypes=all_types_and(torch.float16, torch.bfloat16, torch.bool), + ref=reference_reduction_numpy(np.amin), + skips=( + # FIXME: reduces all dimensions when dim=[] + DecorateInfo(unittest.expectedFailure, 'TestReductions', 'test_dim_empty'), + DecorateInfo(unittest.expectedFailure, 'TestReductions', 'test_dim_empty_keepdim'), + ), + error_inputs_func=error_inputs_aminmax_amax_amin, + ), + ReductionOpInfo( + 'argmax', + supports_multiple_dims=False, + supports_autograd=False, + assert_jit_shape_analysis=True, + result_dtype=torch.int64, + dtypes=all_types_and(torch.float16, torch.bfloat16), + ref=reference_reduction_numpy(np.argmax, supports_keepdims=False), + ), + ReductionOpInfo( + 'argmin', + supports_multiple_dims=False, + supports_autograd=False, + result_dtype=torch.int64, + dtypes=all_types_and(torch.float16, torch.bfloat16), + ref=reference_reduction_numpy(np.argmin, supports_keepdims=False), + ), + ReductionOpInfo( + 'count_nonzero', + identity=0, + supports_out=False, + supports_autograd=False, + result_dtype=torch.int64, + dtypes=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16), + sample_inputs_func=sample_inputs_reduction_count_nonzero, + ref=reference_reduction_numpy(np.count_nonzero), + skips=( + # FIXME: count_nonzero does not accept keepdim kwarg + DecorateInfo(unittest.skip("Skipped!"), 'TestReductions', 'test_dim_default_keepdim'), + DecorateInfo(unittest.skip("Skipped!"), 'TestReductions', 'test_dim_none_keepdim'), + DecorateInfo(unittest.skip("Skipped!"), 'TestReductions', 'test_dim_single_keepdim'), + DecorateInfo(unittest.skip("Skipped!"), 'TestReductions', 'test_dim_empty_keepdim'), + DecorateInfo(unittest.skip("Skipped!"), 'TestReductions', 'test_dim_multi_keepdim'), + DecorateInfo(unittest.skip("Skipped!"), 'TestReductions', 'test_dim_multi_unsorted_keepdim'), + DecorateInfo(unittest.skip("Skipped!"), 
'TestReductions', 'test_dim_offbounds_keepdim'), + # FIXME: dim=[] reduces all dimensions + DecorateInfo(unittest.skip("Skipped!"), 'TestReductions', 'test_dim_empty'), + ), + ), + ReductionOpInfo( + 'mean', + nan_policy='propagate', + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + # FIXME: mean needs 'dim' parameter when using the 'out' overload. + # Adding it with 'generate_args_kwargs' does not work, since these also get passed + # onto the reference implementations. + supports_out=False, + assert_autodiffed=True, + assert_jit_shape_analysis=True, + promotes_int_to_float=True, + dtypes=floating_and_complex_types_and(torch.float16, torch.bfloat16), + ref=reference_reduction_numpy(np.mean), + error_inputs_func=error_inputs_mean, + skips=( + # FIXME: mean does not support passing keepdim without passing dim + DecorateInfo(unittest.skip("Skipped!"), 'TestReductions', 'test_dim_default_keepdim'), + # FIXME: mean reduces all dimensions when dim=[] + DecorateInfo(unittest.skip("Skipped!"), 'TestReductions', 'test_dim_empty'), + DecorateInfo(unittest.skip("Skipped!"), 'TestReductions', 'test_dim_empty_keepdim'), + # FIXME: improve precision + DecorateInfo(unittest.skip("Skipped!"), 'TestReductions', 'test_ref_small_input', + dtypes=[torch.float16]), + DecorateInfo(unittest.skip("Skipped!"), 'TestReductions', 'test_ref_extremal_values', + device_type='cuda', dtypes=[torch.complex64]), + ), + ), + ReductionOpInfo( + 'nanmean', + nan_policy='omit', + assert_autodiffed=True, + promotes_int_to_float=True, + supports_forward_ad=True, + check_batched_forward_grad=False, + supports_fwgrad_bwgrad=True, + dtypes=floating_types_and(torch.float16, torch.bfloat16), + dtypesIfCUDA=floating_and_complex_types_and(torch.float16, torch.bfloat16, torch.chalf), + sample_inputs_func=sample_inputs_nan_reduction(supports_multiple_dims=True), + ref=reference_reduction_numpy(np.nanmean), + skips=( + # AssertionError: False is not true : + # Failure in testing nodes' autodifferentiation. 
+ DecorateInfo(unittest.skip("Skipped!"), 'TestJit', 'test_variant_consistency_jit'), + # FIXME: prod reduces all dimensions when dim=[] + DecorateInfo(unittest.skip("Skipped!"), 'TestReductions', 'test_dim_empty'), + DecorateInfo(unittest.skip("Skipped!"), 'TestReductions', 'test_dim_empty_keepdim'), + # FIXME: improve precision + DecorateInfo(unittest.skip("Skipped!"), 'TestReductions', 'test_ref_small_input', + dtypes=[torch.float16]), + DecorateInfo(unittest.skip("Skipped!"), 'TestReductions', 'test_ref_duplicate_values', + device_type='cuda', dtypes=[torch.float16]), + DecorateInfo(unittest.skip("Skipped!"), 'TestReductions', 'test_ref_extremal_values', + device_type='cuda', dtypes=[torch.complex64]), + ), + ), + ReductionOpInfo( + 'std', + nan_policy='propagate', + supports_out=True, + complex_to_real=True, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + assert_autodiffed=True, + promotes_int_to_float=True, + check_batched_forward_grad=False, + dtypes=floating_and_complex_types_and(torch.half, torch.bfloat16), + dtypesIfCUDA=floating_and_complex_types_and(torch.half, torch.bfloat16), + sample_inputs_func=sample_inputs_std_var, + ref=reference_std_var(np.std), + generate_args_kwargs=generate_std_var_kwargs, + skips=( + # FIXME: cannot specify keepdim without dim + DecorateInfo(unittest.skip("Skipped!"), 'TestReductions', 'test_dim_default_keepdim'), + # FIXME: dim=[] reduces all dimensions + DecorateInfo(unittest.skip("Skipped!"), 'TestReductions', 'test_dim_empty'), + DecorateInfo(unittest.skip("Skipped!"), 'TestReductions', 'test_dim_empty_keepdim'), + # FIXME: improve precision + DecorateInfo(unittest.skip("Skipped!"), 'TestReductions', 'test_ref_small_input', + dtypes=(torch.float16,)), + DecorateInfo(unittest.skip("Skipped!"), 'TestReductions', 'test_ref_duplicate_values', + dtypes=(torch.float16,)), + ), + ), + ReductionOpInfo( + 'std', + variant_test_name='unbiased', + nan_policy='propagate', + supports_out=False, + complex_to_real=True, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + assert_autodiffed=True, + promotes_int_to_float=True, + check_batched_forward_grad=False, + dtypes=floating_and_complex_types_and(torch.half, torch.bfloat16), + dtypesIfCUDA=floating_and_complex_types_and(torch.half, torch.bfloat16), + sample_inputs_func=sample_inputs_std_var_unbiased, + skips=( + # FIXME: dim=[] reduces all dimensions + DecorateInfo(unittest.skip("Skipped!"), 'TestReductions', 'test_dim_empty'), + DecorateInfo(unittest.skip("Skipped!"), 'TestReductions', 'test_dim_empty_keepdim'), + ), + ), + ReductionOpInfo( + 'var', + nan_policy='propagate', + supports_out=True, + assert_autodiffed=True, + promotes_int_to_float=True, + complex_to_real=True, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + check_batched_forward_grad=False, + dtypes=floating_and_complex_types_and(torch.half, torch.bfloat16), + dtypesIfCUDA=floating_and_complex_types_and(torch.half, torch.bfloat16), + sample_inputs_func=sample_inputs_std_var, + ref=reference_std_var(np.var), + generate_args_kwargs=generate_std_var_kwargs, + skips=( + # FIXME: cannot specify keepdim without dim + DecorateInfo(unittest.skip("Skipped!"), 'TestReductions', 'test_dim_default_keepdim'), + # FIXME: dim=[] reduces all dimensions + DecorateInfo(unittest.skip("Skipped!"), 'TestReductions', 'test_dim_empty'), + DecorateInfo(unittest.skip("Skipped!"), 'TestReductions', 'test_dim_empty_keepdim'), + # FIXME: improve precision + DecorateInfo(unittest.skip("Skipped!"), 'TestReductions', 
'test_ref_small_input'), + DecorateInfo(unittest.skip("Skipped!"), 'TestReductions', 'test_ref_duplicate_values'), + # NumPy is giving NaN for this + DecorateInfo(unittest.skip("Skipped!"), 'TestReductions', 'test_ref_large_input'), + ), + ), + ReductionOpInfo( + 'var', + variant_test_name='unbiased', + nan_policy='propagate', + supports_out=False, + complex_to_real=True, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + assert_autodiffed=True, + promotes_int_to_float=True, + check_batched_forward_grad=False, + dtypes=floating_and_complex_types_and(torch.half, torch.bfloat16), + dtypesIfCUDA=floating_and_complex_types_and(torch.half, torch.bfloat16), + sample_inputs_func=sample_inputs_std_var_unbiased, + skips=( + # FIXME: dim=[] reduces all dimensions + DecorateInfo(unittest.skip("Skipped!"), 'TestReductions', 'test_dim_empty'), + DecorateInfo(unittest.skip("Skipped!"), 'TestReductions', 'test_dim_empty_keepdim'), + ), + ), + ReductionOpInfo( + 'prod', + identity=1, + nan_policy='propagate', + supports_multiple_dims=False, + # https://github.com/pytorch/pytorch/issues/80411 + gradcheck_fast_mode=True, + supports_out=False, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + promotes_int_to_int64=True, + gradcheck_nondet_tol=GRADCHECK_NONDET_TOL, + dtypes=all_types_and_complex_and(torch.bool, torch.bfloat16, torch.float16), + dtypesIfCUDA=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16, torch.chalf), + sample_inputs_func=sample_inputs_prod, + ref=prod_numpy, + skips=( + # FIXME: prod does not support passing keepdim without passing dim + DecorateInfo(unittest.skip("Skipped!"), 'TestReductions', 'test_dim_default_keepdim'), + # FIXME: prod reduces all dimensions when dim=[] + DecorateInfo(unittest.skip("Skipped!"), 'TestReductions', 'test_dim_empty'), + DecorateInfo(unittest.skip("Skipped!"), 'TestReductions', 'test_dim_empty_keepdim'), + # FIXME: prod does not support passing None to dim + DecorateInfo(unittest.skip("Skipped!"), 'TestReductions', 'test_dim_none'), + DecorateInfo(unittest.skip("Skipped!"), 'TestReductions', 'test_dim_none_keepdim'), + DecorateInfo(unittest.skip("Skipped!"), 'TestReductions', 'test_ref_small_input', + dtypes=[torch.float16, torch.complex64]), + DecorateInfo(unittest.skip("Skipped!"), 'TestReductions', 'test_ref_duplicate_values', + dtypes=[torch.uint8, torch.float16, torch.complex64]), + # FIXME: ValueError: The data in MaskedTensor a and Tensor b do not match + DecorateInfo(unittest.skip("Skipped!"), 'TestOperators', 'test_reduction_all', + dtypes=[torch.float16]), + ), + ), + ReductionOpInfo( + 'sum', + identity=0, + nan_policy='propagate', + supports_out=False, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + promotes_int_to_int64=True, + dtypes=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16), + dtypesIfCUDA=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16, torch.chalf), + ref=reference_reduction_numpy(np.sum), + error_inputs_sparse_func=error_inputs_sparse_reduction_sum, + sample_inputs_sparse_coo_func=partial(sample_inputs_sparse_reduction_sum, layout=torch.sparse_coo), + sample_inputs_sparse_csr_func=partial(sample_inputs_sparse_reduction_sum, layout=torch.sparse_csr), + sample_inputs_sparse_csc_func=partial(sample_inputs_sparse_reduction_sum, layout=torch.sparse_csc), + sample_inputs_sparse_bsr_func=partial(sample_inputs_sparse_reduction_sum, layout=torch.sparse_bsr), + sample_inputs_sparse_bsc_func=partial(sample_inputs_sparse_reduction_sum, 
layout=torch.sparse_bsc), + skips=( + # FIXME: sum does not support passing keepdim without passing dim + DecorateInfo(unittest.skip("Skipped!"), 'TestReductions', 'test_dim_default_keepdim'), + # FIXME: sum reduces all dimensions when dim=[] + DecorateInfo(unittest.skip("Skipped!"), 'TestReductions', 'test_dim_empty'), + DecorateInfo(unittest.skip("Skipped!"), 'TestReductions', 'test_dim_empty_keepdim'), + # FIXME: improve precision + DecorateInfo(unittest.skip("Skipped!"), 'TestReductions', 'test_ref_small_input', + dtypes=[torch.float16]), + DecorateInfo(unittest.skip("Skipped!"), 'TestReductions', 'test_ref_duplicate_values', + dtypes=[torch.float16]), + DecorateInfo(unittest.skip("Skipped!"), 'TestOperators', 'test_reduction_all', + dtypes=[torch.float32]), + ), + ), + ReductionOpInfo( + 'nansum', + identity=0, + nan_policy='omit', + supports_out=True, + promotes_int_to_int64=True, + supports_forward_ad=True, + check_batched_forward_grad=False, + supports_fwgrad_bwgrad=True, + dtypes=all_types_and(torch.bool, torch.float16, torch.bfloat16), + dtypesIfCUDA=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16, torch.chalf), + sample_inputs_func=sample_inputs_nan_reduction(supports_multiple_dims=True), + ref=reference_reduction_numpy(np.nansum), + skips=( + # please report a bug to PyTorch. + DecorateInfo(unittest.skip("Skipped!"), 'TestJit', 'test_variant_consistency_jit'), + # FIXME: nansum reduces all dimensions when dim=[] + DecorateInfo(unittest.expectedFailure, 'TestReductions', 'test_dim_empty'), + DecorateInfo(unittest.expectedFailure, 'TestReductions', 'test_dim_empty_keepdim'), + # FIXME: flaky test so skipped instead of xfailed + # possibly bad low precision reference in numpy + DecorateInfo(unittest.skip("Skipped!"), 'TestReductions', 'test_ref_small_input', + dtypes=[torch.float16]), + ), + ), + OpInfo( + "nn.functional.ctc_loss", + dtypes=floating_types(), + supports_out=False, + sample_inputs_func=sample_inputs_ctc_loss, + skips=( + # https://github.com/pytorch/pytorch/issues/67462 + # torch.autograd.gradcheck.GradcheckError: Jacobian mismatch for output 0 with respect to input 0 + DecorateInfo( + unittest.expectedFailure, + "TestBwdGradients", + "test_fn_grad", + dtypes=(torch.float64,), + ), + # RuntimeError: derivative for aten::_ctc_loss_backward is not implemented + DecorateInfo( + unittest.expectedFailure, + "TestBwdGradients", + "test_fn_gradgrad", + dtypes=(torch.float64,), + ), + # RuntimeError: derivative for aten::_ctc_loss_backward is not implemented + DecorateInfo( + unittest.skip("Skipped!"), + "TestJit", + "test_variant_consistency_jit", + dtypes=(torch.float32,), + ), + # Ref: https://github.com/pytorch/pytorch/issues/85231 + DecorateInfo(unittest.skip("Fails with ASAN"), + 'TestProxyTensorOpInfo', + 'test_make_fx_fake_exhaustive', active_if=TEST_WITH_ASAN), + ), + ), + OpInfo( + "nn.functional.cosine_embedding_loss", + dtypes=all_types_and(torch.half, torch.bfloat16, torch.bool), + supports_out=False, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + sample_inputs_func=sample_inputs_cosine_embedding_loss, + ), + OpInfo( + "nn.functional.nll_loss", + dtypes=floating_types_and(torch.bfloat16), + dtypesIfCUDA=floating_types_and(torch.float16, torch.bfloat16), + supports_out=False, + sample_inputs_func=sample_inputs_nll_loss, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + assert_jit_shape_analysis=True, + skips=( + # RuntimeError: + # undefined value tensor: + # File "", line 3 + # def the_method(i0, i1): + # return 
torch.nn.functional.nll_loss(i0, i1, weight=tensor([8.4784, 1.7658, 4.3228], dtype=torch.float32)) + # ~~~~~~ <--- HERE + DecorateInfo(unittest.skip("Skipped!"), "TestJit", "test_variant_consistency_jit", dtypes=(torch.float32,),), + ), + ), + OpInfo( + "nn.functional.gaussian_nll_loss", + dtypes=floating_types_and(torch.half, torch.bfloat16), + # Runs very slowly on slow gradcheck - alternatively reduce input sizes + gradcheck_fast_mode=True, + supports_out=False, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + sample_inputs_func=sample_inputs_gaussian_nll_loss, + error_inputs_func=error_inputs_gaussian_nll_loss, + skips=( + # Pre-existing condition (calls .item); needs to be fixed + DecorateInfo(unittest.expectedFailure, 'TestCompositeCompliance', 'test_backward'), + DecorateInfo(unittest.expectedFailure, 'TestCompositeCompliance', 'test_forward_ad'), + # Pre-existing condition (calls .item); needs to be fixed + DecorateInfo(unittest.expectedFailure, 'TestCompositeCompliance', 'test_operator'), + # JIT does not support variadic tensors. + # RuntimeError: input->type()->kind() == TypeKind::OptionalType + # INTERNAL ASSERT FAILED at "../torch/csrc/jit/passes/utils/check_alias_annotation.cpp":270, + # please report a bug to PyTorch. + DecorateInfo(unittest.skip("Skipped!"), "TestJit", "test_variant_consistency_jit", dtypes=(torch.float32,),), + ), + ), + OpInfo( + "nn.functional.hinge_embedding_loss", + dtypes=floating_types_and(torch.half, torch.bfloat16), + supports_out=False, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + sample_inputs_func=sample_inputs_hinge_embedding_loss, + error_inputs_func=error_inputs_hinge_embedding_loss, + reference_inputs_func=reference_inputs_hinge_embedding_loss, + ), + OpInfo( + "nn.functional.huber_loss", + aten_backward_name='huber_loss_backward', + dtypes=floating_types_and(torch.float16, torch.bfloat16), + supports_out=False, + supports_forward_ad=True, + sample_inputs_func=sample_inputs_huber_loss, + error_inputs_func=error_inputs_huber_loss, + skips=( + # JIT does not support variadic tensors. + # RuntimeError: input->type()->kind() == TypeKind::OptionalType + # INTERNAL ASSERT FAILED at "../torch/csrc/jit/passes/utils/check_alias_annotation.cpp":270, + # please report a bug to PyTorch. 
+ DecorateInfo(unittest.skip("Skipped!"), "TestJit", "test_variant_consistency_jit", dtypes=(torch.float32,),), + ) + ), + OpInfo( + "nn.functional.pdist", + ref=reference_pdist, + sample_inputs_func=sample_inputs_pdist, + dtypes=floating_types(), + supports_out=False, + supports_gradgrad=False, + skips=( + DecorateInfo(unittest.skip("Unsupported on MPS for now"), 'TestCommon', 'test_numpy_ref_mps'), + ) + ), + OpInfo( + "nn.functional.poisson_nll_loss", + dtypes=all_types_and(torch.half, torch.bfloat16), + supports_out=False, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + sample_inputs_func=sample_inputs_poisson_nll_loss, + error_inputs_func=error_inputs_poisson_nll_loss, + ), + OpInfo( + "argsort", + dtypes=all_types_and(torch.bool, torch.float16, torch.bfloat16), + dtypesIfCUDA=all_types_and(torch.float16, torch.bfloat16), + sample_inputs_func=sample_inputs_sort, + supports_out=False, + supports_autograd=False, + skips=( + DecorateInfo( + unittest.skip("Skipped!"), + "TestJit", + "test_variant_consistency_jit", + dtypes=(torch.float32,), + ), + ), + ), + OpInfo( + "repeat_interleave", + dtypes=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16, torch.chalf), + backward_dtypesIfCUDA=floating_and_complex_types_and(torch.float16, torch.bfloat16, torch.chalf), + sample_inputs_func=sample_inputs_repeat_interleave, + supports_out=False, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + # See https://github.com/pytorch/pytorch/pull/78358 + check_batched_forward_grad=False, + skips=( + DecorateInfo( + unittest.skip("Skipped!"), + "TestJit", + "test_variant_consistency_jit", + dtypes=(torch.float32, torch.complex64), + ), + ), + ), + OpInfo( + "nn.functional.pairwise_distance", + ref=lambda a, b, p=2.0, eps=1e-6, keepdim=False: ( + np.sum(np.abs(a - b + eps) ** p, axis=-1, keepdims=keepdim) ** (1 / p) + ), + sample_inputs_func=sample_inputs_pairwise_distance, + dtypes=all_types_and_complex_and(torch.float16, torch.bfloat16), + supports_out=False, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + skips=( + DecorateInfo( + unittest.skip("Skipped!"), + "TestJit", + "test_variant_consistency_jit", + dtypes=(torch.float32, torch.complex64), + ), + ), + ), + OpInfo( + "nn.functional.pixel_shuffle", + sample_inputs_func=sample_inputs_pixel_shuffle, + dtypes=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16), + supports_out=False, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + skips=( + DecorateInfo( + unittest.skip("Skipped!"), + "TestJit", + "test_variant_consistency_jit", + dtypes=(torch.float32, torch.complex64), + ), + ), + ), + OpInfo( + "nn.functional.pixel_unshuffle", + sample_inputs_func=sample_inputs_pixel_unshuffle, + dtypes=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16), + supports_out=False, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + skips=( + DecorateInfo( + unittest.skip("Skipped!"), + "TestJit", + "test_variant_consistency_jit", + dtypes=(torch.float32, torch.complex64), + ), + ), + ), + OpInfo( + "nn.functional.kl_div", + sample_inputs_func=sample_inputs_kl_div, + dtypes=floating_types_and(torch.float16, torch.bfloat16), + supports_out=False, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + ), + OpInfo( + "diagflat", + ref=lambda input, offset=0: np.diagflat(input, k=offset), + sample_inputs_func=sample_inputs_diagflat, + dtypes=all_types_and_complex_and(torch.bool, torch.bfloat16, torch.float16), + dtypesIfCUDA=all_types_and_complex_and(torch.bool, 
torch.float16, torch.bfloat16), + supports_out=False, + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + # See https://github.com/pytorch/pytorch/pull/78358 + check_batched_forward_grad=False, + ), + OpInfo( + 'scatter_reduce', + variant_test_name='sum', + # complex not added to dtypes as complex gradients are not properly handled + # and scatter_reduce hasn't been added to the whitelist in gen_variable_type yet + dtypes=all_types_and(torch.float16, torch.bfloat16, torch.bool), + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + sample_inputs_func=sample_inputs_scatter_reduce, + ), + OpInfo( + 'scatter_reduce', + variant_test_name='prod', + # complex not added to dtypes as complex gradients are not properly handled + # and scatter_reduce hasn't been added to the whitelist in gen_variable_type yet + dtypes=all_types_and(torch.float16, torch.bfloat16, torch.bool), + dtypesIfCUDA=all_types_and(torch.float16, torch.bfloat16), + sample_inputs_func=sample_inputs_scatter_reduce, + skips=( + # Not implemented + DecorateInfo(unittest.expectedFailure, 'TestFwdGradients', 'test_forward_mode_AD'), + DecorateInfo(unittest.expectedFailure, 'TestFwdGradients', 'test_inplace_forward_mode_AD'), + DecorateInfo(unittest.expectedFailure, 'TestFwdGradients', 'test_fn_fwgrad_bwgrad'), + ), + ), + OpInfo( + 'scatter_reduce', + variant_test_name='mean', + # complex not added to dtypes as complex gradients are not properly handled + # and scatter_reduce hasn't been added to the whitelist in gen_variable_type yet + dtypes=all_types_and(torch.float16, torch.bfloat16), + dtypesIfCUDA=all_types_and(torch.float16, torch.bfloat16), + supports_forward_ad=True, + supports_fwgrad_bwgrad=True, + sample_inputs_func=sample_inputs_scatter_reduce, + ), + OpInfo( + 'scatter_reduce', + variant_test_name='amin', + dtypes=all_types_and(torch.float16, torch.bfloat16, torch.bool), + dtypesIfCUDA=all_types_and(torch.float16, torch.bfloat16), + supports_forward_ad=True, + check_batched_forward_grad=False, + supports_fwgrad_bwgrad=True, + sample_inputs_func=sample_inputs_scatter_reduce, + ), + OpInfo( + 'scatter_reduce', + variant_test_name='amax', + dtypes=all_types_and(torch.float16, torch.bfloat16, torch.bool), + dtypesIfCUDA=all_types_and(torch.float16, torch.bfloat16), + supports_forward_ad=True, + check_batched_forward_grad=False, + supports_fwgrad_bwgrad=True, + sample_inputs_func=sample_inputs_scatter_reduce, + ), + OpInfo( + '_segment_reduce', + aten_name='segment_reduce', + variant_test_name='lengths', + dtypes=floating_types_and(torch.float16, torch.bfloat16), + supports_out=False, + # RuntimeError: derivative for aten::_segment_reduce_backward is not implemented + supports_gradgrad=False, + sample_inputs_func=sample_inputs_segment_reduce, + skips=( + # FIXME: CUDA driver API confirmed a leak in + # __main__.TestJitCUDA.test_variant_consistency_jit_segment_reduce_cuda_float32 + DecorateInfo( + unittest.skip("Skipped!"), + "TestJit", + "test_variant_consistency_jit", + device_type="cuda", + ), + ), + ), + OpInfo( + '_segment_reduce', + aten_name='segment_reduce', + variant_test_name='offsets', + dtypes=floating_types_and(torch.float16, torch.bfloat16), + supports_out=False, + # RuntimeError: derivative for aten::_segment_reduce_backward is not implemented + supports_gradgrad=False, + sample_inputs_func=partial(sample_inputs_segment_reduce, mode='offsets'), + skips=( + # FIXME: CUDA driver API confirmed a leak in + # __main__.TestJitCUDA.test_variant_consistency_jit_segment_reduce_cuda_float32 + DecorateInfo( 
+ unittest.skip("Skipped!"), + "TestJit", + "test_variant_consistency_jit", + device_type="cuda", + ), + ), + ), +] +op_db += opinfo.definitions.op_db + + +# Separate registry for experimental Python Reference OpInfos. +python_ref_db = [ + # + # Elementwise Unary OpInfos + # + ElementwiseUnaryPythonRefInfo( + "_refs.abs", + torch_opinfo_name="abs", + skips=( + # Reference: https://github.com/pytorch/pytorch/issues/49224 + DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', + 'test_reference_numerics_small', + dtypes=[torch.int8], active_if=TEST_WITH_ASAN), + ), + ), + ElementwiseUnaryPythonRefInfo( + "_refs.acos", + torch_opinfo_name="acos", + skips=( + DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', + 'test_reference_numerics_normal', + device_type='cuda', dtypes=[torch.cdouble], + active_if=IS_WINDOWS), + DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', + 'test_reference_numerics_extremal', + device_type='cuda', dtypes=[torch.cdouble], + active_if=IS_WINDOWS), + # Failing with wrong imaginary sign on at least some Windows jobs + DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', + 'test_reference_numerics_small', + device_type='cuda', dtypes=[torch.cdouble], + active_if=IS_WINDOWS), + # Failing with wrong imaginary sign on at least some Windows jobs + DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', + 'test_reference_numerics_large', + device_type='cuda', dtypes=[torch.cdouble], + active_if=IS_WINDOWS), + DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', + 'test_reference_numerics_large', + device_type='cpu', dtypes=[torch.cfloat, torch.cdouble]), + DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', + 'test_reference_numerics_extremal', + device_type='cpu', dtypes=[torch.cfloat, torch.cdouble]), + ) + ), + ElementwiseUnaryPythonRefInfo( + "_refs.acosh", + torch_opinfo_name="acosh", + skips=( + DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', + 'test_reference_numerics_normal', + device_type='cuda', dtypes=[torch.cdouble], + active_if=IS_WINDOWS), + DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', + 'test_reference_numerics_extremal', + device_type='cuda', dtypes=[torch.cdouble], + active_if=IS_WINDOWS), + DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', + 'test_reference_numerics_extremal', + device_type='cpu', dtypes=[torch.cfloat, torch.cdouble]), + DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', + 'test_reference_numerics_large', + device_type='cpu', dtypes=[torch.cfloat, torch.cdouble]), + DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', + 'test_reference_numerics_extremal', + device_type='cuda', dtypes=[torch.cdouble], + active_if=IS_WINDOWS), + DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', + 'test_reference_numerics_large', + device_type='cuda', dtypes=[torch.cdouble], + active_if=IS_WINDOWS), + # Failing with wrong imaginary sign on at least some Windows jobs + DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', + 'test_reference_numerics_small', + device_type='cuda', dtypes=[torch.cdouble], + active_if=IS_WINDOWS), + ), + ), + ElementwiseUnaryPythonRefInfo( + "_refs.asin", + torch_opinfo_name="asin", + decorators=[ + DecorateInfo( + toleranceOverride({torch.float16: tol(atol=1e-05, rtol=1e-03)}), + 'TestUnaryUfuncs', device_type='cuda'), + precisionOverride({torch.bfloat16: 1e-2}), + ], + skips=( + DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', + 'test_reference_numerics_extremal', + device_type='cpu', dtypes=[torch.cfloat, 
torch.cdouble]), + DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', + 'test_reference_numerics_large', + device_type='cpu', dtypes=[torch.cfloat, torch.cdouble]), + DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', + 'test_reference_numerics_extremal', + device_type='cuda', dtypes=[torch.cdouble], + active_if=IS_WINDOWS), + DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', + 'test_reference_numerics_large', + device_type='cuda', dtypes=[torch.cdouble], + active_if=IS_WINDOWS), + ), + ), + ElementwiseUnaryPythonRefInfo( + "_refs.asinh", + torch_opinfo_name="asinh", + decorators=(precisionOverride({torch.bfloat16: 5e-2}),), + skips=( + DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', + 'test_reference_numerics_extremal', + device_type='cpu', dtypes=[torch.cfloat, torch.cdouble]), + DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', + 'test_reference_numerics_large', + device_type='cpu', dtypes=[torch.cfloat, torch.cdouble]), + DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', + 'test_reference_numerics_small', + device_type='cpu', dtypes=[torch.cfloat, torch.cdouble]), + DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', + 'test_reference_numerics_normal', + device_type='cpu', dtypes=[torch.cfloat, torch.cdouble]), + DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', + 'test_reference_numerics_extremal', + device_type='cuda', dtypes=[torch.cdouble], + active_if=IS_WINDOWS), + DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', + 'test_reference_numerics_large', + device_type='cuda', dtypes=[torch.cdouble], + active_if=IS_WINDOWS), + ), + ), + PythonRefInfo( + "_refs.lerp", + torch_opinfo_name="lerp", + ), + PythonRefInfo( + "_refs.ones", + torch_opinfo_name="ones", + skips=( + # Tests that assume input is a tensor or sequence of tensors + DecorateInfo(unittest.expectedFailure, 'TestMathBits', 'test_neg_view'), + DecorateInfo(unittest.expectedFailure, 'TestMathBits', 'test_conj_view'), + DecorateInfo(unittest.expectedFailure, 'TestMathBits', 'test_neg_conj_view'), + ), + ), + PythonRefInfo( + "_refs.zeros", + torch_opinfo_name="zeros", + skips=( + # Tests that assume input is a tensor or sequence of tensors + DecorateInfo(unittest.expectedFailure, 'TestMathBits', 'test_neg_view'), + DecorateInfo(unittest.expectedFailure, 'TestMathBits', 'test_conj_view'), + DecorateInfo(unittest.expectedFailure, 'TestMathBits', 'test_neg_conj_view'), + ), + ), + PythonRefInfo( + "_refs.cauchy", + torch_opinfo_name="cauchy", + decorators=( + # TODO: RuntimeError: no _refs support for torch.rand_like + DecorateInfo(unittest.skip("TODO: RuntimeError: no _refs support for torch.rand_like"), + 'TestCommon', + 'test_python_ref'), + # AssertionError: Tensor-likes are not close! 
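+ # (descriptive note: random-sampling refs such as cauchy cannot be compared elementwise
+ # against the eager torch output, so the comparison-style tests below are skipped)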
+ DecorateInfo(unittest.skip("Expected: cauchy is not comparable"), + 'TestCommon', + 'test_out'), + DecorateInfo(unittest.skip("Expected: cauchy is not comparable"), + 'TestCommon', + 'test_out_warning'), + DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_python_ref_executor'), + DecorateInfo(unittest.skip("Expected: cauchy is not comparable"), + 'TestCommon', + 'test_python_ref_torch_fallback'), + DecorateInfo(unittest.skip('output is non-deterministic'), 'TestCommon', 'test_compare_cpu'), + DecorateInfo(unittest.expectedFailure, 'TestMathBits', 'test_neg_view'), + ) + ), + PythonRefInfo( + "_refs.exponential", + torch_opinfo_name="exponential", + supports_out=True, + decorators=( + # dtypes that do not support check_uniform_bounds of rand_like + DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_python_ref_meta', + dtypes=(torch.int8, torch.uint8, torch.int16, torch.int32, torch.int64)), + DecorateInfo(unittest.skip('Skipped!'), 'TestCommon', 'test_dtypes'), + + # TODO: RuntimeError: no _refs support for torch.rand_like + DecorateInfo(unittest.skip("TODO: RuntimeError: no _refs support for torch.rand_like"), + 'TestCommon', + 'test_python_ref'), + + # AssertionError: Tensor-likes are not close! + DecorateInfo(unittest.skip("Expected: exponential is not comparable"), + 'TestCommon', + 'test_out'), + DecorateInfo(unittest.skip("Expected: exponential is not comparable"), + 'TestCommon', + 'test_out_warning'), + DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_python_ref_executor'), + DecorateInfo(unittest.skip("Expected: exponential is not comparable"), + 'TestCommon', + 'test_python_ref_torch_fallback'), + DecorateInfo(unittest.skip('output is non-deterministic'), 'TestCommon', 'test_compare_cpu'), + DecorateInfo(unittest.expectedFailure, 'TestMathBits', 'test_neg_view'), + ) + ), + PythonRefInfo( + "_refs.geometric", + torch_opinfo_name="geometric", + supports_out=True, + decorators=( + # dtypes that do not support check_uniform_bounds of rand_like + DecorateInfo(unittest.skip('Skipped!'), 'TestCommon', 'test_dtypes'), + DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_python_ref_meta', + dtypes=(torch.int8, torch.uint8, torch.int16, torch.int32, torch.int64)), + DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_python_ref_torch_fallback', + dtypes=(torch.int8, torch.uint8, torch.int16, torch.int32, torch.int64)), + + # TODO: RuntimeError: no _refs support for torch.rand_like + DecorateInfo(unittest.skip("TODO: RuntimeError: no _refs support for torch.rand_like"), + 'TestCommon', + 'test_python_ref'), + DecorateInfo(unittest.skip("Expected: geometric is not comparable"), + 'TestCommon', + 'test_python_ref_executor', device_type='cuda'), + + # AssertionError: Tensor-likes are not close! 
+ DecorateInfo(unittest.skip("Expected: geometric is not comparable"), + 'TestCommon', + 'test_out'), + DecorateInfo(unittest.skip("Expected: geometric is not comparable"), + 'TestCommon', + 'test_out_warning'), + DecorateInfo(unittest.skip("Expected: geometric is not comparable"), + 'TestCommon', + 'test_python_ref_torch_fallback'), + DecorateInfo(unittest.skip('output is non-deterministic'), 'TestCommon', 'test_compare_cpu'), + DecorateInfo(unittest.expectedFailure, 'TestMathBits', 'test_neg_view'), + ) + ), + PythonRefInfo( + "_refs.log_normal", + torch_opinfo_name="log_normal", + supports_out=True, + decorators=( + # TODO: RuntimeError: no _refs support for torch.rand_like + DecorateInfo(unittest.skip("TODO: RuntimeError: no _refs support for torch.rand_like"), + 'TestCommon', + 'test_python_ref'), + DecorateInfo(unittest.skip("Expected: log_normal is not comparable"), + 'TestCommon', + 'test_python_ref_executor', device_type='cuda'), + + # AssertionError: Tensor-likes are not close! + DecorateInfo(unittest.skip("Expected: log_normal is not comparable"), + 'TestCommon', + 'test_out'), + DecorateInfo(unittest.skip("Expected: log_normal is not comparable"), + 'TestCommon', + 'test_out_warning'), + DecorateInfo(unittest.skip("Expected: log_normal is not comparable"), + 'TestCommon', + 'test_python_ref_torch_fallback'), + DecorateInfo(unittest.skip('output is non-deterministic'), 'TestCommon', 'test_compare_cpu'), + DecorateInfo(unittest.expectedFailure, 'TestMathBits', 'test_neg_view'), + ) + ), + PythonRefInfo( + "_refs.normal", + torch_opinfo_name="normal", + supports_out=True, + decorators=( + # TODO: RuntimeError: no _refs support for torch.rand_like + DecorateInfo(unittest.skip("TODO: RuntimeError: no _refs support for torch.rand_like"), + 'TestCommon', + 'test_python_ref'), + + # AssertionError: Tensor-likes are not close! + DecorateInfo(unittest.skip("Expected: normal is not comparable"), + 'TestCommon', + 'test_out'), + DecorateInfo(unittest.skip("Expected: normal is not comparable"), + 'TestCommon', + 'test_out_warning'), + DecorateInfo(unittest.skip("Expected: normal is not comparable"), + 'TestCommon', + 'test_python_ref_torch_fallback'), + DecorateInfo(unittest.skip("Expected: normal is not comparable"), 'TestDecomp', 'test_comprehensive'), + DecorateInfo(unittest.skip('output is non-deterministic'), 'TestCommon', 'test_compare_cpu'), + DecorateInfo(unittest.skip("make_traced() doesn't set seed properly!"), 'TestCommon', 'test_python_ref_executor'), + DecorateInfo(unittest.expectedFailure, 'TestMathBits', 'test_neg_view'), + DecorateInfo(unittest.expectedFailure, 'TestMathBits', 'test_conj_view'), + DecorateInfo(unittest.expectedFailure, 'TestMathBits', 'test_neg_conj_view'), + ) + ), + PythonRefInfo( + "_refs.normal", + torch_opinfo_name="normal", + torch_opinfo_variant_name="number_mean", + supports_out=True, + decorators=( + # TODO: RuntimeError: no _refs support for torch.rand_like + DecorateInfo(unittest.skip("TODO: RuntimeError: no _refs support for torch.rand_like"), + 'TestCommon', + 'test_python_ref'), + + # AssertionError: Tensor-likes are not close! 
+ DecorateInfo(unittest.skip("Expected: normal is not comparable"), + 'TestCommon', + 'test_out'), + DecorateInfo(unittest.skip("Expected: normal is not comparable"), + 'TestCommon', + 'test_out_warning'), + DecorateInfo(unittest.skip("Expected: normal is not comparable"), + 'TestCommon', + 'test_python_ref_torch_fallback'), + DecorateInfo(unittest.skip("Expected: normal is not comparable"), 'TestDecomp', 'test_comprehensive'), + DecorateInfo(unittest.skip('output is non-deterministic'), 'TestCommon', 'test_compare_cpu'), + DecorateInfo(unittest.skip("make_traced() doesn't set seed properly!"), 'TestCommon', 'test_python_ref_executor'), + DecorateInfo(unittest.expectedFailure, 'TestMathBits', 'test_neg_view'), + DecorateInfo(unittest.expectedFailure, 'TestMathBits', 'test_conj_view'), + DecorateInfo(unittest.expectedFailure, 'TestMathBits', 'test_neg_conj_view'), + ) + ), + PythonRefInfo( + "_refs.normal_", + op=torch.Tensor.normal_, + torch_opinfo_name="normal", + torch_opinfo_variant_name="in_place", + supports_out=False, + decorators=( + # TODO: RuntimeError: no _refs support for torch.rand_like + DecorateInfo(unittest.skip("TODO: RuntimeError: no _refs support for torch.rand_like"), + 'TestCommon', + 'test_python_ref'), + + # AssertionError: Tensor-likes are not close! + DecorateInfo(unittest.skip("Expected: normal is not comparable"), + 'TestCommon', + 'test_out'), + DecorateInfo(unittest.skip("Expected: normal is not comparable"), + 'TestCommon', + 'test_out_warning'), + DecorateInfo(unittest.skip("Expected: normal is not comparable"), + 'TestCommon', + 'test_python_ref_torch_fallback'), + DecorateInfo(unittest.skip("Expected: normal is not comparable"), 'TestDecomp', 'test_comprehensive'), + DecorateInfo(unittest.skip('output is non-deterministic'), 'TestCommon', 'test_compare_cpu'), + DecorateInfo(unittest.skip("make_traced() doesn't set seed properly!"), 'TestCommon', 'test_python_ref_executor'), + DecorateInfo(unittest.expectedFailure, 'TestMathBits', 'test_neg_view'), + DecorateInfo(unittest.expectedFailure, 'TestMathBits', 'test_conj_view'), + DecorateInfo(unittest.expectedFailure, 'TestMathBits', 'test_neg_conj_view'), + ) + ), + PythonRefInfo( + "_refs.arange", + torch_opinfo_name="arange", + skips=( + # Tests that assume input is a tensor or sequence of tensors + DecorateInfo(unittest.expectedFailure, 'TestMathBits', 'test_neg_view'), + DecorateInfo(unittest.expectedFailure, 'TestMathBits', 'test_conj_view'), + DecorateInfo(unittest.expectedFailure, 'TestMathBits', 'test_neg_conj_view'), + ), + ), + PythonRefInfo( + "_refs.linspace", + torch_opinfo_name="linspace", + skips=( + # Tests that assume input is a tensor or sequence of tensors + DecorateInfo(unittest.expectedFailure, 'TestMathBits', 'test_neg_view'), + DecorateInfo(unittest.expectedFailure, 'TestMathBits', 'test_conj_view'), + DecorateInfo(unittest.expectedFailure, 'TestMathBits', 'test_neg_conj_view'), + + # cpu implementation is wrong on some integral types + # https://github.com/pytorch/pytorch/issues/81996 + DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_python_ref_torch_fallback', + dtypes=(torch.int8, torch.uint8, torch.int16, torch.int32, torch.int64), device_type="cpu"), + DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_python_ref', + dtypes=(torch.int8, torch.uint8, torch.int16, torch.int32, torch.int64), device_type="cpu"), + + # cuda implementation is off-by-one on some inputs due to precision issues + # https://github.com/pytorch/pytorch/issues/82230 + 
DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_python_ref_torch_fallback', + dtypes=(torch.uint8, torch.int8, torch.int16, torch.int32, torch.int64), + device_type="cuda"), + DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_python_ref', + dtypes=(torch.uint8, torch.int8, torch.int16, torch.int32, torch.int64), + device_type="cuda"), + DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_python_ref_executor', + dtypes=(torch.uint8, torch.int8, torch.int16, torch.int32, torch.int64), + device_type="cuda"), + ), + ), + PythonRefInfo( + "_refs.linspace", + torch_opinfo_name="linspace", + torch_opinfo_variant_name="tensor_overload", + skips=( + # TypeError: 'int' object is not subscriptable + DecorateInfo(unittest.expectedFailure, 'TestMathBits', 'test_neg_view'), + DecorateInfo(unittest.expectedFailure, 'TestMathBits', 'test_conj_view'), + + # cpu implementation is wrong on some integral types + # https://github.com/pytorch/pytorch/issues/81996 + DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_python_ref_torch_fallback', + dtypes=(torch.int8, torch.uint8, torch.int16, torch.int32, torch.int64), device_type="cpu"), + DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_python_ref', + dtypes=(torch.int8, torch.uint8, torch.int16, torch.int32, torch.int64), device_type="cpu"), + + # cuda implementation is off-by-one on some inputs due to precision issues + # https://github.com/pytorch/pytorch/issues/82230 + DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_python_ref_torch_fallback', + dtypes=(torch.uint8, torch.int8, torch.int16, torch.int32, torch.int64), + device_type="cuda"), + DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_python_ref', + dtypes=(torch.uint8, torch.int8, torch.int16, torch.int32, torch.int64), + device_type="cuda"), + DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_python_ref_executor', + dtypes=(torch.uint8, torch.int8, torch.int16, torch.int32, torch.int64), + device_type="cuda"), + ), + ), + PythonRefInfo( + "_refs.logspace", + torch_opinfo_name="logspace", + skips=( + # Tests that assume input is a tensor or sequence of tensors + DecorateInfo(unittest.expectedFailure, 'TestMathBits', 'test_neg_view'), + DecorateInfo(unittest.expectedFailure, 'TestMathBits', 'test_conj_view'), + DecorateInfo(unittest.expectedFailure, 'TestMathBits', 'test_neg_conj_view'), + + # Off-by-one issue when casting floats to ints + DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_python_ref_torch_fallback', + dtypes=(torch.int16, torch.int32, torch.int64), + device_type="cuda"), + DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_python_ref', + dtypes=(torch.int16, torch.int32, torch.int64), + device_type="cuda"), + DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_python_ref_executor', + dtypes=(torch.int16, torch.int32, torch.int64), + device_type="cuda"), + ), + ), + PythonRefInfo( + "_refs.logspace", + torch_opinfo_name="logspace", + torch_opinfo_variant_name="tensor_overload", + skips=( + # TypeError: 'int' object is not subscriptable + DecorateInfo(unittest.expectedFailure, 'TestMathBits', 'test_neg_view'), + DecorateInfo(unittest.expectedFailure, 'TestMathBits', 'test_conj_view'), + + # Off-by-one issue when casting floats to ints + DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_python_ref_torch_fallback', + dtypes=(torch.int16, torch.int32, torch.int64), + device_type="cuda"), + DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_python_ref', + dtypes=(torch.int16, 
torch.int32, torch.int64), + device_type="cuda"), + DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_python_ref_executor', + dtypes=(torch.int16, torch.int32, torch.int64), + device_type="cuda"), + ), + ), + PythonRefInfo( + "_refs.meshgrid", + torch_opinfo_name="meshgrid", + torch_opinfo_variant_name="variadic_tensors", + ), + PythonRefInfo( + "_refs.take_along_dim", + torch_opinfo_name="take_along_dim", + skips=( + DecorateInfo(unittest.expectedFailure, + 'TestCommon', + 'test_python_ref'), + ), + ), + PythonRefInfo( + "_refs.to", + torch_opinfo_name="to", + ), + PythonRefInfo( + "_refs.triu", + torch_opinfo_name="triu", + ), + PythonRefInfo( + "_refs.tril", + torch_opinfo_name="tril", + ), + PythonRefInfo( + "_refs.triu_indices", + torch_opinfo_name="triu_indices", + # the implementation uses torch.stack that violates view consistency + validate_view_consistency=False, + skips=( + # skip these tests since we have non tensor input + DecorateInfo(unittest.skip('Skipped!'), 'TestCommon', 'test_noncontiguous_samples'), + DecorateInfo(unittest.skip('Skipped!'), 'TestCommon', 'test_variant_consistency_eager'), + DecorateInfo(unittest.skip('Skipped!'), 'TestJit', 'test_variant_consistency_jit'), + DecorateInfo(unittest.skip('Skipped!'), 'TestMathBits', 'test_neg_view'), + )), + PythonRefInfo( + "_refs.tril_indices", + torch_opinfo_name="tril_indices", + # the implementation uses torch.stack that violates view consistency + validate_view_consistency=False, + skips=( + # skip these tests since we have non tensor input + DecorateInfo(unittest.skip('Skipped!'), 'TestCommon', 'test_noncontiguous_samples'), + DecorateInfo(unittest.skip('Skipped!'), 'TestCommon', 'test_variant_consistency_eager'), + DecorateInfo(unittest.skip('Skipped!'), 'TestJit', 'test_variant_consistency_jit'), + DecorateInfo(unittest.skip('Skipped!'), 'TestMathBits', 'test_neg_view'), + )), + PythonRefInfo( + "_refs.meshgrid", + torch_opinfo_name="meshgrid", + torch_opinfo_variant_name="list_of_tensors", + ), + PythonRefInfo( + "_refs.movedim", + aliases=('moveaxis',), + torch_opinfo_name="movedim", + ), + PythonRefInfo( + "_refs.bucketize", + torch_opinfo_name="bucketize", + skips=( + # RuntimeError: It appears that you're trying to get value out of a tracing tensor with + # aten._local_scalar_dense.default - erroring out! [...] 
+ # triggered by mid_val = boundaries[mid] + DecorateInfo(unittest.expectedFailure, "TestCommon", "test_python_ref_executor"), + ) + ), + PythonRefInfo( + "_refs.equal", + torch_opinfo_name="equal", + skips=( + # RuntimeError: Cannot cast FakeTensor to number + DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_python_ref_meta',), + ) + ), + ElementwiseUnaryPythonRefInfo( + "_refs.atan", + torch_opinfo_name="atan", + decorators=(precisionOverride({torch.bfloat16: 1e-2}),), + skips=( + DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', + 'test_reference_numerics_small', + active_if=TEST_WITH_ROCM, device_type='cuda', + dtypes=[torch.complex64, torch.complex128]), + DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', + 'test_reference_numerics_extremal', + active_if=TEST_WITH_ROCM, device_type='cuda', + dtypes=[torch.complex128]), + DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', + 'test_reference_numerics_extremal', + device_type='cpu', dtypes=[torch.cfloat, torch.cdouble]), + DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', + 'test_reference_numerics_large', + device_type='cpu', dtypes=[torch.cfloat, torch.cdouble]), + DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', + 'test_reference_numerics_small', + device_type='cpu', dtypes=[torch.cfloat, torch.cdouble]), + DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', + 'test_reference_numerics_extremal', + device_type='cuda', dtypes=[torch.cfloat, torch.cdouble], + active_if=IS_WINDOWS), + DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', + 'test_reference_numerics_large', + device_type='cuda', dtypes=[torch.cfloat, torch.cdouble], + active_if=IS_WINDOWS), + ), + ), + ElementwiseUnaryPythonRefInfo( + "_refs.atanh", + torch_opinfo_name="atanh", + decorators=(precisionOverride({torch.bfloat16: 1e-2}),), + skips=( + DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', + 'test_reference_numerics_small', + device_type='cpu', dtypes=[torch.cfloat, torch.cdouble]), + DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', + 'test_reference_numerics_extremal', + device_type='cpu', dtypes=[torch.cfloat, torch.cdouble]), + DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', + 'test_reference_numerics_large', + device_type='cpu', dtypes=[torch.cfloat, torch.cdouble]), + DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', + 'test_reference_numerics_extremal', + device_type='cuda', dtypes=[torch.cfloat, torch.cdouble], + active_if=IS_WINDOWS), + DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', + 'test_reference_numerics_large', + device_type='cuda', dtypes=[torch.cfloat], + active_if=IS_WINDOWS), + DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', + 'test_reference_numerics_extremal', + active_if=TEST_WITH_ROCM, device_type='cuda', + dtypes=[torch.complex128]), + ), + ), + ElementwiseUnaryPythonRefInfo( + "_refs.bitwise_not", + torch_opinfo_name="bitwise_not", + ), + ElementwiseUnaryPythonRefInfo( + "_refs.ceil", + torch_opinfo_name="ceil", + # Fails on int32 + # https://github.com/pytorch/pytorch/issues/85258 + ), + PythonRefInfo( + "_refs.item", + torch_opinfo_name="item", + skips=( + # RuntimeError: Cannot cast FakeTensor(FakeTensor(..., device='meta', size=()), cpu) to number + DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_python_ref_meta'), + # ValueError: Can't convert a tensor with 10 elements to a number! 
+ DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_python_ref_errors'),), + ), + ElementwiseUnaryPythonRefInfo( + "_refs.conj_physical", + torch_opinfo_name="conj_physical", + ), + ElementwiseUnaryPythonRefInfo( + "_refs.cos", + torch_opinfo_name="cos", + decorators=(precisionOverride({torch.bfloat16: 1e-2}),), + skips=( + DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', + 'test_reference_numerics_large', + dtypes=(torch.cfloat, torch.cdouble,), device_type='cpu', + active_if=IS_WINDOWS), + # This fails on CUDA but passes on ROCm + DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', + 'test_reference_numerics_large', + dtypes=(torch.cdouble,), device_type='cuda'), + DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', + 'test_reference_numerics_extremal', + dtypes=[torch.cfloat, torch.cdouble], active_if=IS_WINDOWS), + DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', + 'test_reference_numerics_extremal', + device_type='cpu', + dtypes=[torch.cfloat, torch.cdouble], active_if=IS_MACOS), + # AssertionError: Tensor-likes are not close! + # Greatest absolute difference: nan at index (700,) (up to 1e-05 allowed) + # Greatest relative difference: nan at index (700,) (up to 0.001 allowed) + DecorateInfo(unittest.expectedFailure, 'TestUnaryUfuncs', + 'test_reference_numerics_large', + device_type='cuda', + dtypes=(torch.chalf,), active_if=IS_WINDOWS), + ), + ), + ElementwiseUnaryPythonRefInfo( + "_refs.cosh", + torch_opinfo_name="cosh", + skips=( + # Reference: https://github.com/pytorch/pytorch/issues/48641 + DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', + 'test_reference_numerics_large', + device_type='cpu', dtypes=[torch.int8]), + DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', + 'test_reference_numerics_large', + dtypes=[torch.cdouble]), + DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', + 'test_reference_numerics_extremal', + dtypes=[torch.cfloat, torch.cdouble], active_if=IS_WINDOWS), + DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', + 'test_reference_numerics_large', + dtypes=[torch.cfloat, torch.cdouble], active_if=IS_WINDOWS), + DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', + 'test_reference_numerics_extremal', + device_type='cpu', + dtypes=[torch.cfloat, torch.cdouble], active_if=IS_MACOS), + DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', + 'test_reference_numerics_large', + device_type='cpu', + dtypes=[torch.cfloat, torch.cdouble], active_if=IS_MACOS), + # AssertionError: Tensor-likes are not close! 
+ # Greatest absolute difference: nan at index (6000,) (up to 1e-05 allowed) + # Greatest relative difference: nan at index (6000,) (up to 0.001 allowed) + DecorateInfo(unittest.expectedFailure, 'TestUnaryUfuncs', + 'test_reference_numerics_large', + device_type='cuda', + dtypes=(torch.chalf,), active_if=IS_WINDOWS), + ), + ), + ElementwiseUnaryPythonRefInfo( + "_refs.digamma", + torch_opinfo_name="digamma", + ), + ElementwiseUnaryPythonRefInfo( + "_refs.erf", + torch_opinfo_name="erf", + ), + ElementwiseUnaryPythonRefInfo( + "_refs.erfinv", + torch_opinfo_name="erfinv", + decorators=(precisionOverride({torch.float16: 1e-2, + torch.bfloat16: 1e-2, + torch.float32: 1e-4}),), + skips=( + # Reference: https://github.com/pytorch/pytorch/pull/49155#issuecomment-742664611 + DecorateInfo( + unittest.skip("Skipped!"), 'TestUnaryUfuncs', + 'test_reference_numerics_extremal', + active_if=TEST_SCIPY and version.parse(scipy.__version__) < version.parse("1.4.0")), + DecorateInfo( + unittest.skip("Skipped!"), 'TestUnaryUfuncs', + 'test_reference_numerics_large', + active_if=TEST_SCIPY and version.parse(scipy.__version__) < version.parse("1.4.0")), + DecorateInfo( + unittest.skip("Skipped!"), 'TestUnaryUfuncs', + 'test_reference_numerics_small', + active_if=TEST_SCIPY and version.parse(scipy.__version__) < version.parse("1.4.0")), + ), + ), + ElementwiseUnaryPythonRefInfo( + "_refs.erfc", + torch_opinfo_name="erfc", + ), + ElementwiseUnaryPythonRefInfo( + "_refs.exp", + torch_opinfo_name="exp", + skips=( + # Reference: https://github.com/pytorch/pytorch/issues/48010 + DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', + 'test_reference_numerics_extremal', + device_type='cpu', dtypes=[torch.cfloat, torch.cdouble]), + DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', + 'test_reference_numerics_large', + device_type='cpu', dtypes=[torch.cfloat, torch.cdouble]), + ), + ), + ElementwiseUnaryPythonRefInfo( + "_refs.expm1", + torch_opinfo_name="expm1", + ), + ElementwiseUnaryPythonRefInfo( + "_refs.exp2", + torch_opinfo_name="exp2", + skips=( + DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', + 'test_reference_numerics_large', + dtypes=[torch.cdouble]), + # Reference: https://github.com/pytorch/pytorch/issues/48010 + DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', + 'test_reference_numerics_extremal', + device_type='cpu', dtypes=[torch.cfloat, torch.cdouble]), + DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', + 'test_reference_numerics_large', + device_type='cpu', dtypes=[torch.cfloat, torch.cdouble]), + ), + ), + ElementwiseUnaryPythonRefInfo( + "_refs.fill", + torch_opinfo_name="fill", + supports_out=True, + ), + ElementwiseUnaryPythonRefInfo( + "_refs.floor", + torch_opinfo_name="floor", + # Fails on int32 + # https://github.com/pytorch/pytorch/issues/85258 + ), + ElementwiseUnaryPythonRefInfo( + "_refs.frac", + torch_opinfo_name="frac", + skips=( + DecorateInfo( + unittest.skip("Skipped!"), 'TestUnaryUfuncs', + 'test_reference_numerics_extremal', + dtypes=(torch.bfloat16, torch.float16, torch.float32, torch.float64)), + ), + ), + ElementwiseUnaryPythonRefInfo( + "_refs.imag", + torch_opinfo_name="imag", + ), + ElementwiseUnaryPythonRefInfo( + "_refs.isfinite", + torch_opinfo_name="isfinite", + supports_out=True, + ), + ElementwiseUnaryPythonRefInfo( + "_refs.isinf", + torch_opinfo_name="isinf", + supports_out=True, + ), + ElementwiseUnaryPythonRefInfo( + "_refs.isposinf", + torch_opinfo_name="isposinf", + supports_out=True, + ), + 
ElementwiseUnaryPythonRefInfo( + "_refs.isneginf", + torch_opinfo_name="isneginf", + supports_out=True, + ), + ElementwiseUnaryPythonRefInfo( + "_refs.isnan", + torch_opinfo_name="isnan", + supports_out=True, + ), + ElementwiseUnaryPythonRefInfo( + "_refs.isreal", + torch_opinfo_name="isreal", + supports_out=True, + ), + ElementwiseUnaryPythonRefInfo( + "_refs.i0", + torch_opinfo_name="i0", + decorators=(precisionOverride({torch.bfloat16: 3e-1, + torch.float16: 5e-1}),), + skips=( + DecorateInfo(unittest.skip("Skipped!"), + 'TestUnaryUfuncs', + 'test_reference_numerics_large', + dtypes=(torch.int8,)), + ), + ), + ElementwiseUnaryPythonRefInfo( + "_refs.lgamma", + torch_opinfo_name="lgamma", + decorators=(precisionOverride({torch.float16: 7e-1}),), + skips=( + # Reference: https://github.com/pytorch/pytorch/pull/50140#issuecomment-756150214 + DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', + 'test_reference_numerics_extremal', + dtypes=[torch.float32, torch.float64], active_if=IS_WINDOWS), + DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', + 'test_reference_numerics_large', + dtypes=[torch.float32, torch.float64], active_if=IS_WINDOWS), + ), + ), + ElementwiseUnaryPythonRefInfo( + "_refs.special.multigammaln", + torch_opinfo_name="mvlgamma", + torch_opinfo_variant_name="mvlgamma_p_1", + skips=skips_mvlgamma(), + ), + ElementwiseUnaryPythonRefInfo( + "_refs.special.multigammaln", + torch_opinfo_name="mvlgamma", + torch_opinfo_variant_name="mvlgamma_p_3", + skips=skips_mvlgamma(), + ), + ElementwiseUnaryPythonRefInfo( + "_refs.special.multigammaln", + torch_opinfo_name="mvlgamma", + torch_opinfo_variant_name="mvlgamma_p_5", + skips=skips_mvlgamma(), + ), + ElementwiseUnaryPythonRefInfo( + "_refs.log", + torch_opinfo_name="log", + decorators=(precisionOverride({torch.bfloat16: 5e-2}),), + skips=( + DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', + 'test_reference_numerics_extremal', + device_type='cpu', dtypes=[torch.cfloat, torch.cdouble], + active_if=IS_WINDOWS), + ), + ), + ElementwiseUnaryPythonRefInfo( + "_refs.log1p", + torch_opinfo_name="log1p", + ), + ElementwiseUnaryPythonRefInfo( + "_refs.log10", + torch_opinfo_name="log10", + decorators=(precisionOverride({torch.bfloat16: 5e-2}),), + skips=( + DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', + 'test_reference_numerics_extremal', + device_type='cpu', dtypes=[torch.cfloat, torch.cdouble], + active_if=IS_WINDOWS), + ), + ), + ElementwiseUnaryPythonRefInfo( + "_refs.log2", + torch_opinfo_name="log2", + decorators=(precisionOverride({torch.bfloat16: 1e-1}),), + skips=( + DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', + 'test_reference_numerics_extremal', + dtypes=[torch.cfloat, torch.cdouble]), + ), + ), + PythonRefInfo( + "_refs.logsumexp", + torch_opinfo_name="logsumexp", + # When keepdim=False logsumexp function uses squeeze operation + # that is not yet exposed in nvFuser's Python API. 
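+ # Note: a PythonRefInfo entry reuses the dtypes and sample inputs of the OpInfo named by
+ # torch_opinfo_name, so only ref-specific overrides (skips, decorators, supports_out) appear here.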
+ ), + PythonRefInfo( + "_refs.log_softmax", + torch_opinfo_name="log_softmax", + torch_opinfo_variant_name="with_dtype", + ), + ElementwiseUnaryPythonRefInfo( + "_refs.nan_to_num", + torch_opinfo_name="nan_to_num", + ), + ElementwiseUnaryPythonRefInfo( + "_refs.neg", + torch_opinfo_name="neg", + ), + ElementwiseUnaryPythonRefInfo( + "_refs.positive", + torch_opinfo_name="positive", + ), + ElementwiseUnaryPythonRefInfo( + "_refs.real", + torch_opinfo_name="real", + ), + ElementwiseUnaryPythonRefInfo( + "_refs.reciprocal", + torch_opinfo_name="reciprocal", + skips=( + # Reference: https://github.com/pytorch/pytorch/issues/45690 + DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', + 'test_reference_numerics_extremal', + dtypes=[torch.cfloat, torch.cdouble]), + ), + ), + ElementwiseUnaryPythonRefInfo( + "_refs.round", + torch_opinfo_name="round", + # Fails on int32 + # https://github.com/pytorch/pytorch/issues/85258 + skips=( + DecorateInfo(toleranceOverride({torch.bfloat16: tol(atol=1e-3, rtol=0.016)}), + "TestUnaryUfuncs", "test_reference_numerics_extremal", + device_type="cuda"), + DecorateInfo(toleranceOverride({torch.bfloat16: tol(atol=1e-3, rtol=0.016)}), + "TestUnaryUfuncs", "test_reference_numerics_normal", + device_type="cuda"), + ), + ), + ElementwiseUnaryPythonRefInfo( + "_refs.rsqrt", + torch_opinfo_name="rsqrt", + decorators=(precisionOverride({torch.half: 5e-2}),), + skips=( + DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', + 'test_reference_numerics_extremal', + dtypes=(torch.cfloat, torch.cdouble)), + # AssertionError: Tensor-likes are not close! + # Greatest absolute difference: nan at index (700,) (up to 0.01 allowed) + # Greatest relative difference: nan at index (700,) (up to 0.001 allowed) + DecorateInfo(unittest.expectedFailure, 'TestUnaryUfuncs', + 'test_reference_numerics_large', + dtypes=(torch.chalf,)), + ), + ), + ElementwiseUnaryPythonRefInfo( + "_refs.sigmoid", + torch_opinfo_name="sigmoid", + aliases=('_refs.special.expit',), + # Reference: https://github.com/pytorch/pytorch/issues/56012 + handles_complex_extremal_values=False, + handles_large_floats=False, + decorators=(precisionOverride({torch.float16: 1e-2, + torch.complex64: 1e-1, + torch.bfloat16: 1e-2}),), + skips=( + # Reference: https://github.com/pytorch/pytorch/issues/56012 + DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', + 'test_reference_numerics_extremal', + dtypes=[torch.complex64, torch.cdouble]), + DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', + 'test_reference_numerics_large', + dtypes=[torch.chalf, torch.complex64, torch.cdouble]) + ), + ), + ElementwiseUnaryPythonRefInfo( + "_refs.sign", + torch_opinfo_name="sign", + skips=( + # Reference: https://github.com/pytorch/pytorch/issues/41245 + DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', + 'test_reference_numerics_extremal', + dtypes=[torch.bfloat16, torch.float16, torch.float32, + torch.float64]), + ), + ), + ElementwiseUnaryPythonRefInfo( + "_refs.sgn", + torch_opinfo_name="sgn", + # This is an issue with the vectorised abs on CPU + handles_complex_extremal_values=False, + handles_large_floats=False, + skips=( + # Reference: https://github.com/pytorch/pytorch/issues/41245 + DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', + 'test_reference_numerics_extremal', + dtypes=[torch.bfloat16, torch.float16, torch.float32, + torch.float64]), + ), + ), + ElementwiseUnaryPythonRefInfo( + "_refs.signbit", + torch_opinfo_name="signbit", + ), + ElementwiseUnaryPythonRefInfo( + "_refs.sin", + 
torch_opinfo_name="sin", + decorators=(precisionOverride({torch.bfloat16: 1e-2}),), + skips=( + # Fails on CUDA but passes on ROCm + DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', + 'test_reference_numerics_large', + dtypes=(torch.cdouble,), device_type='cuda'), + DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', + 'test_reference_numerics_extremal', + dtypes=(torch.cfloat, torch.cdouble,), device_type='cpu', + active_if=IS_WINDOWS), + DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', + 'test_reference_numerics_large', + dtypes=(torch.cfloat, torch.cdouble,), device_type='cpu', + active_if=IS_WINDOWS), + ), + ), + ElementwiseUnaryPythonRefInfo( + "_refs.sinc", + torch_opinfo_name="sinc", + decorators=(precisionOverride({torch.bfloat16: 1e-2, + torch.float16: 1e-2}),), + skips=( + # Reference: https://github.com/pytorch/pytorch/issues/49133 + DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', + 'test_reference_numerics_small', + dtypes=[torch.cfloat]), + ), + ), + ElementwiseUnaryPythonRefInfo( + "_refs.sinh", + torch_opinfo_name="sinh", + decorators=(precisionOverride({torch.float16: 1e-2}),), + skips=( + DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', + 'test_reference_numerics_extremal', + device_type='cpu', dtypes=[torch.cfloat, torch.cdouble], + active_if=(IS_MACOS or IS_WINDOWS)), + DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', + 'test_reference_numerics_large', + device_type='cpu', dtypes=[torch.cfloat, torch.cdouble], + active_if=(IS_MACOS or IS_WINDOWS)), + DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', + 'test_reference_numerics_large', + dtypes=(torch.cdouble,)), + # Reference: https://github.com/pytorch/pytorch/issues/48641 + DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', + 'test_reference_numerics_large', + device_type='cpu', dtypes=[torch.int8]), + ), + ), + PythonRefInfo( + "_refs.softmax", + torch_opinfo_name="softmax", + torch_opinfo_variant_name="with_dtype", + ), + ElementwiseUnaryPythonRefInfo( + "_refs.sqrt", + torch_opinfo_name="sqrt", + decorators=( + precisionOverride({torch.bfloat16: 7e-2}), + DecorateInfo( + toleranceOverride({torch.chalf: tol(atol=1e-2, rtol=0)}), + 'TestUnaryUfuncs', 'test_reference_numerics_large'), + ), + skips=( + # Reference: https://github.com/pytorch/pytorch/issues/47358 + DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', + 'test_reference_numerics_large', + device_type='cpu', dtypes=(torch.cfloat, torch.cdouble), + active_if=IS_MACOS), + # Reference: https://github.com/pytorch/pytorch/pull/47293#issuecomment-721774436 + DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', + 'test_reference_numerics_large', + dtypes=(torch.bfloat16,)), + ), + ), + ElementwiseUnaryPythonRefInfo( + "_refs.square", + torch_opinfo_name="square", + decorators=(precisionOverride({torch.complex64: 3e-4, torch.bfloat16: 3e-1}),), + skips=( + # AssertionError: Reference result was farther (2.2417024338305655e-07) from the precise computation + DecorateInfo(unittest.skip("Skipped!"), 'TestCommon', 'test_python_ref_executor', dtypes=(torch.complex64,)), + # Reference: https://github.com/pytorch/pytorch/issues/52549 + DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', + 'test_reference_numerics_large', + dtypes=[torch.cfloat, torch.cdouble]), + DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', + 'test_reference_numerics_extremal', + device_type='cuda', dtypes=[torch.cfloat, torch.cdouble]), + ), + ), + ElementwiseUnaryPythonRefInfo( + "_refs.tan", + 
torch_opinfo_name="tan", + decorators=[ + DecorateInfo( + toleranceOverride({torch.complex64: tol(atol=1e-04, rtol=1e-05)}), + 'TestUnaryUfuncs', 'test_reference_numerics_extremal', device_type='cuda'), + ], + skips=( + DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', + 'test_reference_numerics_extremal', + device_type='cpu', dtypes=[torch.cfloat, torch.cdouble], + active_if=(IS_MACOS or IS_WINDOWS)), + DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', + 'test_reference_numerics_large', + device_type='cpu', dtypes=[torch.cfloat, torch.cdouble], + active_if=(IS_MACOS or IS_WINDOWS)), + ) + ), + ElementwiseUnaryPythonRefInfo( + "_refs.tanh", + torch_opinfo_name="tanh", + decorators=[ + DecorateInfo( + toleranceOverride({torch.complex64: tol(atol=1e-04, rtol=2e-05)}), + 'TestUnaryUfuncs', 'test_reference_numerics_extremal', device_type='cuda'), + ], + skips=( + DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', + 'test_reference_numerics_extremal', + device_type='cpu', dtypes=[torch.cfloat, torch.cdouble], + active_if=(IS_MACOS or IS_WINDOWS)), + DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', + 'test_reference_numerics_large', + device_type='cpu', dtypes=[torch.cfloat, torch.cdouble], + active_if=(IS_MACOS or IS_WINDOWS)), + ), + ), + ElementwiseUnaryPythonRefInfo( + "_refs.trunc", + torch_opinfo_name="trunc", + # Fails on int32 + # https://github.com/pytorch/pytorch/issues/85258 + ), + PythonRefInfo( + "_refs.special.log_softmax", + torch_opinfo_name="log_softmax", # alias + torch_opinfo_variant_name="with_dtype", + supports_out=False, + ), + PythonRefInfo( + "_refs.special.softmax", + torch_opinfo_name="softmax", # alias + torch_opinfo_variant_name="with_dtype", + supports_out=False, + ), + # + # Elementwise Unary Special OpInfos + # + ElementwiseUnaryPythonRefInfo( + "_refs.special.logit", + torch_opinfo_name="logit", + ), + # + # Elementwise Unary nn.functional OpInfos + # + PythonRefInfo( + "_refs.nn.functional.alpha_dropout", + torch_opinfo_name="nn.functional.alpha_dropout", + decorators=( + DecorateInfo(unittest.skip("Expected: dropout is not comparable"), + 'TestCommon', + 'test_python_ref'), + # AssertionError: Tensor-likes are not close! + DecorateInfo(unittest.skip("Expected: dropout is not comparable"), + 'TestCommon', + 'test_python_ref_torch_fallback'), + DecorateInfo(unittest.skip("Expected: dropout is not comparable"), + 'TestCommon', + 'test_python_ref_executor', device_type='cuda'), + # AssertionError: Tensor-likes are not close! + DecorateInfo(unittest.skip("Expected: dropout is not comparable"), + 'TestMathBits', + 'test_neg_view'), + # AssertionError: Tensor-likes are not close! 
+ DecorateInfo(unittest.skip("Expected: dropout is not comparable"), + 'TestCommon', + 'test_compare_cpu'), + ) + ), + ElementwiseUnaryPythonRefInfo( + "_refs.nn.functional.celu", + torch_opinfo_name="nn.functional.celu", + supports_out=True, + ), + ElementwiseUnaryPythonRefInfo( + "_refs.nn.functional.threshold", + torch_opinfo_name="nn.functional.threshold", + supports_out=True, + ), + PythonRefInfo( + "_refs.nn.functional.dropout", + torch_opinfo_name="nn.functional.dropout", + decorators=( + DecorateInfo(unittest.skip("Expected: dropout is not comparable"), + 'TestCommon', + 'test_python_ref'), + DecorateInfo(unittest.skip("Expected: dropout is not comparable"), + 'TestCommon', + 'test_python_ref_torch_fallback'), + DecorateInfo(unittest.skip("Expected: dropout is not comparable"), + 'TestCommon', + 'test_out'), + DecorateInfo(unittest.skip("Expected: dropout is not comparable"), + 'TestCommon', + 'test_out_warning'), + DecorateInfo(unittest.skip("Expected: dropout is not comparable"), + 'TestMathBits', + 'test_conj_view'), + DecorateInfo(unittest.skip("Expected: dropout is not comparable"), + 'TestMathBits', + 'test_neg_conj_view'), + DecorateInfo(unittest.skip("Expected: dropout is not comparable"), + 'TestMathBits', + 'test_neg_view'), + # dropout is not comparable + DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_python_ref_executor'), + DecorateInfo(unittest.skip('output is non-deterministic'), 'TestCommon', 'test_compare_cpu'), + ) + ), + ElementwiseUnaryPythonRefInfo( + "_refs.nn.functional.elu", + torch_opinfo_name="nn.functional.elu", + supports_out=True, + decorators=[ + DecorateInfo( + toleranceOverride({ + torch.float16: tol(atol=1e-03, rtol=1.2e-03), + torch.bfloat16: tol(atol=1e-03, rtol=1.2e-03) + }), + 'TestUnaryUfuncs', device_type='cuda', + ), ], + ), + ElementwiseUnaryPythonRefInfo( + "_refs.nn.functional.hardtanh", + torch_opinfo_name="nn.functional.hardtanh", + supports_out=True, + ), + PythonRefInfo( # TODO: Port this to an UnaryOpInfo + "_refs.nn.functional.gelu", + torch_opinfo_name="nn.functional.gelu", + ), + PythonRefInfo( + "_refs.nn.functional.layer_norm", + torch_opinfo_name="nn.functional.layer_norm", + skips=( + # Reference result was farther (3.5762786809723224e-07) from the precise computation + # than the torch result was (2.5068410824946596e-07)! 
+ DecorateInfo(unittest.skip("Skipped!"), 'TestCommon', 'test_python_ref', + dtypes=(torch.float32,), device_type='cpu'), + ), + ), + PythonRefInfo( + "_refs.nn.functional.glu", + torch_opinfo_name="nn.functional.glu", + supports_out=True, + ), + PythonRefInfo( + "_refs.nn.functional.pairwise_distance", + torch_opinfo_name="nn.functional.pairwise_distance", + supports_out=True, + ), + PythonRefInfo( + "_refs.nn.functional.pdist", + torch_opinfo_name="nn.functional.pdist", + supports_out=True, + skips=( + # RunTimeError: no _refs support for torch.Tensor.index_select + DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_python_ref'), + )), + PythonRefInfo( + "_refs.nn.functional.leaky_relu", + torch_opinfo_name="nn.functional.leaky_relu", + supports_out=True, + ), + PythonRefInfo( + "_refs.nn.functional.log_softmax", + torch_opinfo_name="log_softmax", # alias + torch_opinfo_variant_name="with_dtype", + supports_out=False, + ), + PythonRefInfo( + "_refs.nn.functional.poisson_nll_loss", + torch_opinfo_name="nn.functional.poisson_nll_loss", + ), + ElementwiseUnaryPythonRefInfo( + "_refs.nn.functional.prelu", + torch_opinfo_name="nn.functional.prelu", + ), + ElementwiseUnaryPythonRefInfo( + "_refs.nn.functional.relu", + torch_opinfo_name="nn.functional.relu", + supports_out=True, + ), + ElementwiseUnaryPythonRefInfo( + "_refs.nn.functional.relu6", + torch_opinfo_name="nn.functional.relu6", + supports_out=True, + ), + ElementwiseUnaryPythonRefInfo( + "_refs.nn.functional.mish", + torch_opinfo_name="nn.functional.mish", + supports_out=True, + decorators=[ + DecorateInfo( + toleranceOverride({torch.float16: tol(atol=1e-02, rtol=1e-03)}), + 'TestUnaryUfuncs',), ], + ), + ElementwiseUnaryPythonRefInfo( + "_refs.nn.functional.selu", + torch_opinfo_name="nn.functional.selu", + supports_out=True, + decorators=[ + DecorateInfo( + toleranceOverride({ + torch.float16: tol(atol=1e-2, rtol=1.8e-2), + torch.bfloat16: tol(atol=1e-2, rtol=1.8e-2) + }), + 'TestUnaryUfuncs', device_type='cuda', + ), ], + ), + PythonRefInfo( + "_refs.nn.functional.softmax", + torch_opinfo_name="softmax", # alias + torch_opinfo_variant_name="with_dtype", + supports_out=False, + ), + PythonRefInfo( + "_refs.nn.functional.softmin", + torch_opinfo_name="nn.functional.softmin", + torch_opinfo_variant_name="with_dtype", + supports_out=False, + ), + ElementwiseUnaryPythonRefInfo( + "_refs.nn.functional.softplus", + torch_opinfo_name="nn.functional.softplus", + ), + PythonRefInfo( + "_refs.nn.functional.l1_loss", + torch_opinfo_name="nn.functional.l1_loss", + ), + PythonRefInfo( + "_refs.nn.functional.margin_ranking_loss", + torch_opinfo_name="nn.functional.margin_ranking_loss", + ), + PythonRefInfo( + "_refs.nn.functional.mse_loss", + torch_opinfo_name="nn.functional.mse_loss", + ), + PythonRefInfo( + "_refs.nn.functional.smooth_l1_loss", + torch_opinfo_name="nn.functional.smooth_l1_loss", + ), + PythonRefInfo( + "_refs.nn.functional.hinge_embedding_loss", + torch_opinfo_name="nn.functional.hinge_embedding_loss", + ), + PythonRefInfo( + "_refs.nn.functional.nll_loss", + torch_opinfo_name="nn.functional.nll_loss", + # The corresponding PyTorch op doesn't support out. But the ref is + # registered as a decomp and ATen has an out variant. + supports_out=True, + # For simpler indexing, we flatten target indices, then reshape the result tensor. + # This creates inconsistent view state with reference impl. 
+ validate_view_consistency=False, + skips=( + # RuntimeError: It appears that you're trying to get value out of a tracing tensor - erroring out! + DecorateInfo( + unittest.expectedFailure, 'TestCommon', 'test_python_ref_executor', device_type="cuda" + ), + ), + ), + PythonRefInfo( + "_refs.nn.functional.huber_loss", + torch_opinfo_name="nn.functional.huber_loss", + # The corresponding PyTorch op doesn't support out. But the ref is + # registered as a decomp and ATen has an out variant. + supports_out=True, + ), + ElementwiseUnaryPythonRefInfo( + "_refs.nn.functional.tanhshrink", + torch_opinfo_name="nn.functional.tanhshrink", + decorators=[ + DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', + 'test_reference_numerics_normal', + device_type='cpu', dtypes=[torch.cfloat, torch.cdouble]), + DecorateInfo( + toleranceOverride({torch.bfloat16: tol(atol=1e-02, rtol=1.6e-02), + torch.complex64: tol(atol=6e-04, rtol=1e-05)}), + 'TestUnaryUfuncs', 'test_reference_numerics_extremal', device_type='cuda'), + ], + skips=( + # in each case, pytorch will produce a nan while numpy will not + DecorateInfo(unittest.skip("Fails on some jobs works on others!"), + 'TestUnaryUfuncs', "test_reference_numerics_large", + dtypes=(torch.complex64, torch.complex128), + active_if=(IS_MACOS)), + DecorateInfo(unittest.skip("Fails on some jobs works on others!"), + 'TestUnaryUfuncs', "test_reference_numerics_extremal", + dtypes=(torch.complex64, torch.complex128), + device_type='cpu', + active_if=(IS_MACOS or IS_WINDOWS)), + ), + ), + ElementwiseUnaryPythonRefInfo( + "_refs.nn.functional.hardshrink", + torch_opinfo_name="nn.functional.hardshrink", + ), + ElementwiseUnaryPythonRefInfo( + "_refs.nn.functional.softshrink", + torch_opinfo_name="nn.functional.softshrink", + ), + # + # Elementwise Binary Reference OpInfos + # + ElementwiseBinaryPythonRefInfo( + "_refs.add", + torch_opinfo_name="add", + # https://github.com/pytorch/pytorch/issues/76944 + supports_two_python_scalars=True, + supports_one_python_scalar=True, + decorators=( + DecorateInfo( + toleranceOverride({torch.chalf: tol(atol=1e-2, rtol=0)}), + 'TestBinaryUfuncs', 'test_reference_numerics'), + ), + skips=( + DecorateInfo(unittest.skip("Skipped!"), + 'TestBinaryUfuncs', + 'test_reference_numerics_extremal_values', + dtypes=(torch.complex64, torch.complex128)), + ), + ), + ElementwiseBinaryPythonRefInfo( + "_refs.atan2", + torch_opinfo_name="atan2", + ), + ElementwiseBinaryPythonRefInfo( + "_refs.bitwise_and", + torch_opinfo_name="bitwise_and", + ), + ElementwiseBinaryPythonRefInfo( + "_refs.bitwise_left_shift", + torch_opinfo_name="bitwise_left_shift", + skips=( + # https://github.com/pytorch/pytorch/issues/70904 + DecorateInfo(unittest.skip("Some inputs produce undefined outputs"), 'TestCommon', 'test_compare_cpu'), + ), + ), + ElementwiseBinaryPythonRefInfo( + "_refs.bitwise_right_shift", + torch_opinfo_name="bitwise_right_shift", + skips=( + # # https://github.com/pytorch/pytorch/issues/70904 + DecorateInfo(unittest.skip("Skipped some inputs produce undefined outputs"), 'TestCommon', 'test_compare_cpu'), + ), + ), + ElementwiseBinaryPythonRefInfo( + "_refs.bitwise_or", + torch_opinfo_name="bitwise_or", + ), + ElementwiseBinaryPythonRefInfo( + "_refs.bitwise_xor", + torch_opinfo_name="bitwise_xor", + ), + ElementwiseBinaryPythonRefInfo( + "_refs.copysign", + torch_opinfo_name="copysign", + skips=( + # RuntimeError: Expected divisor (b) to be on the same device (cuda:0) as dividend (a), but it is found on cpu! 
+ DecorateInfo(unittest.skip("Skipped!"), 'TestBinaryUfuncs', 'test_type_promotion'), + # FIXME output 0: meta disagrees with real impl + DecorateInfo(unittest.expectedFailure, 'TestMeta', 'test_binary_ufuncs_mixed_dtype'), + ) + ), + ElementwiseBinaryPythonRefInfo( + "_refs.div", + torch_opinfo_name="div", + torch_opinfo_variant_name="no_rounding_mode", + # https://github.com/pytorch/pytorch/issues/76944 + supports_two_python_scalars=True, + supports_one_python_scalar=True, + skips=( + # NotImplementedError: argument of type: + DecorateInfo( + unittest.skip("Skipped!"), 'TestCommon', 'test_python_ref_executor', + dtypes=(torch.complex32, torch.complex64, torch.complex128,) + ), + # Reference result was farther (0.7433461727239705) from the precise + # computation than the torch result was (nan)! + DecorateInfo( + unittest.expectedFailure, 'TestCommon', 'test_python_ref', + dtypes=(torch.complex32,), device_type="cuda" + ), + # Reference result was farther (0.7433461727239705) from the precise + # computation than the torch result was (nan)! + DecorateInfo( + unittest.expectedFailure, 'TestCommon', 'test_python_ref_torch_fallback', + dtypes=(torch.complex32,), device_type="cuda" + ), + ), + ), + ElementwiseBinaryPythonRefInfo( + "_refs.div", + torch_opinfo_name="div", + torch_opinfo_variant_name="trunc_rounding", + # https://github.com/pytorch/pytorch/issues/76944 + supports_two_python_scalars=True, + supports_one_python_scalar=True, + decorators=( + # See https://github.com/pytorch/pytorch/issues/111126 + DecorateInfo(unittest.expectedFailure, 'TestBinaryUfuncs', 'test_type_promotion'), + ), + ), + ElementwiseBinaryPythonRefInfo( + "_refs.div", + torch_opinfo_name="div", + torch_opinfo_variant_name="floor_rounding", + # https://github.com/pytorch/pytorch/issues/76944 + supports_two_python_scalars=True, + supports_one_python_scalar=True, + decorators=( + # See https://github.com/pytorch/pytorch/issues/111126 + DecorateInfo(unittest.expectedFailure, 'TestBinaryUfuncs', 'test_type_promotion'), + ), + ), + ElementwiseBinaryPythonRefInfo( + "_refs.eq", + torch_opinfo_name="eq", + ), + ElementwiseBinaryPythonRefInfo( + "_refs.float_power", + torch_opinfo_name="float_power", + skips=( + # Test doesn't account for float -> double type promotion + DecorateInfo(unittest.expectedFailure, 'TestBinaryUfuncs', 'test_type_promotion'), + # Complex values error with: Greatest absolute difference: nan at index + DecorateInfo(unittest.skip("Skipped!"), 'TestBinaryUfuncs', + 'test_reference_numerics_small_values', + dtypes=[torch.complex64, torch.complex128]), + DecorateInfo(unittest.skip("Skipped!"), 'TestBinaryUfuncs', + 'test_reference_numerics_large_values', + dtypes=[torch.complex64, torch.complex128]), + DecorateInfo(unittest.skip("Skipped!"), 'TestBinaryUfuncs', + 'test_reference_numerics_extremal_values', + dtypes=[torch.complex64, torch.complex128]), + ), + ), + ElementwiseBinaryPythonRefInfo( + "_refs.logaddexp", + torch_opinfo_name="logaddexp", + skips=( + # failure due to mismatch in edge cases, which boils down to what torch.exp(inf + infj) should be + DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_python_ref', device_type='cpu', + dtypes=(torch.complex64, torch.complex128)), + DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_python_ref_torch_fallback', device_type='cpu', + dtypes=(torch.complex64, torch.complex128)), + ), + ), + PythonRefInfo( + "_refs.logaddexp2", + torch_opinfo_name="logaddexp2", + ), + ElementwiseBinaryPythonRefInfo( + "_refs.floor_divide", + 
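+        # rhs_make_tensor_kwargs below is forwarded to make_tensor when the
+        # right-hand operand is generated, so exclude_zero=True keeps zero
+        # divisors out of the sample inputs for this op.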
torch_opinfo_name="floor_divide", + rhs_make_tensor_kwargs=dict(exclude_zero=True), + # https://github.com/pytorch/pytorch/issues/76944 + supports_two_python_scalars=True, + supports_one_python_scalar=True, + # bfloat16 floor_divide compared with a float32 reference works inconsistently + skips=( + DecorateInfo(unittest.skip("Skipped!"), 'TestCommon', 'test_python_ref', + dtypes=(torch.bfloat16,)), + DecorateInfo(unittest.skip("Skipped!"), 'TestCommon', 'test_python_ref_torch_fallback', + dtypes=(torch.bfloat16,)), + # bfloat16 floor_divide compared with a float32 reference works inconsistently + DecorateInfo(unittest.skip('Skipped!'), 'TestBinaryUfuncs', + dtypes=(torch.bfloat16,)), + # int8 floor divide has different results for -128 // -1 vs. NumPy + DecorateInfo(unittest.skip('Skipped!'), 'TestBinaryUfuncs', + 'test_reference_numerics_small_values', + dtypes=(torch.int8,)), + # The following tests fails on some jobs + DecorateInfo(unittest.skip('Skipped!'), 'TestBinaryUfuncs', + 'test_reference_numerics_extremal_values', + dtypes=(torch.float16,)), + DecorateInfo(toleranceOverride({torch.float16: tol(atol=1e-3, rtol=5e-3)}), + 'TestBinaryUfuncs', 'test_reference_numerics'), + # FIXME output 0: meta disagrees with real impl + DecorateInfo(unittest.expectedFailure, 'TestMeta', 'test_binary_ufuncs_mixed_dtype'), + ), + ), + ElementwiseBinaryPythonRefInfo( + "_refs.fmax", + torch_opinfo_name="fmax", + supports_rhs_python_scalar=False, + ), + ElementwiseBinaryPythonRefInfo( + "_refs.fmin", + torch_opinfo_name="fmin", + supports_rhs_python_scalar=False, + ), + ElementwiseBinaryPythonRefInfo( + "_refs.fmod", + torch_opinfo_name="fmod", + rhs_make_tensor_kwargs={'exclude_zero': True}, + supports_rhs_python_scalar=True, + skips=( + DecorateInfo(unittest.skip("Skipped!"), 'TestCommon', 'test_python_ref', + dtypes=(torch.bfloat16,), device_type='cpu'), + DecorateInfo(unittest.skip("Skipped!"), 'TestCommon', 'test_python_ref_torch_fallback', + dtypes=(torch.bfloat16,), device_type='cpu'), + DecorateInfo(unittest.skip("Skipped!"), 'TestBinaryUfuncs', + 'test_contig_vs_every_other', + dtypes=(torch.bfloat16,)), + DecorateInfo(unittest.skip("Skipped!"), 'TestBinaryUfuncs', + 'test_non_contig', + dtypes=(torch.bfloat16,)), + DecorateInfo(unittest.skip("Skipped!"), 'TestBinaryUfuncs', + 'test_reference_numerics', + dtypes=(torch.bfloat16,)), + DecorateInfo(unittest.skip("Skipped!"), 'TestBinaryUfuncs', + 'test_reference_numerics_small_values', + dtypes=(torch.uint8,)), + ), + ), + ElementwiseBinaryPythonRefInfo( + "_refs.gcd", + torch_opinfo_name="gcd", + skips=( + DecorateInfo(unittest.expectedFailure, + 'TestBinaryUfuncs', + 'test_reference_numerics_small_values', + dtypes=(torch.int8,)), + ), + ), + ElementwiseBinaryPythonRefInfo( + "_refs.ge", + torch_opinfo_name="ge", + ), + ElementwiseBinaryPythonRefInfo( + "_refs.gt", + torch_opinfo_name="gt", + ), + ElementwiseBinaryPythonRefInfo( + "_refs.heaviside", + torch_opinfo_name="heaviside", + supports_rhs_python_scalar=False, + skips=( + # PyTorch's heaviside does not appear to propagate NaNs + DecorateInfo(unittest.skip("Skipped!"), + 'TestBinaryUfuncs', + 'test_reference_numerics_extremal_values'), + ), + ), + ElementwiseBinaryPythonRefInfo( + "_refs.hypot", + torch_opinfo_name="hypot", + supports_rhs_python_scalar=False, + ), + ElementwiseBinaryPythonRefInfo( + "_refs.igamma", + torch_opinfo_name="igamma", + ), + ElementwiseBinaryPythonRefInfo( + "_refs.igammac", + torch_opinfo_name="igammac", + ), + ElementwiseBinaryPythonRefInfo( + 
"_refs.isclose", + torch_opinfo_name="isclose", + skips=( + # Intentional xfail -- isclose does not type promote + DecorateInfo(unittest.expectedFailure, 'TestBinaryUfuncs', 'test_type_promotion'), + DecorateInfo(unittest.expectedFailure, 'TestMeta', 'test_binary_ufuncs_mixed_dtype'), + DecorateInfo(unittest.skip("Skipped!"), + 'TestBinaryUfuncs', + 'test_reference_numerics_extremal_values'), + ), + ), + ElementwiseBinaryPythonRefInfo( + "_refs.lcm", + torch_opinfo_name="lcm", + ), + ElementwiseBinaryPythonRefInfo( + "_refs.le", + torch_opinfo_name="le", + ), + ElementwiseBinaryPythonRefInfo( + "_refs.logical_and", + torch_opinfo_name="logical_and", + ), + ElementwiseUnaryPythonRefInfo( + "_refs.logical_not", + torch_opinfo_name="logical_not", + ), + ElementwiseBinaryPythonRefInfo( + "_refs.logical_or", + torch_opinfo_name="logical_or", + ), + ElementwiseBinaryPythonRefInfo( + "_refs.logical_xor", + torch_opinfo_name="logical_xor", + ), + ElementwiseBinaryPythonRefInfo( + "_refs.lt", + torch_opinfo_name="lt", + ), + ElementwiseBinaryPythonRefInfo( + "_refs.maximum", + torch_opinfo_name="maximum", + skips=( + DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_python_ref_errors'), + ), + ), + ElementwiseBinaryPythonRefInfo( + "_refs.minimum", + torch_opinfo_name="minimum", + skips=( + DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_python_ref_errors'), + ), + ), + ElementwiseBinaryPythonRefInfo( + "_refs.mul", + torch_opinfo_name="mul", + # https://github.com/pytorch/pytorch/issues/76944 + supports_two_python_scalars=True, + supports_one_python_scalar=True, + skips=( + # Reference result was farther (0.0) from the precise computation + # than the torch result was (nan)! + DecorateInfo( + unittest.expectedFailure, 'TestCommon', 'test_python_ref_executor', + dtypes=(torch.complex32,), + ), + # Reference result was farther (0.0) from the precise computation + # than the torch result was (nan)! + DecorateInfo( + unittest.expectedFailure, 'TestCommon', 'test_python_ref', + dtypes=(torch.complex32,), device_type='cuda' + ), + # Reference result was farther (0.0) from the precise computation + # than the torch result was (nan)! + DecorateInfo( + unittest.expectedFailure, 'TestCommon', 'test_python_ref_torch_fallback', + dtypes=(torch.complex32,), device_type='cuda' + ), + ) + ), + ElementwiseBinaryPythonRefInfo( + "_refs.ne", + torch_opinfo_name="ne", + ), + ElementwiseBinaryPythonRefInfo( + "_refs.nextafter", + torch_opinfo_name="nextafter", + ), + ElementwiseBinaryPythonRefInfo( + "_refs.pow", + torch_opinfo_name="pow", + decorators=( + DecorateInfo( + toleranceOverride({torch.complex64: tol(atol=1e-4, rtol=1.3e-05)}), + 'TestBinaryUfuncs', 'test_reference_numerics'), + DecorateInfo( + toleranceOverride({torch.complex64: tol(atol=1e-4, rtol=1.3e-05), + torch.complex128: tol(atol=1e-4, rtol=1.3e-05)}), + 'TestBinaryUfuncs', 'test_scalar_support'), + ), + skips=( + # Reference result was farther (inf) from the precise + # computation than the torch result was (nan)! + DecorateInfo( + unittest.expectedFailure, 'TestCommon', 'test_python_ref_executor', + dtypes=(torch.complex32,), + ), + # Reference result was farther (inf) from the precise + # computation than the torch result was (nan)! + DecorateInfo( + unittest.expectedFailure, 'TestCommon', 'test_python_ref', + dtypes=(torch.complex32,), device_type="cuda" + ), + # Reference result was farther (inf) from the precise + # computation than the torch result was (nan)! 
+ DecorateInfo( + unittest.expectedFailure, 'TestCommon', 'test_python_ref_torch_fallback', + dtypes=(torch.complex32,), device_type="cuda" + ), + # Skipping integers because they are being raised to negative powers causing an error + DecorateInfo(unittest.expectedFailure, 'TestBinaryUfuncs', + 'test_reference_numerics_small_values', + dtypes=[torch.int8, torch.int16, torch.int32, torch.int64]), + DecorateInfo(unittest.expectedFailure, 'TestBinaryUfuncs', + 'test_reference_numerics_large_values', + dtypes=[torch.int16, torch.int32, torch.int64]), + DecorateInfo(unittest.skip("Skipped!"), 'TestBinaryUfuncs', + 'test_reference_numerics', + dtypes=(torch.complex32,)), + DecorateInfo(unittest.skip("Skipped!"), 'TestBinaryUfuncs', + 'test_reference_numerics_small_values', + dtypes=(torch.complex32, torch.complex64, torch.complex128)), + DecorateInfo(unittest.skip("Skipped!"), 'TestBinaryUfuncs', + 'test_reference_numerics_large_values', + dtypes=(torch.complex32, torch.complex64, torch.complex128)), + DecorateInfo(unittest.skip("Skipped!"), 'TestBinaryUfuncs', + 'test_reference_numerics_extremal_values', + dtypes=(torch.complex32, torch.complex64, torch.complex128)), + ), + ), + ElementwiseBinaryPythonRefInfo( + "_refs.remainder", + torch_opinfo_name="remainder", + skips=( + DecorateInfo(unittest.skip("Skipped!"), 'TestCommon', 'test_python_ref', + dtypes=(torch.bfloat16,), device_type='cpu'), + DecorateInfo(unittest.skip("Skipped!"), 'TestCommon', 'test_python_ref_torch_fallback', + dtypes=(torch.bfloat16,), device_type='cpu'), + DecorateInfo(unittest.skip("Skipped!"), 'TestBinaryUfuncs', + 'test_reference_numerics', + dtypes=(torch.bfloat16,)), + DecorateInfo(unittest.skip("Skipped!"), 'TestBinaryUfuncs', + 'test_reference_numerics_small_values', + dtypes=(torch.uint8,)), + ), + ), + ElementwiseBinaryPythonRefInfo( + "_refs.rsub", + torch_opinfo_name="rsub", + # https://github.com/pytorch/pytorch/issues/76944 + skips=( + # Reference result was farther (nan) from the precise computation than + # the torch result was (nan)! + DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_python_ref', + dtypes=(torch.chalf,), device_type='cpu'), + # Reference result was farther (nan) from the precise computation than + # the torch result was (nan)! 
+ DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_python_ref_torch_fallback', + dtypes=(torch.chalf,), device_type='cpu'), + ), + ), + ElementwiseBinaryPythonRefInfo( + "_refs.sub", + torch_opinfo_name="sub", + # https://github.com/pytorch/pytorch/issues/76944 + supports_two_python_scalars=True, + supports_one_python_scalar=True, + decorators=( + DecorateInfo( + toleranceOverride({torch.float16: tol(atol=1e-2, rtol=0), + torch.bfloat16: tol(atol=1e-5, rtol=5e-3), + torch.complex32: tol(atol=1e-5, rtol=1e-3)}), + 'TestBinaryUfuncs', 'test_reference_numerics'), + DecorateInfo( + toleranceOverride({torch.chalf: tol(atol=1e-2, rtol=0)}), + 'TestCommon', 'test_complex_half_reference_testing', device_type='cpu'), + DecorateInfo( + toleranceOverride({torch.chalf: tol(atol=5e-3, rtol=0)}), + 'TestDecomp', 'test_comprehensive', device_type='cpu'), + DecorateInfo( + toleranceOverride({torch.chalf: tol(atol=5e-3, rtol=0)}), + 'TestDecomp', 'test_quick', device_type='cpu'), + ), + skips=( + DecorateInfo(unittest.skip("Skipped!"), + 'TestBinaryUfuncs', + 'test_reference_numerics', + dtypes=(torch.uint8,)), + DecorateInfo(unittest.skip("Skipped!"), + 'TestBinaryUfuncs', + 'test_reference_numerics_small_values', + dtypes=(torch.uint8,)), + ), + ), + ElementwiseBinaryPythonRefInfo( + "_refs.true_divide", + torch_opinfo_name="true_divide", + # https://github.com/pytorch/pytorch/issues/76944 + supports_two_python_scalars=True, + supports_one_python_scalar=True, + skips=( + # Reference result was farther (0.7433461727239705) from the precise + # computation than the torch result was (nan)! + DecorateInfo( + unittest.expectedFailure, 'TestCommon', 'test_python_ref_executor', + dtypes=(torch.complex32,), + ), + # Reference result was farther (0.7433461727239705) from the precise + # computation than the torch result was (nan)! + DecorateInfo( + unittest.expectedFailure, 'TestCommon', 'test_python_ref', + dtypes=(torch.complex32,), device_type="cuda" + ), + # Reference result was farther (0.7433461727239705) from the precise + # computation than the torch result was (nan)! + DecorateInfo( + unittest.expectedFailure, 'TestCommon', 'test_python_ref_torch_fallback', + dtypes=(torch.complex32,), device_type="cuda" + ), + ), + ), + # + # Elementwise Ternary Reference OpInfos + # + PythonRefInfo( + "_refs.addcdiv", + torch_opinfo_name="addcdiv", + ), + PythonRefInfo( + "_refs.addcmul", + torch_opinfo_name="addcmul", + skips=( + # Reference result was farther (1.3343989849090576e-05) + # from the precise computation than the torch result + # was (9.592622518539429e-06)! 
+ # FIXME: enable dtype-based tolerances in test_ops.py:TestCommon._ref_test_helper + DecorateInfo(unittest.skip("Skipped!"), 'TestCommon', 'test_python_ref', + dtypes=(torch.float16,), device_type="cpu"), + DecorateInfo(unittest.skip("Skipped!"), 'TestCommon', 'test_python_ref_torch_fallback', + dtypes=(torch.float16,), device_type="cpu"), + ), + ), + ElementwiseBinaryPythonRefInfo( + "_refs.clamp_min", + torch_opinfo_name="clamp_min", + skips=( + # test error disabled since rhs non-tensor python scalar is supported + DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_python_ref_errors'), + ), + ), + ElementwiseBinaryPythonRefInfo( + "_refs.clamp_max", + torch_opinfo_name="clamp_max", + skips=( + # test error disabled since rhs non-tensor python scalar is supported + DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_python_ref_errors'), + ), + ), + PythonRefInfo( + "_refs.clamp", + torch_opinfo_name="clamp", + ), + PythonRefInfo( + "_refs.nn.functional.triplet_margin_loss", + torch_opinfo_name="nn.functional.triplet_margin_loss", + supports_out=False, + # TODO: Uses minimum and clamp + skips=( + # AssertionError: Tensor-likes are not close! + # Greatest absolute difference: 6.103515625e-05 at index (4,) (up to 1e-05 allowed) + # Greatest relative difference: 8.519846983548175e-06 at index (4,) (up to 1.3e-06 allowed) + DecorateInfo(unittest.skip("Skipped!"), 'TestCommon', 'test_python_ref', + dtypes=(torch.uint8,), device_type="cpu"), + ) + ), + ElementwiseBinaryPythonRefInfo( + "_refs.xlogy", + torch_opinfo_name="xlogy", + supports_one_python_scalar=True, + ), + # + # Elementwise Binary Special OpInfos + # + ElementwiseBinaryPythonRefInfo( + "_refs.special.xlog1py", + torch_opinfo_name="special.xlog1py", + supports_one_python_scalar=True, + ), + # + # Data Conversion & Data Movement Opinfos + # + ElementwiseUnaryPythonRefInfo( + "_refs._conversions.bfloat16", + torch_opinfo_name="bfloat16", + # TODO: If self already has the correct dtype and device, then self is + # returned ignoring memory_format. + # https://github.com/pytorch/pytorch/issues/86558 + validate_view_consistency=False, + ), + ElementwiseUnaryPythonRefInfo( + "_refs._conversions.bool", + torch_opinfo_name="bool", + # TODO: If self already has the correct dtype and device, then self is + # returned ignoring memory_format. + # https://github.com/pytorch/pytorch/issues/86558 + validate_view_consistency=False, + ), + ElementwiseUnaryPythonRefInfo( + "_refs._conversions.byte", + torch_opinfo_name="byte", + # TODO: If self already has the correct dtype and device, then self is + # returned ignoring memory_format. + # https://github.com/pytorch/pytorch/issues/86558 + validate_view_consistency=False, + skips=( + DecorateInfo(unittest.skip('Overflow when downcasting signed type is undefined'), 'TestCommon', 'test_compare_cpu'), + ) + ), + ElementwiseUnaryPythonRefInfo( + "_refs._conversions.char", + torch_opinfo_name="char", + # TODO: If self already has the correct dtype and device, then self is + # returned ignoring memory_format. 
+ # https://github.com/pytorch/pytorch/issues/86558 + validate_view_consistency=False, + skips=( + DecorateInfo(unittest.skip('Overflow when downcasting signed type is undefined'), 'TestCommon', 'test_compare_cpu'), + ) + ), + ElementwiseBinaryPythonRefInfo( + "_refs._conversions.complex", + torch_opinfo_name="complex", + error_inputs_func=partial(error_inputs_complex, is_ref=True), + skips=( + # Tests don't account for complex's type promotion semantics + DecorateInfo(unittest.expectedFailure, 'TestBinaryUfuncs', 'test_type_promotion'), + DecorateInfo(unittest.expectedFailure, 'TestMeta', 'test_binary_ufuncs_mixed_dtype'), + ) + ), + ElementwiseBinaryPythonRefInfo( + "_refs._conversions.polar", + torch_opinfo_name="polar", + skips=( + # Tests don't account for complex's type promotion semantics + DecorateInfo(unittest.expectedFailure, 'TestBinaryUfuncs', 'test_type_promotion'), + DecorateInfo(unittest.expectedFailure, 'TestMeta', 'test_binary_ufuncs_mixed_dtype'), + ) + ), + ElementwiseUnaryPythonRefInfo( + "_refs._conversions.double", + torch_opinfo_name="double", + # TODO: If self already has the correct dtype and device, then self is + # returned ignoring memory_format. + # https://github.com/pytorch/pytorch/issues/86558 + validate_view_consistency=False, + ), + ElementwiseUnaryPythonRefInfo( + "_refs._conversions.float", + torch_opinfo_name="float", + # TODO: If self already has the correct dtype and device, then self is + # returned ignoring memory_format. + # https://github.com/pytorch/pytorch/issues/86558 + validate_view_consistency=False, + ), + ElementwiseUnaryPythonRefInfo( + "_refs._conversions.half", + torch_opinfo_name="half", + # TODO: If self already has the correct dtype and device, then self is + # returned ignoring memory_format. + # https://github.com/pytorch/pytorch/issues/86558 + validate_view_consistency=False, + ), + ElementwiseUnaryPythonRefInfo( + "_refs._conversions.int", + torch_opinfo_name="int", + # TODO: If self already has the correct dtype and device, then self is + # returned ignoring memory_format. + # https://github.com/pytorch/pytorch/issues/86558 + validate_view_consistency=False, + skips=( + DecorateInfo(unittest.skip('Overflow when downcasting signed type is undefined'), 'TestCommon', 'test_compare_cpu'), + ) + ), + ElementwiseUnaryPythonRefInfo( + "_refs._conversions.long", + torch_opinfo_name="long", + # TODO: If self already has the correct dtype and device, then self is + # returned ignoring memory_format. + # https://github.com/pytorch/pytorch/issues/86558 + validate_view_consistency=False, + skips=( + DecorateInfo(unittest.skip('Overflow when downcasting signed type is undefined'), 'TestCommon', 'test_compare_cpu'), + ) + ), + ElementwiseUnaryPythonRefInfo( + "_refs._conversions.short", + torch_opinfo_name="short", + # TODO: If self already has the correct dtype and device, then self is + # returned ignoring memory_format. + # https://github.com/pytorch/pytorch/issues/86558 + validate_view_consistency=False, + skips=( + DecorateInfo(unittest.skip('Overflow when downcasting signed type is undefined'), 'TestCommon', 'test_compare_cpu'), + ) + ), + ElementwiseUnaryPythonRefInfo( + "_refs._conversions.chalf", + torch_opinfo_name="chalf", + # TODO: If self already has the correct dtype and device, then self is + # returned ignoring memory_format. 
+ # https://github.com/pytorch/pytorch/issues/86558 + validate_view_consistency=False, + ), + ElementwiseUnaryPythonRefInfo( + "_refs._conversions.cfloat", + torch_opinfo_name="cfloat", + # TODO: If self already has the correct dtype and device, then self is + # returned ignoring memory_format. + # https://github.com/pytorch/pytorch/issues/86558 + validate_view_consistency=False, + ), + ElementwiseUnaryPythonRefInfo( + "_refs._conversions.cdouble", + torch_opinfo_name="cdouble", + # TODO: If self already has the correct dtype and device, then self is + # returned ignoring memory_format. + # https://github.com/pytorch/pytorch/issues/86558 + validate_view_consistency=False, + ), + PythonRefInfo( + "_refs.clone", + torch_opinfo_name="clone", + ), + # + # View & Shape OpInfos + # + PythonRefInfo( + "_refs.atleast_1d", + torch_opinfo_name="atleast_1d", + validate_view_consistency=False, + ), + PythonRefInfo( + "_refs.atleast_2d", + torch_opinfo_name="atleast_2d", + validate_view_consistency=False, + ), + PythonRefInfo( + "_refs.atleast_3d", + torch_opinfo_name="atleast_3d", + validate_view_consistency=False, + ), + PythonRefInfo( + "_refs.as_strided", + torch_opinfo_name="as_strided", + # FIXME: doesn't support chalf + dtypes=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16), + skips=( + # cloned_mutable_input.is_same(returned_output) INTERNAL ASSERT FAILED + DecorateInfo(unittest.skip("Errors when storage_offset is included"), 'TestMathBits', 'test_neg_view'), + DecorateInfo(unittest.skip("Errors when storage_offset is included"), 'TestMathBits', 'test_conj_view'), + DecorateInfo(unittest.skip("Errors when storage_offset is included"), 'TestMathBits', 'test_neg_conj_view'), + ), + ), + PythonRefInfo( + "_refs.as_strided", + torch_opinfo_name="as_strided", + torch_opinfo_variant_name="partial_views", + # FIXME: doesn't support chalf + dtypes=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16), + skips=( + # cloned_mutable_input.is_same(returned_output) INTERNAL ASSERT FAILED + DecorateInfo(unittest.skip("Errors when storage_offset is included"), 'TestMathBits', 'test_neg_view'), + DecorateInfo(unittest.skip("Errors when storage_offset is included"), 'TestMathBits', 'test_conj_view'), + DecorateInfo(unittest.skip("Errors when storage_offset is included"), 'TestMathBits', 'test_neg_conj_view'), + DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_compare_cpu'), + ), + ), + PythonRefInfo( + "_refs.as_strided_scatter", + torch_opinfo_name="as_strided_scatter", + # returns a view of an intermediate tensor (as_strided) + validate_view_consistency=False, + ), + PythonRefInfo( + "_refs.broadcast_shapes", + torch_opinfo_name="broadcast_shapes", + ), + PythonRefInfo( + "_refs.broadcast_tensors", + torch_opinfo_name="broadcast_tensors", + ), + PythonRefInfo( + "_refs.broadcast_to", + torch_opinfo_name="broadcast_to", + ), + PythonRefInfo( + "_refs.cat", + torch_opinfo_name="cat", + skips=( + # FIXME: AssertionError: RuntimeError not raised + DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_python_ref_errors'), + ), + ), + PythonRefInfo( + "_refs.chunk", + torch_opinfo_name="chunk", + ), + PythonRefInfo( + "_refs.column_stack", + torch_opinfo_name="column_stack", + ), + ElementwiseUnaryPythonRefInfo( + "_refs.conj", + torch_opinfo_name="conj", + ), + PythonRefInfo( + "_refs.constant_pad_nd", + torch_opinfo_name="constant_pad_nd", + ), + PythonRefInfo( + "_refs.contiguous", + torch_opinfo_name="contiguous", + ), + ElementwiseUnaryPythonRefInfo( + 
"_refs.deg2rad", + torch_opinfo_name="deg2rad", + decorators=(precisionOverride({torch.bfloat16: 7e-1, + torch.float16: 7e-1}),), + ), + PythonRefInfo( + "_refs.dsplit", + torch_opinfo_name="dsplit", + ), + PythonRefInfo( + "_refs.diag", + torch_opinfo_name="diag", + ), + PythonRefInfo( + "_refs.diagonal", + torch_opinfo_name="diagonal", + ), + PythonRefInfo( + "_refs.diagonal_copy", + torch_opinfo_name="diagonal_copy", + ), + PythonRefInfo( + "_refs.diagonal_scatter", + torch_opinfo_name="diagonal_scatter", + supports_out=True, + # returns a view of an intermediate tensor (as_strided) + validate_view_consistency=False, + ), + PythonRefInfo( + "_refs.diag_embed", + torch_opinfo_name="diag_embed", + supports_out=True, + ), + PythonRefInfo( + "_refs.dstack", + torch_opinfo_name="dstack", + skips=( + DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_python_ref_errors'), + ), + ), + PythonRefInfo( + "_refs.expand", + torch_opinfo_name="expand", + ), + PythonRefInfo( + "_refs.expand_as", + torch_opinfo_name="expand_as", + ), + PythonRefInfo( + "_refs.flatten", + torch_opinfo_name="flatten", + ), + PythonRefInfo( + "_refs.flip", + torch_opinfo_name="flip", + ), + PythonRefInfo( + "_refs.fliplr", + torch_opinfo_name="fliplr", + ), + PythonRefInfo( + "_refs.flipud", + torch_opinfo_name="flipud", + ), + PythonRefInfo( + "_refs.hstack", + torch_opinfo_name="hstack", + skips=( + # https://github.com/pytorch/pytorch/issues/78613 + DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_python_ref_errors'), + ), + ), + PythonRefInfo( + "_refs.narrow", + torch_opinfo_name="narrow", + error_inputs_func=partial(error_inputs_narrow_narrow_copy, is_narrow=True, is_ref=True), + ), + PythonRefInfo( + "_refs.narrow_copy", + torch_opinfo_name="narrow_copy", + supports_out=True, + error_inputs_func=partial(error_inputs_narrow_narrow_copy, is_narrow=False, is_ref=True), + ), + PythonRefInfo( + "_refs.nn.functional.group_norm", + torch_opinfo_name="nn.functional.group_norm", + validate_view_consistency=False, + ), + PythonRefInfo( + "_refs.native_layer_norm", + torch_opinfo_name="native_layer_norm", + skips=( + DecorateInfo(unittest.skip("Skipped!"), "TestCommon", "test_python_ref", + device_type="cpu", dtypes=(torch.float32,)), + DecorateInfo(unittest.skip("Skipped!"), "TestCommon", "test_python_ref_torch_fallback", + device_type="cpu", dtypes=(torch.float32,)), + ), + ), + PythonRefInfo( + "_refs.permute", + torch_opinfo_name="permute", + ), + ElementwiseUnaryPythonRefInfo( + "_refs.rad2deg", + torch_opinfo_name="rad2deg", + decorators=(precisionOverride({torch.bfloat16: 7e-1, + torch.float16: 7e-1}),), + ), + PythonRefInfo( + "_refs.ravel", + torch_opinfo_name="ravel", + ), + PythonRefInfo( + "_refs.renorm", + torch_opinfo_name="renorm", + ), + PythonRefInfo( + "_refs.repeat", + torch_opinfo_name="repeat", + validate_view_consistency=False, + ), + PythonRefInfo( + "_refs.reshape", + torch_opinfo_name="reshape", + ), + PythonRefInfo( + "_refs.reshape_as", + torch_opinfo_name="reshape_as", + ), + PythonRefInfo( + "_refs.roll", + torch_opinfo_name="roll", + validate_view_consistency=False, + ), + PythonRefInfo( + "_refs.rot90", + torch_opinfo_name="rot90", + validate_view_consistency=False, + ), + PythonRefInfo( + "_refs.stack", + torch_opinfo_name="stack", + validate_view_consistency=False, + ), + PythonRefInfo( + "_refs.squeeze", + torch_opinfo_name="squeeze", + ), + PythonRefInfo( + "_refs.squeeze", + torch_opinfo_name="squeeze", + torch_opinfo_variant_name="multiple", + ), + PythonRefInfo( + 
"_refs.tensor_split", + torch_opinfo_name="tensor_split", + skips=( + # TensorMeta doesn't support tolist + DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_python_ref_meta'), + # RuntimeError: no _refs support for torch.Tensor.tolist + DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_python_ref'), + ), + ), + PythonRefInfo( + "_refs.hsplit", + torch_opinfo_name="hsplit", + ), + PythonRefInfo( + "_refs.vsplit", + torch_opinfo_name="vsplit", + ), + PythonRefInfo( + "_refs.dot", + torch_opinfo_name="dot", + error_inputs_func=partial(error_inputs_dot_vdot, is_ref=True), + # .conj() does not set ._is_view() correctly in ATen + validate_view_consistency=False, + skips=( + # RuntimeError: no _refs support for torch.Tensor.is_conj + DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_python_ref', dtypes=[torch.complex64, torch.complex128]), + ), + ), + PythonRefInfo( + "_refs.vdot", + torch_opinfo_name="vdot", + error_inputs_func=partial(error_inputs_dot_vdot, is_ref=True), + # .conj() does not set ._is_view() correctly in ATen + validate_view_consistency=False, + skips=( + # RuntimeError: no _refs support for torch.Tensor.is_conj + DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_python_ref', dtypes=[torch.complex64, torch.complex128]), + ), + ), + PythonRefInfo( + "_refs.transpose", + torch_opinfo_name="transpose", + ), + PythonRefInfo( + "_refs.t", + torch_opinfo_name="t", + ), + PythonRefInfo( + "_refs.T", + torch_opinfo_name="T", + error_inputs_func=partial(error_inputs_T, has_ndims_error=True), + ), + PythonRefInfo( + "_refs.unfold", + torch_opinfo_name="unfold", + ), + PythonRefInfo( + "_refs.unfold_copy", + torch_opinfo_name="unfold_copy", + supports_out=True, + ), + PythonRefInfo( + "_refs.unsqueeze", + torch_opinfo_name="unsqueeze", + ), + PythonRefInfo( + "_refs.view", + torch_opinfo_name="view", + ), + PythonRefInfo( + "_refs.view_as", + torch_opinfo_name="view_as", + ), + PythonRefInfo( + "_refs.vstack", + torch_opinfo_name="vstack", + skips=( + # https://github.com/pytorch/pytorch/issues/78613 + DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_python_ref_errors'), + ), + ), + PythonRefInfo( + "_refs.unflatten", + torch_opinfo_name="unflatten", + ), + PythonRefInfo( + "_refs.unbind", + torch_opinfo_name="unbind", + ), + # + # Reduction Reference OpInfos + # + ReductionPythonRefInfo( + "_refs.all", + torch_opinfo_name="all", + skips=( + # FIXME: uint8 input returns uint8 instead of bool + DecorateInfo( + unittest.expectedFailure, 'TestReductions', 'test_result_dtype', + dtypes=[torch.uint8]), + ), + ), + ReductionPythonRefInfo( + "_refs.amax", + torch_opinfo_name="amax", + error_inputs_func=partial(error_inputs_aminmax_amax_amin, is_ref=True), + skips=( + # FIXME: reduces all dimensions when dim=[] + DecorateInfo( + unittest.expectedFailure, 'TestReductions', 'test_dim_empty'), + DecorateInfo( + unittest.expectedFailure, 'TestReductions', 'test_dim_empty_keepdim'), + ), + ), + ReductionPythonRefInfo( + "_refs.amin", + torch_opinfo_name="amin", + error_inputs_func=partial(error_inputs_aminmax_amax_amin, is_ref=True), + skips=( + # FIXME: reduces all dimensions when dim=[] + DecorateInfo( + unittest.expectedFailure, 'TestReductions', 'test_dim_empty'), + DecorateInfo( + unittest.expectedFailure, 'TestReductions', 'test_dim_empty_keepdim'), + ), + ), + ReductionPythonRefInfo( + "_refs.any", + torch_opinfo_name="any", + skips=( + # FIXME: uint8 input returns uint8 instead of bool + DecorateInfo( + unittest.expectedFailure, 'TestReductions', 
'test_result_dtype', + dtypes=[torch.uint8]), + ), + ), + ReductionPythonRefInfo( + "_refs.count_nonzero", + torch_opinfo_name="count_nonzero", + skips=( + # FIXME: count_nonzero does not accept keepdim kwarg + DecorateInfo( + unittest.skip("Skipped!"), 'TestReductions', + 'test_dim_default_keepdim'), + DecorateInfo( + unittest.skip("Skipped!"), 'TestReductions', 'test_dim_none_keepdim'), + DecorateInfo( + unittest.skip("Skipped!"), 'TestReductions', 'test_dim_single_keepdim'), + DecorateInfo( + unittest.skip("Skipped!"), 'TestReductions', 'test_dim_empty_keepdim'), + DecorateInfo( + unittest.skip("Skipped!"), 'TestReductions', 'test_dim_multi_keepdim'), + DecorateInfo( + unittest.skip("Skipped!"), 'TestReductions', + 'test_dim_multi_unsorted_keepdim'), + # FIXME: dim=[] reduces all dimensions + DecorateInfo(unittest.skip("Skipped!"), 'TestReductions', 'test_dim_empty'), + ), + ), + ReductionPythonRefInfo( + "_refs.mean", + torch_opinfo_name="mean", + supports_out=True, + error_inputs_func=partial(error_inputs_mean, is_ref=True), + skips=( + # FIXME: reduces all dimensions when dim=[] + DecorateInfo( + unittest.expectedFailure, 'TestReductions', 'test_dim_empty'), + DecorateInfo( + unittest.expectedFailure, 'TestReductions', 'test_dim_empty_keepdim'), + ), + ), + ReductionPythonRefInfo( + "_refs.std", + torch_opinfo_name="std", + supports_out=True, + skips=( + # FIXME: reduces all dimensions when dim=[] + DecorateInfo( + unittest.expectedFailure, 'TestReductions', 'test_dim_empty'), + DecorateInfo( + unittest.expectedFailure, 'TestReductions', 'test_dim_empty_keepdim'), + # FIXME: improve precision + DecorateInfo( + unittest.skip("Skipped!"), 'TestReductions', 'test_ref_small_input', + dtypes=(torch.float16,)), + DecorateInfo( + unittest.skip("Skipped!"), 'TestReductions', + 'test_ref_duplicate_values', + dtypes=(torch.float16,)), + ), + ), + # std_mean and var_mean are not ReductionInfos + PythonRefInfo( + "_refs.std_mean", + torch_opinfo_name="std_mean", + ), + ReductionPythonRefInfo( + "_refs.sum", + torch_opinfo_name="sum", + supports_out=True, + skips=( + # FIXME: doesn't test out behavior properly for this operator + DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_out'), + # FIXME: mean reduces all dimensions when dim=[] + DecorateInfo(unittest.skip("Skipped!"), 'TestReductions', 'test_dim_empty'), + DecorateInfo( + unittest.skip("Skipped!"), 'TestReductions', 'test_dim_empty_keepdim'), + # FIXME: improve precision + DecorateInfo( + unittest.skip("Skipped!"), 'TestReductions', 'test_ref_small_input', + dtypes=[torch.float16]), + DecorateInfo( + unittest.skip("Skipped!"), 'TestReductions', + 'test_ref_duplicate_values', + dtypes=[torch.float16]), + DecorateInfo( + unittest.skip("Skipped!"), 'TestOperators', 'test_reduction_all', + dtypes=[torch.float32]), + ), + ), + PythonRefInfo( + "_refs.cumsum", + torch_opinfo_name="cumsum", + supports_out=True, + skips=( + # doesn't test out behavior properly for this operator + DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_out'), + ), + ), + PythonRefInfo( + "_refs.cumprod", + torch_opinfo_name="cumprod", + supports_out=True, + skips=( + # doesn't test out behavior properly for this operator + DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_out'), + ), + ), + PythonRefInfo( + "_refs.sum_to_size", + torch_opinfo_name="sum_to_size", + validate_view_consistency=False, + ), + ReductionPythonRefInfo( + "_refs.prod", + torch_opinfo_name="prod", + supports_out=True, + supports_multiple_dims=True, + skips=( + # FIXME: 
doesn't test out behavior properly for this operator + DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_out'), + # FIXME: reduces all dimensions when dim=[] + DecorateInfo( + unittest.expectedFailure, 'TestReductions', 'test_dim_empty'), + DecorateInfo( + unittest.expectedFailure, 'TestReductions', 'test_dim_empty_keepdim'), + # FIXME: improve precision + DecorateInfo( + unittest.skip("Skipped!"), 'TestReductions', 'test_ref_small_input', + dtypes=[torch.float16, torch.complex64]), + ), + ), + ReductionPythonRefInfo( + "_refs.var", + torch_opinfo_name="var", + supports_out=True, + skips=( + # FIXME: reduces all dimensions when dim=[] + DecorateInfo( + unittest.expectedFailure, 'TestReductions', 'test_dim_empty'), + DecorateInfo( + unittest.expectedFailure, 'TestReductions', 'test_dim_empty_keepdim'), + # FIXME: improve precision + DecorateInfo( + unittest.skip("Skipped!"), 'TestReductions', 'test_ref_small_input'), + ), + ), + PythonRefInfo( + "_refs.var_mean", + torch_opinfo_name="var_mean", + validate_view_consistency=False, + ), + # + # Linear Algebra Operators + # + PythonRefInfo( + "_refs.addr", + torch_opinfo_name="addr", + decorators=( + DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_python_ref',), + ), + ), + PythonRefInfo( + "_refs.trace", + torch_opinfo_name="trace", + ), + PythonRefInfo( + "_refs.norm", + torch_opinfo_name="norm", + supports_out=True, + # Uses vector_norm inside and vector_norm is affected by + # https://github.com/pytorch/pytorch/issues/77216 + validate_view_consistency=False, + ), + # + # Tensor Creation Reference OpInfos + # + PythonRefInfo( + "_refs.empty", + torch_opinfo_name="empty", + skips=( + DecorateInfo(unittest.skip("Expected: empty is not comparable"), + 'TestCommon', + 'test_python_ref'), + DecorateInfo(unittest.skip("Expected: empty is not comparable"), + 'TestCommon', + 'test_python_ref_torch_fallback'), + DecorateInfo(unittest.skip("Expected: empty is not comparable"), + 'TestCommon', + 'test_out'), + DecorateInfo(unittest.skip("Expected: empty is not comparable"), + 'TestCommon', + 'test_out_warning'), + DecorateInfo(unittest.skip("Expected: empty is not comparable"), + 'TestMathBits', + 'test_conj_view'), + DecorateInfo(unittest.skip("Expected: empty is not comparable"), + 'TestMathBits', + 'test_neg_conj_view'), + DecorateInfo(unittest.skip("Expected: empty is not comparable"), + 'TestMathBits', + 'test_neg_view'), + # FIXME: shouldn't check empty results + DecorateInfo(unittest.skip("Can't check result for empty"), 'TestCommon', 'test_python_ref_executor'), + DecorateInfo(unittest.skip('output is non-deterministic'), 'TestCommon', 'test_compare_cpu'), + ), + ), + PythonRefInfo( + "_refs.empty_like", + torch_opinfo_name="empty_like", + skips=( + DecorateInfo(unittest.skip("Expected: empty is not comparable"), + 'TestCommon', + 'test_python_ref'), + DecorateInfo(unittest.skip("Expected: empty is not comparable"), + 'TestCommon', + 'test_python_ref_torch_fallback'), + DecorateInfo(unittest.skip("Expected: empty is not comparable"), + 'TestCommon', + 'test_out'), + DecorateInfo(unittest.skip("Expected: empty is not comparable"), + 'TestCommon', + 'test_out_warning'), + DecorateInfo(unittest.skip("Expected: empty is not comparable"), + 'TestMathBits', + 'test_conj_view'), + DecorateInfo(unittest.skip("Expected: empty is not comparable"), + 'TestMathBits', + 'test_neg_conj_view'), + DecorateInfo(unittest.skip("Expected: empty is not comparable"), + 'TestMathBits', + 'test_neg_view'), + # FIXME: should not compare results of 
empty_like + DecorateInfo(unittest.skip("Can't check result for empty_like"), 'TestCommon', 'test_python_ref_executor'), + DecorateInfo(unittest.skip('output is non-deterministic'), 'TestCommon', 'test_compare_cpu'), + ), + ), + PythonRefInfo( + "_refs.randn", + torch_opinfo_name="randn", + op=lambda *args, **kwargs: wrapper_set_seed(refs.randn, *args, **kwargs), + skips=( + # see https://github.com/pytorch/pytorch/issues/85121 + DecorateInfo(unittest.skip("make_traced() doesn't set seed properly!"), + 'TestCommon', + 'test_python_ref_executor'), + # These tests expect the input to be a tensor or a sequence of tensors + DecorateInfo(unittest.skip("Test expects tensor input"), "TestCommon", "test_noncontiguous_samples"), + DecorateInfo(unittest.skip("Test expects tensor input"), 'TestMathBits', 'test_neg_view'), + DecorateInfo(unittest.skip("Test expects tensor input"), 'TestMathBits', 'test_conj_view'), + DecorateInfo(unittest.skip("Test expects tensor input"), 'TestMathBits', 'test_neg_conj_view'), + ), + ), + PythonRefInfo( + "_refs.eye", + torch_opinfo_name="eye", + skips=( + # skip these tests since we have non tensor input + DecorateInfo(unittest.skip("Skipped!"), 'TestMathBits', 'test_conj_view'), + DecorateInfo(unittest.skip("Skipped!"), 'TestMathBits', 'test_neg_conj_view'), + DecorateInfo(unittest.skip("Skipped!"), 'TestMathBits', 'test_neg_view'), + ), + ), + PythonRefInfo( + "_refs.new_empty", + torch_opinfo_name="new_empty", + skips=( + DecorateInfo(unittest.skip("Expected: empty is not comparable"), + 'TestCommon', + 'test_python_ref'), + DecorateInfo(unittest.skip("Expected: empty is not comparable"), + 'TestCommon', + 'test_python_ref_torch_fallback'), + DecorateInfo(unittest.skip("Expected: empty is not comparable"), + 'TestCommon', + 'test_out'), + DecorateInfo(unittest.skip("Expected: empty is not comparable"), + 'TestCommon', + 'test_out_warning'), + DecorateInfo(unittest.skip("Expected: empty is not comparable"), + 'TestMathBits', + 'test_conj_view'), + DecorateInfo(unittest.skip("Expected: empty is not comparable"), + 'TestMathBits', + 'test_neg_conj_view'), + DecorateInfo(unittest.skip("Expected: empty is not comparable"), + 'TestMathBits', + 'test_neg_view'), + # FIXME: should not compare results of empty_like + DecorateInfo(unittest.skip("Can't check result for new_empty"), 'TestCommon', 'test_python_ref_executor'), + DecorateInfo(unittest.skip('output is non-deterministic'), 'TestCommon', 'test_compare_cpu'), + ), + ), + PythonRefInfo( + "_refs.new_empty_strided", + torch_opinfo_name="new_empty_strided", + skips=( + DecorateInfo(unittest.skip("Expected: empty_strided is not comparable"), + 'TestCommon', + 'test_python_ref'), + DecorateInfo(unittest.skip("Expected: empty_strided is not comparable"), + 'TestCommon', + 'test_python_ref_torch_fallback'), + DecorateInfo(unittest.skip("Expected: empty_strided is not comparable"), + 'TestMathBits', + 'test_conj_view'), + DecorateInfo(unittest.skip("Expected: empty_strided is not comparable"), + 'TestMathBits', + 'test_neg_conj_view'), + DecorateInfo(unittest.skip("Expected: empty_strided is not comparable"), + 'TestMathBits', + 'test_neg_view'), + DecorateInfo(unittest.skip("Expected: empty_strided is not comparable"), + 'TestCommon', + 'test_python_ref_executor'), + DecorateInfo(unittest.skip('output is non-deterministic'), 'TestCommon', 'test_compare_cpu'), + + ), + ), + PythonRefInfo( + "_refs.empty_strided", + torch_opinfo_name="empty_strided", + skips=( + DecorateInfo(unittest.skip("Expected: empty_strided is not 
comparable"), + 'TestCommon', + 'test_python_ref'), + DecorateInfo(unittest.skip("Expected: empty_strided is not comparable"), + 'TestCommon', + 'test_python_ref_torch_fallback'), + DecorateInfo(unittest.skip("Expected: empty_strided is not comparable"), + 'TestMathBits', + 'test_conj_view'), + DecorateInfo(unittest.skip("Expected: empty_strided is not comparable"), + 'TestMathBits', + 'test_neg_conj_view'), + DecorateInfo(unittest.skip("Expected: empty_strided is not comparable"), + 'TestMathBits', + 'test_neg_view'), + DecorateInfo(unittest.skip("Expected: empty_strided is not comparable"), + 'TestCommon', + 'test_python_ref_executor'), + DecorateInfo(unittest.skip('output is non-deterministic'), 'TestCommon', 'test_compare_cpu'), + ), + ), + PythonRefInfo( + "_refs.new_full", + torch_opinfo_name="new_full", + ), + PythonRefInfo( + "_refs.new_ones", + torch_opinfo_name="new_ones", + ), + PythonRefInfo( + "_refs.new_zeros", + torch_opinfo_name="new_zeros", + ), + # + # Conditional Reference OpInfos + # + PythonRefInfo( + "_refs.masked_fill", + torch_opinfo_name="masked_fill", + skips=( + DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_python_ref_errors'), + ), + ), + PythonRefInfo( + "_refs.where", + torch_opinfo_name="where", + op=lambda self, condition, other: refs.where(condition, self, other), + skips=( + DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_python_ref_errors', device_type='cuda'), + ), + ), + PythonRefInfo( + "_refs.index_select", + torch_opinfo_name="index_select", + # empty_strided + skips=( + # no _refs support for Tensor.__setitem__ + DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_python_ref'), + # Sample out= with a stride of zero. This _out operation checks that the input has no + # inner overlap + DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_python_ref_errors'),) + ), + PythonRefInfo( + "_refs.index_copy", + torch_opinfo_name="index_copy", + # empty_strided + skips=( + # no _refs support for Tensor.__setitem__ + DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_python_ref'), + ), + ), + PythonRefInfo( + "_refs.index_add", + torch_opinfo_name="index_add", + # empty_strided + skips=( + # no _refs support for Tensor.__setitem__ + DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_python_ref'), + DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_python_ref_errors'), + ), + ), + PythonRefInfo( + "_refs.index_fill", + torch_opinfo_name="index_fill", + # empty_strided + skips=( + # no _refs support for Tensor.__setitem__ + DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_python_ref'),) + ), + # + # Test-related functions + # + PythonRefInfo( + "_refs.allclose", + torch_opinfo_name="allclose", + ), + # + # Misc functions + # + PythonRefInfo( + "_refs.stft", + torch_opinfo_name="stft", + skips=[ + # RuntimeError: no _refs support for aten.pad + DecorateInfo( + unittest.expectedFailure, 'TestCommon', 'test_python_ref' + ), + ], + ), + PythonRefInfo( + "_refs.istft", + torch_opinfo_name="istft", + skips=[ + # RuntimeError: no _refs support for aten.unfold_backward + DecorateInfo( + unittest.expectedFailure, 'TestCommon', 'test_python_ref' + ), + ], + ), + PythonRefInfo( + "_refs.view_as_complex", + torch_opinfo_name="view_as_complex", + ), +] +python_ref_db += opinfo.definitions.python_ref_db + +# Common operator groupings +ops_and_refs = op_db + python_ref_db +unary_ufuncs = [op for op in ops_and_refs if isinstance(op, UnaryUfuncInfo)] +binary_ufuncs = [op for op in ops_and_refs if 
isinstance(op, BinaryUfuncInfo)] +binary_ufuncs_and_refs = tuple(op for op in ops_and_refs if isinstance(op, BinaryUfuncInfo)) +spectral_funcs = [op for op in ops_and_refs if isinstance(op, SpectralFuncInfo)] +sparse_unary_ufuncs = [op for op in op_db if isinstance(op, UnaryUfuncInfo) and op.supports_sparse] +sparse_csr_unary_ufuncs = [op for op in op_db if isinstance(op, UnaryUfuncInfo) and op.supports_sparse_csr] +sparse_reduction_ops = [op for op in op_db if isinstance(op, ReductionOpInfo) and op.supports_sparse] +shape_funcs = [op for op in ops_and_refs if isinstance(op, ShapeFuncInfo)] +reduction_ops = [op for op in ops_and_refs if isinstance(op, ReductionOpInfo)] +reference_filtered_ops = [op for op in reduction_ops if op.ref is not None] +reference_masked_ops = [op for op in reference_filtered_ops if op.name.startswith('masked.')] +sparse_masked_reduction_ops = [op for op in sparse_reduction_ops if op.name.startswith('masked.')] + +# TODO: review porting these to make_tensor +def index_variable(shape, max_indices, device=torch.device('cpu')): + if not isinstance(shape, tuple): + shape = (shape,) + index = torch.rand(*shape, dtype=torch.double, device=device).mul_(max_indices).floor_().long() + return index + +def gather_variable(shape, index_dim, max_indices, duplicate=False, device=torch.device('cpu')): + assert len(shape) == 2 + assert index_dim < 2 + batch_dim = 1 - index_dim + index = torch.zeros(*shape, dtype=torch.long, device=device) + for i in range(shape[index_dim]): + index.select(index_dim, i).copy_( + torch.randperm(max_indices, device=device)[:shape[batch_dim]]) + if duplicate: + index.select(batch_dim, 0).copy_(index.select(batch_dim, 1)) + return index + +def bernoulli_scalar(): + return torch.tensor(0, dtype=torch.bool).bernoulli_() + +def mask_not_all_zeros(shape): + assert len(shape) > 0 + while True: + result = torch.randn(shape).gt(0) + if result.sum() > 0: + return result + +# Copied from functorch +def xfail(op_name, variant_name='', *, device_type=None, dtypes=None): + return (op_name, variant_name, device_type, dtypes, True) + + +def skip(op_name, variant_name='', *, device_type=None, dtypes=None): + return (op_name, variant_name, device_type, dtypes, False) + + +def skipOps(test_case_name, base_test_name, to_skip): + all_opinfos = op_db + for xfail in to_skip: + op_name, variant_name, device_type, dtypes, expected_failure = xfail + matching_opinfos = [o for o in all_opinfos + if o.name == op_name and o.variant_test_name == variant_name] + assert len(matching_opinfos) >= 1, f"Couldn't find OpInfo for {xfail}" + for op in matching_opinfos: + decorators = list(op.decorators) + if expected_failure: + decorator = DecorateInfo(unittest.expectedFailure, + test_case_name, base_test_name, + device_type=device_type, dtypes=dtypes) + decorators.append(decorator) + else: + decorator = DecorateInfo(unittest.skip("Skipped!"), + test_case_name, base_test_name, + device_type=device_type, dtypes=dtypes) + decorators.append(decorator) + op.decorators = tuple(decorators) + + # This decorator doesn't modify fn in any way + def wrapped(fn): + return fn + return wrapped diff --git a/env-llmeval/lib/python3.10/site-packages/torch/testing/_internal/common_modules.py b/env-llmeval/lib/python3.10/site-packages/torch/testing/_internal/common_modules.py new file mode 100644 index 0000000000000000000000000000000000000000..00bc4313648102c681c35ddf5809204536997e96 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torch/testing/_internal/common_modules.py @@ -0,0 +1,3568 @@ 
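+# Illustrative usage sketch (hypothetical, kept in comments; the test class name, the
+# 'nn.Linear' filter, and the reliance on the module_db registry assembled later in this
+# file are assumptions for illustration, not part of this file): the ModuleInfo entries
+# defined below are consumed by device-parametrized tests through the `modules` decorator
+# defined in this file, roughly like so:
+#
+#     from torch.testing._internal.common_device_type import instantiate_device_type_tests
+#     from torch.testing._internal.common_modules import modules, module_db
+#     from torch.testing._internal.common_utils import TestCase, run_tests
+#
+#     class TestModuleSmoke(TestCase):
+#         @modules([m for m in module_db if m.name == 'nn.Linear'])
+#         def test_forward(self, device, dtype, module_info, training):
+#             for module_input in module_info.module_inputs_func(
+#                     module_info, device=device, dtype=dtype,
+#                     requires_grad=False, training=training):
+#                 c_args, c_kwargs = module_input.constructor_input.args, module_input.constructor_input.kwargs
+#                 m = module_info.module_cls(*c_args, **c_kwargs).to(device).to(dtype)
+#                 m.train(training)
+#                 m(*module_input.forward_input.args, **module_input.forward_input.kwargs)
+#
+#     instantiate_device_type_tests(TestModuleSmoke, globals())
+#     if __name__ == '__main__':
+#         run_tests()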
+import torch +import unittest +from copy import deepcopy +from enum import Enum +from functools import wraps, partial +from itertools import chain, product +import itertools +import math +import torch.nn.functional as F +from torch.nn.utils.rnn import pack_padded_sequence +from torch.testing import make_tensor +from torch.testing._internal.common_cuda import TEST_CUDNN +from torch.testing._internal.common_dtype import ( + floating_types, floating_and_complex_types_and, get_all_fp_dtypes, complex_types_and) +from torch.testing._internal.common_device_type import ( + _TestParametrizer, _update_param_kwargs, toleranceOverride, tol, + skipCUDAIfCudnnVersionLessThan, skipCUDAIfRocm, precisionOverride, skipMeta, skipMPS, skipCUDAVersionIn) +from torch.testing._internal.common_methods_invocations import DecorateInfo +from torch.testing._internal.common_nn import nllloss_reference, get_reduction +from torch.testing._internal.common_utils import ( + freeze_rng_state, set_single_threaded_if_parallel_tbb, skipIfMps, GRADCHECK_NONDET_TOL, TEST_WITH_ROCM, IS_WINDOWS) +from types import ModuleType +from typing import List, Tuple, Type, Set, Dict + +# List of all namespaces containing modules to test. +MODULE_NAMESPACES: List[ModuleType] = [ + torch.nn.modules, + torch.ao.nn.qat.modules, + torch.ao.nn.quantizable.modules, + torch.ao.nn.quantized.modules, + torch.ao.nn.quantized.modules, +] + +# Modules that shouldn't be tested for one reason or another. +MODULES_TO_SKIP: Set[Type] = { + torch.nn.Module, # abstract base class + torch.nn.Container, # deprecated + torch.nn.NLLLoss2d, # deprecated + torch.ao.nn.quantized.MaxPool2d, # aliases to nn.MaxPool2d + torch.ao.nn.quantized.MaxPool2d, # aliases to nn.MaxPool2d +} + +# List of all module classes to test. +MODULE_CLASSES: List[Type] = list(chain(*[ + [getattr(namespace, module_name) for module_name in namespace.__all__] # type: ignore[attr-defined] + for namespace in MODULE_NAMESPACES])) +MODULE_CLASSES = [cls for cls in MODULE_CLASSES if cls not in MODULES_TO_SKIP] + +# Dict of module class -> common name. Useful for making test names more intuitive. +# Example: torch.nn.modules.linear.Linear -> "nn.Linear" +MODULE_CLASS_NAMES: Dict[Type, str] = {} +for namespace in MODULE_NAMESPACES: + for module_name in namespace.__all__: # type: ignore[attr-defined] + module_cls = getattr(namespace, module_name) + namespace_name = namespace.__name__.replace('torch.', '').replace('.modules', '') + + # Deal with any aliases by preferring earlier names. + if module_cls not in MODULE_CLASS_NAMES: + MODULE_CLASS_NAMES[module_cls] = f'{namespace_name}.{module_name}' + + +# Specifies the modes (i.e. train, eval) to test over. +TrainEvalMode = Enum('TrainEvalMode', ('train_only', 'eval_only', 'train_and_eval')) + + +class modules(_TestParametrizer): + """ PROTOTYPE: Decorator for specifying a list of modules over which to run a test. 
""" + + def __init__(self, module_info_iterable, allowed_dtypes=None, train_eval_mode=TrainEvalMode.train_and_eval): + self.module_info_list = list(module_info_iterable) + self.allowed_dtypes = set(allowed_dtypes) if allowed_dtypes is not None else None + self.train_eval_mode = train_eval_mode + + def _get_training_flags(self, module_info): + training_flags = [] + if (self.train_eval_mode == TrainEvalMode.train_only or + self.train_eval_mode == TrainEvalMode.train_and_eval): + training_flags.append(True) + + if (self.train_eval_mode == TrainEvalMode.eval_only or + self.train_eval_mode == TrainEvalMode.train_and_eval): + training_flags.append(False) + + # If train and eval modes don't differ for the module, don't bother using more than one. + if not module_info.train_and_eval_differ: + training_flags = training_flags[:1] + + return training_flags + + def _parametrize_test(self, test, generic_cls, device_cls): + if device_cls is None: + raise RuntimeError('The @modules decorator is only intended to be used in a device-specific ' + 'context; use it with instantiate_device_type_tests() instead of ' + 'instantiate_parametrized_tests()') + + for module_info in self.module_info_list: + dtypes = set(module_info.dtypes) + if self.allowed_dtypes is not None: + dtypes = dtypes.intersection(self.allowed_dtypes) + + training_flags = self._get_training_flags(module_info) + for (training, dtype) in product(training_flags, dtypes): + # Construct the test name; device / dtype parts are handled outside. + # See [Note: device and dtype suffix placement] + test_name = module_info.formatted_name + if len(training_flags) > 1: + test_name += f"_{'train_mode' if training else 'eval_mode'}" + + # Construct parameter kwargs to pass to the test. + param_kwargs = {'module_info': module_info} + _update_param_kwargs(param_kwargs, 'dtype', dtype) + _update_param_kwargs(param_kwargs, 'training', training) + + try: + + @wraps(test) + def test_wrapper(*args, **kwargs): + return test(*args, **kwargs) + + decorator_fn = partial(module_info.get_decorators, generic_cls.__name__, + test.__name__, device_cls.device_type, dtype) + + yield (test_wrapper, test_name, param_kwargs, decorator_fn) + except Exception as ex: + # Provides an error message for debugging before rethrowing the exception + print(f"Failed to instantiate {test_name} for module {module_info.name}!") + raise ex + + +def get_module_common_name(module_cls): + if module_cls in MODULE_CLASS_NAMES: + # Example: "nn.Linear" + return MODULE_CLASS_NAMES[module_cls] + else: + return module_cls.__name__ + + +class FunctionInput: + """ Contains args and kwargs to pass as input to a function. """ + __slots__ = ['args', 'kwargs'] + + def __init__(self, *args, **kwargs): + self.args = args + self.kwargs = kwargs + + +class ModuleInput: + """ Contains args / kwargs for module instantiation + forward pass. """ + __slots__ = ['constructor_input', 'forward_input', 'desc', 'reference_fn'] + + def __init__(self, constructor_input, forward_input=None, desc='', reference_fn=None): + self.constructor_input = constructor_input # Inputs to pass during construction + self.forward_input = forward_input # Inputs to pass to forward() + self.desc = desc # Description for this set of inputs + self.reference_fn = reference_fn # Reference with signature: reference_fn(module, parameters, *args, **kwargs) + + if reference_fn is not None: + + @wraps(reference_fn) + def copy_reference_fn(m, *args, **kwargs): + # Copy inputs to avoid undesired side effects from calling the reference. 
+ args, kwargs = deepcopy(args), deepcopy(kwargs) + + # Note that module parameters are passed in for convenience. + return reference_fn(m, list(m.parameters()), *args, **kwargs) + + self.reference_fn = copy_reference_fn + +class ModuleErrorEnum(Enum): + """ Enumerates when error is raised when testing modules. """ + CONSTRUCTION_ERROR = 0 + FORWARD_ERROR = 1 + +class ErrorModuleInput: + """ + A ModuleInput that will cause the operation to throw an error plus information + about the resulting error. + """ + + __slots__ = ["module_error_input", "error_on", "error_type", "error_regex"] + + def __init__(self, + module_error_input, + *, + error_on=ModuleErrorEnum.CONSTRUCTION_ERROR, + error_type=RuntimeError, + error_regex): + self.module_error_input = module_error_input + self.error_on = error_on + self.error_type = error_type + self.error_regex = error_regex + + +class ModuleInfo: + """ Module information to be used in testing. """ + + def __init__(self, + module_cls, # Class object for the module under test + *, + module_inputs_func, # Function to generate module inputs + skips=(), # Indicates which tests to skip + decorators=None, # Additional decorators to apply to generated tests + dtypes=floating_types(), # dtypes this function is expected to work with + supports_gradgrad=True, # whether the op supports second order gradients + gradcheck_nondet_tol=0.0, # tolerance for nondeterminism while performing gradcheck + module_memformat_affects_out=False, # whether converting module to channels last will generate + # channels last output + train_and_eval_differ=False, # whether the module has differing behavior between train and eval + module_error_inputs_func=None, # Function to generate module inputs that error + ): + self.module_cls = module_cls + self.module_inputs_func = module_inputs_func + self.decorators = (*(decorators if decorators else []), *(skips if skips else [])) + self.dtypes = dtypes + self.supports_gradgrad = supports_gradgrad + self.gradcheck_nondet_tol = gradcheck_nondet_tol + self.module_memformat_affects_out = module_memformat_affects_out + self.train_and_eval_differ = train_and_eval_differ + self.module_error_inputs_func = module_error_inputs_func + + def get_decorators(self, test_class, test_name, device, dtype, param_kwargs): + result = [set_single_threaded_if_parallel_tbb] + for decorator in self.decorators: + if isinstance(decorator, DecorateInfo): + if decorator.is_active(test_class, test_name, device, dtype, param_kwargs): + result.extend(decorator.decorators) + else: + result.append(decorator) + return result + + @property + def name(self): + return get_module_common_name(self.module_cls) + + @property + def formatted_name(self): + return self.name.replace('.', '_') + +# Start of module inputs functions. 
+ +def module_inputs_torch_nn_Linear(module_info, device, dtype, requires_grad, training, **kwargs): + make_input = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad) + + module_inputs = [ + ModuleInput(constructor_input=FunctionInput(10, 8), + forward_input=FunctionInput(input=make_input((4, 10))), + reference_fn=lambda m, p, input: torch.mm(input, p[0].t()) + p[1].view(1, -1).expand(4, 8)), + ModuleInput(constructor_input=FunctionInput(10, 8, bias=False), + forward_input=FunctionInput(make_input((4, 10))), + desc='no_bias', + reference_fn=lambda m, p, i: torch.mm(i, p[0].t())), + ModuleInput(constructor_input=FunctionInput(3, 5), + forward_input=FunctionInput(make_input(3)), + desc='no_batch_dim', + reference_fn=lambda m, p, i: torch.mm(i.view(1, -1), p[0].t()).view(-1) + p[1]) + ] + + return module_inputs + + +def module_inputs_torch_nn_Bilinear(module_info, device, dtype, requires_grad, training, **kwargs): + make_input = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad) + + def bilinear_reference_fn(m, p, x1, x2, bias=True): + result = torch.einsum('bn,anm,bm->ba', x1, p[0], x2) + if bias: + if x1.shape[0] == 1: + result = result.view(-1) + p[1] + else: + result = result + p[1].view(1, -1).expand(x1.shape[0], p[0].shape[0]) + return result + + module_inputs = [ + ModuleInput(constructor_input=FunctionInput(2, 3, 4), + forward_input=FunctionInput(make_input((8, 2)), make_input((8, 3))), + reference_fn=bilinear_reference_fn), + ModuleInput(constructor_input=FunctionInput(2, 3, 4, bias=False), + forward_input=FunctionInput(make_input((8, 2)), make_input((8, 3))), + desc='no_bias', + reference_fn=lambda m, p, x1, x2: bilinear_reference_fn(m, p, x1, x2, bias=False)), + ModuleInput(constructor_input=FunctionInput(2, 3, 4), + forward_input=FunctionInput(make_input(2), make_input(3)), + desc='no_batch_dim', + reference_fn=lambda m, p, x1, x2: bilinear_reference_fn(m, p, x1.view(1, -1), x2.view(1, -1))), + ] + + return module_inputs + + +def module_inputs_torch_nn_NLLLoss(module_info, device, dtype, requires_grad, training, **kwargs): + def make_input(shape, device=device, dtype=dtype, requires_grad=requires_grad): + return make_tensor(shape, device=device, dtype=dtype, + requires_grad=False).log_softmax(dim=1).requires_grad_(requires_grad) + make_weight = partial(make_tensor, device=device, dtype=dtype, requires_grad=False) + + cases: List[Tuple[str, dict]] = [ + ('', {}), + ('reduction_sum', {'reduction': 'sum'}), + ('reduction_none', {'reduction': 'none'}), + ('ignore_index', {'ignore_index': 2}), + ('weights', {'weight': make_weight(10).abs()}), + ('weights_ignore_index', {'weight': make_weight(10).abs(), 'ignore_index': 2}), + ('weights_ignore_index_neg', {'weight': make_weight(10).abs(), 'ignore_index': -1}) + ] + + # TODO: Uncomment when negative weights is supported. 
+ # negative_weight = make_weight(10) + # negative_weight[0] = -1 + # cases.append(('weights_negative', {'weight': negative_weight})) + module_inputs = [] + for desc, constructor_kwargs in cases: + + def reference_fn(m, p, i, t, constructor_kwargs=constructor_kwargs): + return nllloss_reference(i, t, **constructor_kwargs) + + module_inputs.append( + ModuleInput(constructor_input=FunctionInput(**constructor_kwargs), + forward_input=FunctionInput(make_input((15, 10)), + torch.empty(15, device=device).uniform_().mul(10).floor().long()), + desc=desc, + reference_fn=reference_fn) + ) + + return module_inputs + + +def module_inputs_torch_nn_GaussianNLLLoss(module_info, device, dtype, requires_grad, training, **kwargs): + make_input = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad) + make_target = partial(make_tensor, device=device, dtype=dtype, requires_grad=False) + + cases: List[Tuple[str, dict]] = [ + ('', {}), + ('reduction_sum', {'reduction': 'sum'}), + ('reduction_mean', {'reduction': 'mean'}), + ('reduction_none', {'reduction': 'none'}), + ] + + module_inputs = [] + for desc, constructor_kwargs in cases: + module_inputs.append( + ModuleInput(constructor_input=FunctionInput(**constructor_kwargs), + forward_input=FunctionInput(make_input(3), + make_target(3), + make_input(1).abs()), + desc=desc, + reference_fn=no_batch_dim_reference_fn) + ) + + return module_inputs + + +def no_batch_dim_reference_fn(m, p, *args, **kwargs): + """Reference function for modules supporting no batch dimensions. + + Unbatched inputs are unsqueezed to form a + single batch input before passing them to the module. + The output is squeezed to compare with the + output of unbatched input to the module. + + Currently it only supports modules which return a single Tensor as output. + You can bind the following kwargs. + Kwargs: + batch_first[bool] : If True, all the Tensors in `args` while be unsqueezed at dim `0` . + and output will be squeezed at dim `0` else dim `1` for both. + kwargs_to_batchify[dict] : Dictionary specifying the name of the argument and dimension to unsqueeze. + Useful if there are few arguments whose batch dimension are different + from the ones selected by `batch_first`. + is_criterion[bool] : Specify if the module is a criterion and handle the reduction for output accordingly. + """ + def get_and_pop(key, default): + v = kwargs.get(key, default) + if key in kwargs: + kwargs.pop(key) + return v + + batch_dim = 0 if get_and_pop('batch_first', True) else 1 + kwargs_to_batchify = get_and_pop('kwargs_to_batchify', None) + is_criterion = get_and_pop('is_criterion', False) + + if kwargs_to_batchify is not None: + assert isinstance(kwargs_to_batchify, dict) + for k, v in kwargs.items(): + if k in kwargs_to_batchify and v is not None: + bdim = kwargs_to_batchify[k] + kwargs[k] = v.unsqueeze(bdim) + + single_batch_input_args = [input.unsqueeze(batch_dim) for input in args] + with freeze_rng_state(): + output = m(*single_batch_input_args, **kwargs).squeeze(batch_dim) + + if is_criterion: + reduction = get_reduction(m) + if reduction == 'none': + return output.squeeze(0) + return output + + +def no_batch_dim_reference_mha(m, p, *args, **kwargs): + """Reference function for MultiheadAttention supporting no batch dimensions. + + Unbatched inputs are unsqueezed to form a + single batch input before passing them to the module. + The output is squeezed to compare with the + output of unbatched input to the module. 
+ """ + batch_dim = 0 if kwargs.get('batch_first', True) else 1 + if 'batch_first' in kwargs: + kwargs.pop('batch_first') + if 'key_padding_mask' in kwargs and kwargs['key_padding_mask'] is not None: + kwargs['key_padding_mask'] = kwargs['key_padding_mask'].unsqueeze(0) + single_batch_input_args = [input.unsqueeze(batch_dim) for input in args] + with freeze_rng_state(): + output = m(*single_batch_input_args, **kwargs) + return (output[0].squeeze(batch_dim), output[1].squeeze(0)) + + +def no_batch_dim_reference_rnn_gru(m, p, *args, **kwargs): + """Reference function for RNN and GRU supporting no batch dimensions. + + Unbatched inputs are unsqueezed to form a + single batch input before passing them to the module. + The output is squeezed to compare with the + output of unbatched input to the module. + """ + if len(args) == 1: + inp, = args + h = None + elif len(args) == 2: + inp, h = args + h = h.unsqueeze(1) + + batch_dim = 0 if kwargs['batch_first'] else 1 + kwargs.pop('batch_first') + inp = inp.unsqueeze(batch_dim) + single_batch_input_args = (inp, h) + with freeze_rng_state(): + output = m(*single_batch_input_args, **kwargs) + return (output[0].squeeze(batch_dim), output[1].squeeze(1)) + + +def no_batch_dim_reference_lstm(m, p, *args, **kwargs): + """Reference function for LSTM supporting no batch dimensions. + + Unbatched inputs are unsqueezed to form a + single batch input before passing them to the module. + The output is squeezed to compare with the + output of unbatched input to the module. + """ + if len(args) == 1: + inp, = args + h = None + elif len(args) == 2: + inp, h = args + h = (h[0].unsqueeze(1), h[1].unsqueeze(1)) + + batch_dim = 0 if kwargs['batch_first'] else 1 + kwargs.pop('batch_first') + inp = inp.unsqueeze(batch_dim) + single_batch_input_args = (inp, h) + with freeze_rng_state(): + output = m(*single_batch_input_args, **kwargs) + return (output[0].squeeze(batch_dim), (output[1][0].squeeze(1), output[1][1].squeeze(1))) + + +def no_batch_dim_reference_lstmcell(m, p, *args, **kwargs): + """Reference function for LSTMCell supporting no batch dimensions. + + The module is passed the input and target in batched form with a single item. + The output is squeezed to compare with the no-batch input. 
+ """ + inp, (h, c) = args + single_batch_input_args = (inp.unsqueeze(0), (h.unsqueeze(0), c.unsqueeze(0))) + with freeze_rng_state(): + output = m(*single_batch_input_args, **kwargs) + return (output[0].squeeze(0), output[1].squeeze(0)) + + +def generate_regression_criterion_inputs(make_input): + return [ + ModuleInput( + constructor_input=FunctionInput(reduction=reduction), + forward_input=FunctionInput(make_input((4, )), make_input(4,)), + reference_fn=partial(no_batch_dim_reference_fn, is_criterion=True), + desc=f'no_batch_dim_{reduction}' + ) for reduction in ['none', 'mean', 'sum']] + + +def module_inputs_torch_nn_AvgPool1d(module_info, device, dtype, requires_grad, training, **kwargs): + make_input = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad) + + return [ + ModuleInput(constructor_input=FunctionInput(kernel_size=2), + forward_input=FunctionInput(make_input((3, 6))), + desc='no_batch_dim', + reference_fn=no_batch_dim_reference_fn), + ModuleInput(constructor_input=FunctionInput(2), + forward_input=FunctionInput(make_input((2, 3, 6)))), + ModuleInput(constructor_input=FunctionInput((2,), (2,)), + forward_input=FunctionInput(make_input((2, 3, 6))), + desc='stride'), + ModuleInput(constructor_input=FunctionInput(2, 2, 1), + forward_input=FunctionInput(make_input((2, 3, 6))), + desc='stride_pad')] + + +def module_inputs_torch_nn_AvgPool2d(module_info, device, dtype, requires_grad, training, **kwargs): + make_input = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad) + + return [ + ModuleInput(constructor_input=FunctionInput((2, 2)), + forward_input=FunctionInput(make_input((3, 6, 6))), + desc='no_batch_dim', + reference_fn=no_batch_dim_reference_fn), + ModuleInput(constructor_input=FunctionInput((2, 2)), + forward_input=FunctionInput(make_input((2, 3, 6, 6)))), + ModuleInput(constructor_input=FunctionInput((2, 2), (2, 2)), + forward_input=FunctionInput(make_input((2, 3, 6, 6))), + desc='stride'), + ModuleInput(constructor_input=FunctionInput((2, 2), (2, 2), (1, 1)), + forward_input=FunctionInput(make_input((2, 3, 6, 6))), + desc='stride_pad'), + ModuleInput(constructor_input=FunctionInput((2, 2), divisor_override=1), + forward_input=FunctionInput(make_input((2, 3, 6, 6))), + desc='divisor'), + ModuleInput(constructor_input=FunctionInput((2, 2), (2, 2), divisor_override=1), + forward_input=FunctionInput(make_input((2, 3, 6, 6))), + desc='divisor_stride'), + ModuleInput(constructor_input=FunctionInput((2, 2), (2, 2), (1, 1), divisor_override=1), + forward_input=FunctionInput(make_input((2, 3, 6, 6))), + desc='divisor_stride_pad')] + + + +def module_inputs_torch_nn_AvgPool3d(module_info, device, dtype, requires_grad, training, **kwargs): + make_input = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad) + + return [ + ModuleInput(constructor_input=FunctionInput((2, 2, 2)), + forward_input=FunctionInput(make_input((3, 4, 4, 4))), + desc='no_batch_dim', + reference_fn=no_batch_dim_reference_fn), + ModuleInput(constructor_input=FunctionInput((2, 2, 2)), + forward_input=FunctionInput(make_input((2, 3, 4, 4, 4)))), + ModuleInput(constructor_input=FunctionInput(2, (2, 2, 2)), + forward_input=FunctionInput(make_input((2, 3, 5, 5, 5))), + desc='stride'), + ModuleInput(constructor_input=FunctionInput(2, 2, (1, 1, 1)), + forward_input=FunctionInput(make_input((2, 3, 5, 5, 5))), + desc='stride_pad'), + ModuleInput(constructor_input=FunctionInput(4, 2, (1, 2, 1)), + forward_input=FunctionInput(make_input((2, 3, 5, 5, 
5))), + desc='stride_pad_gpu_fixedkw_output'), + ModuleInput(constructor_input=FunctionInput((2, 4, 8), 1, (1, 1, 2)), + forward_input=FunctionInput(make_input((2, 3, 2, 4, 8))), + desc='stride_pad_gpu_general_output'), + ModuleInput(constructor_input=FunctionInput(3, 1, 0), + forward_input=FunctionInput(make_input((2, 3, 4, 4, 4))), + desc='stride1_pad0_gpu_input'), + ModuleInput(constructor_input=FunctionInput(2, 2, (1, 1, 1)), + forward_input=FunctionInput(make_input((2, 3, 4, 4, 4))), + desc='stride_pad_gpu_input_nooverlap'), + ModuleInput(constructor_input=FunctionInput((2, 2, 2), divisor_override=1), + forward_input=FunctionInput(make_input((2, 3, 4, 4, 4))), + desc='divisor'), + ModuleInput(constructor_input=FunctionInput(2, (2, 2, 2), divisor_override=1), + forward_input=FunctionInput(make_input((2, 3, 5, 5, 5))), + desc='divisor_stride'), + ModuleInput(constructor_input=FunctionInput(2, 2, (1, 1, 1), divisor_override=1), + forward_input=FunctionInput(make_input((2, 3, 5, 5, 5))), + desc='divisor_stride_pad'), + ModuleInput(constructor_input=FunctionInput(4, 2, (1, 2, 1), divisor_override=1), + forward_input=FunctionInput(make_input((2, 3, 5, 5, 5))), + desc='divisor_stride_pad_gpu_fixedkw_output'), + ModuleInput(constructor_input=FunctionInput((2, 4, 8), 1, (1, 1, 2), divisor_override=1), + forward_input=FunctionInput(make_input((2, 3, 2, 4, 8))), + desc='divisor_stride_pad_gpu_general_output'), + ModuleInput(constructor_input=FunctionInput(3, 1, 0, divisor_override=1), + forward_input=FunctionInput(make_input((2, 3, 4, 4, 4))), + desc='divisor_stride1_pad0_gpu_input'), + ModuleInput(constructor_input=FunctionInput(2, 2, (1, 1, 1), divisor_override=1), + forward_input=FunctionInput(make_input((2, 3, 4, 4, 4))), + desc='divisor_stride_pad_gpu_input_nooverlap')] + + + +def module_inputs_torch_nn_AdaptiveAvgPool1d(module_info, device, dtype, requires_grad, training, **kwargs): + make_input = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad) + + return [ + ModuleInput(constructor_input=FunctionInput(3,), + forward_input=FunctionInput(make_input((1, 3, 5))), + desc='single'), + ModuleInput(constructor_input=FunctionInput(3,), + forward_input=FunctionInput(make_input((3, 5))), + reference_fn=no_batch_dim_reference_fn, + desc='no_batch_dim'), + ModuleInput(constructor_input=FunctionInput(1,), + forward_input=FunctionInput(make_input((1, 3, 5))), + desc='one_output')] + + +def module_inputs_torch_nn_AdaptiveAvgPool2d(module_info, device, dtype, requires_grad, training, **kwargs): + make_input = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad) + + return [ + ModuleInput(constructor_input=FunctionInput(3,), + forward_input=FunctionInput(make_input((1, 3, 5, 6))), + desc='single'), + ModuleInput(constructor_input=FunctionInput(3,), + forward_input=FunctionInput(make_input((3, 5, 6))), + reference_fn=no_batch_dim_reference_fn, + desc='no_batch_dim'), + ModuleInput(constructor_input=FunctionInput(1,), + forward_input=FunctionInput(make_input((1, 3, 5, 6))), + desc='single_1x1output'), + ModuleInput(constructor_input=FunctionInput((3, 4)), + forward_input=FunctionInput(make_input((1, 3, 5, 6))), + desc='tuple'), + ModuleInput(constructor_input=FunctionInput((3, None)), + forward_input=FunctionInput(make_input((1, 3, 5, 6))), + desc='tuple_none')] + +def module_inputs_torch_nn_AdaptiveAvgPool3d(module_info, device, dtype, requires_grad, training, **kwargs): + make_input = partial(make_tensor, device=device, dtype=dtype, 
requires_grad=requires_grad) + + return [ + ModuleInput(constructor_input=FunctionInput(3,), + forward_input=FunctionInput(make_input((2, 3, 5, 2, 7))), + desc='single'), + ModuleInput(constructor_input=FunctionInput(3,), + forward_input=FunctionInput(make_input((3, 5, 2, 7))), + reference_fn=no_batch_dim_reference_fn, + desc='no_batch_dim'), + ModuleInput(constructor_input=FunctionInput((3, 4, 5)), + forward_input=FunctionInput(make_input((2, 3, 5, 3, 7))), + desc='tuple'), + ModuleInput(constructor_input=FunctionInput((None, 4, 5)), + forward_input=FunctionInput(make_input((2, 3, 5, 3, 7))), + desc='tuple_none'), + ModuleInput(constructor_input=FunctionInput((3, 2, 2)), + forward_input=FunctionInput(make_input((1, 1, 3, 2, 6))), + desc='last_dim')] + + +def module_inputs_torch_nn_AdaptiveMaxPool1d(module_info, device, dtype, requires_grad, training, **kwargs): + make_input = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad) + + return [ + ModuleInput(constructor_input=FunctionInput(3,), + forward_input=FunctionInput(make_input((1, 3, 5))), + desc='single'), + ModuleInput(constructor_input=FunctionInput(3,), + forward_input=FunctionInput(make_input((3, 5))), + reference_fn=no_batch_dim_reference_fn, + desc='no_batch_dim')] + + +def module_inputs_torch_nn_AdaptiveMaxPool2d(module_info, device, dtype, requires_grad, training, **kwargs): + make_input = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad) + + return [ + ModuleInput(constructor_input=FunctionInput(3,), + forward_input=FunctionInput(make_input((1, 3, 5, 6))), + desc='single'), + ModuleInput(constructor_input=FunctionInput(3,), + forward_input=FunctionInput(make_input((3, 5, 6))), + reference_fn=no_batch_dim_reference_fn, + desc='no_batch_dim'), + ModuleInput(constructor_input=FunctionInput((3, 4)), + forward_input=FunctionInput(make_input((1, 3, 5, 6))), + desc='tuple'), + ModuleInput(constructor_input=FunctionInput((3, None)), + forward_input=FunctionInput(make_input((1, 3, 5, 6))), + desc='tuple_none')] + + +def module_inputs_torch_nn_AdaptiveMaxPool3d(module_info, device, dtype, requires_grad, training, **kwargs): + make_input = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad) + + return [ + ModuleInput(constructor_input=FunctionInput(3,), + forward_input=FunctionInput(make_input((2, 3, 5, 6, 7))), + desc='single'), + ModuleInput(constructor_input=FunctionInput(3,), + forward_input=FunctionInput(make_input((3, 5, 6, 7))), + reference_fn=no_batch_dim_reference_fn, + desc='no_batch_dim'), + ModuleInput(constructor_input=FunctionInput((3, 4, 5)), + forward_input=FunctionInput(make_input((2, 3, 5, 6, 7))), + desc='tuple'), + ModuleInput(constructor_input=FunctionInput((3, None, 5)), + forward_input=FunctionInput(make_input((2, 3, 5, 6, 7))), + desc='tuple_none'), + ModuleInput(constructor_input=FunctionInput(3), + forward_input=FunctionInput(make_input((2, 3, 12, 9, 3))), + desc='single_nonatomic'), + ModuleInput(constructor_input=FunctionInput((3, 4, 5)), + forward_input=FunctionInput(make_input((2, 3, 6, 4, 10))), + desc='tuple_nonatomic')] + + +def module_inputs_torch_nn_BatchNorm1d(module_info, device, dtype, requires_grad, training, **kwargs): + make_input = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad) + + return [ + ModuleInput(constructor_input=FunctionInput(10,), + forward_input=FunctionInput(make_input((4, 10))), + desc='affine'), + ModuleInput(constructor_input=FunctionInput(5,), + 
forward_input=FunctionInput(make_input((4, 5, 3))), + desc='3d_input'), + ModuleInput(constructor_input=FunctionInput(10, 1e-3, None), + forward_input=FunctionInput(make_input((4, 10))), + desc='affine_simple_average'), + ModuleInput(constructor_input=FunctionInput(10, 1e-3, 0.3, False), + forward_input=FunctionInput(make_input((4, 10))), + desc='not_affine'), + ModuleInput(constructor_input=FunctionInput(10, 1e-3, 0.3, True, False), + forward_input=FunctionInput(make_input((4, 10))), + desc='not_tracking_stats'), + ModuleInput(constructor_input=FunctionInput(5, 1e-3, 0.3, False), + forward_input=FunctionInput(make_input((4, 5, 3))), + desc='3d_input_not_affine'), + ModuleInput(constructor_input=FunctionInput(5, 1e-3, 0.3, False), + forward_input=FunctionInput(make_input((0, 5, 9))), + desc='zero_batch')] + + +def module_inputs_torch_nn_BatchNorm2d(module_info, device, dtype, requires_grad, training, **kwargs): + make_input = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad) + + return [ + ModuleInput(constructor_input=FunctionInput(3,), + forward_input=FunctionInput(make_input((2, 3, 6, 6)))), + ModuleInput(constructor_input=FunctionInput(3, 1e-3, None), + forward_input=FunctionInput(make_input((2, 3, 6, 6))), + desc='2d_simple_average'), + ModuleInput(constructor_input=FunctionInput(3, 1e-3, 0.8), + forward_input=FunctionInput(make_input((2, 3, 6, 6))), + desc='momentum'), + ModuleInput(constructor_input=FunctionInput(3, 1e-3, 0.8, False), + forward_input=FunctionInput(make_input((2, 3, 6, 6))), + desc='not_affine'), + ModuleInput(constructor_input=FunctionInput(3, 1e-3, 0.8, True, False), + forward_input=FunctionInput(make_input((2, 3, 6, 6))), + desc='not_tracking_stats'), + ModuleInput(constructor_input=FunctionInput(5, 1e-3, 0.3, False), + forward_input=FunctionInput(make_input((0, 5, 2, 2))), + desc='zero_batch')] + + +def module_inputs_torch_nn_BatchNorm3d(module_info, device, dtype, requires_grad, training, **kwargs): + make_input = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad) + + return [ + ModuleInput(constructor_input=FunctionInput(3,), + forward_input=FunctionInput(make_input((2, 3, 4, 4, 4)))), + ModuleInput(constructor_input=FunctionInput(3, 1e-3, None), + forward_input=FunctionInput(make_input((2, 3, 4, 4, 4))), + desc='3d_simple_average'), + ModuleInput(constructor_input=FunctionInput(3, 1e-3, 0.7), + forward_input=FunctionInput(make_input((2, 3, 4, 4, 4))), + desc='momentum'), + ModuleInput(constructor_input=FunctionInput(3, 1e-3, 0.7, False), + forward_input=FunctionInput(make_input((2, 3, 4, 4, 4))), + desc='not_affine'), + ModuleInput(constructor_input=FunctionInput(3, 1e-3, 0.7, True, False), + forward_input=FunctionInput(make_input((2, 3, 4, 4, 4))), + desc='not_tracking_stats'), + ModuleInput(constructor_input=FunctionInput(5, 1e-3, 0.3, False), + forward_input=FunctionInput(make_input((0, 5, 2, 2, 2))), + desc='zero_batch')] + + +def module_inputs_torch_nn_ConvNd(module_info, device, dtype, requires_grad, training, **kwargs): + N = kwargs['N'] + lazy = kwargs.get('lazy', False) + transposed = kwargs.get('transposed', False) + make_input = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad) + conv_kwargs_list = [{}] if transposed else [{}, {'padding': 'same'}] + kernel_size, C_in, C_out = 3, 4, 5 + input_no_batch_shape = (C_in,) + tuple(i + 3 for i in range(N)) + input_batch_shape = (2,) + input_no_batch_shape + return [ + ModuleInput(constructor_input=(FunctionInput(C_out, 
kernel_size, **conv_kwargs) if lazy else + FunctionInput(C_in, C_out, kernel_size, **conv_kwargs)), + forward_input=FunctionInput(make_input( + input_batch_shape if with_batch else input_no_batch_shape)), + desc=('' if with_batch else 'no_batch_dim'), + reference_fn=(None if with_batch else no_batch_dim_reference_fn)) + for with_batch, conv_kwargs in itertools.product([True, False], conv_kwargs_list) + ] + + +def module_inputs_torch_nn_ELU(module_info, device, dtype, requires_grad, training, **kwargs): + make_input = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad) + + return [ + ModuleInput(constructor_input=FunctionInput(alpha=2.), + forward_input=FunctionInput(make_input((3, 2, 5))), + reference_fn=lambda m, p, i: torch.where(i >= 0, i, 2 * (i.exp() - 1))), + ModuleInput(constructor_input=FunctionInput(alpha=2.), + forward_input=FunctionInput(make_input(())), + desc='scalar'), + ModuleInput(constructor_input=FunctionInput(), + forward_input=FunctionInput(make_input((3,))), + desc='no_batch_dim', + reference_fn=no_batch_dim_reference_fn), + ModuleInput(constructor_input=FunctionInput(alpha=2.), + forward_input=FunctionInput(make_input((2, 3, 2, 5))), + desc='4d_input')] + + +def module_inputs_torch_nn_CELU(module_info, device, dtype, requires_grad, training, **kwargs): + make_input = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad) + + return [ + ModuleInput(constructor_input=FunctionInput(alpha=2.), + forward_input=FunctionInput(make_input((3, 2, 5))), + reference_fn=lambda m, p, i: torch.where(i >= 0, i, 2. * ((.5 * i).exp() - 1))), + ModuleInput(constructor_input=FunctionInput(alpha=2.), + forward_input=FunctionInput(make_input(())), + reference_fn=lambda m, p, i: torch.where(i >= 0, i, 2. 
* ((.5 * i).exp() - 1)), + desc='scalar'), + ModuleInput(constructor_input=FunctionInput(alpha=2.), + forward_input=FunctionInput(make_input((3,))), + desc='no_batch_dim', + reference_fn=no_batch_dim_reference_fn)] + + +def module_inputs_torch_nn_GLU(module_info, device, dtype, requires_grad, training, **kwargs): + make_input = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad) + + return [ + ModuleInput(constructor_input=FunctionInput(), + forward_input=FunctionInput(make_input((5, 6)))), + ModuleInput(constructor_input=FunctionInput(1), + forward_input=FunctionInput(make_input((5, 6, 7))), + desc='dim'), + ModuleInput(constructor_input=FunctionInput(), + forward_input=FunctionInput(make_input((4,))), + desc='no_batch_dim', + reference_fn=no_batch_dim_reference_fn)] + + +def module_inputs_torch_nn_GELU(module_info, device, dtype, requires_grad, training, **kwargs): + make_input = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad) + + return [ + ModuleInput(constructor_input=FunctionInput('none'), + forward_input=FunctionInput(make_input(())), + reference_fn=lambda m, p, x, *_: x * 0.5 * (1.0 + torch.erf(x / math.sqrt(2.0))), + desc='scalar'), + ModuleInput(constructor_input=FunctionInput('none'), + forward_input=FunctionInput(make_input((3, 2, 5))), + reference_fn=lambda m, p, x, *_: x * 0.5 * (1.0 + torch.erf(x / math.sqrt(2.0)))), + ModuleInput(constructor_input=FunctionInput(), + forward_input=FunctionInput(make_input((3,))), + desc='no_batch_dim', + reference_fn=no_batch_dim_reference_fn)] + + +def module_inputs_torch_nn_ReLU(module_info, device, dtype, requires_grad, training, **kwargs): + make_input = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad) + + return [ + ModuleInput(constructor_input=FunctionInput(), + forward_input=FunctionInput(make_input(())), + desc='scalar'), + ModuleInput(constructor_input=FunctionInput(), + forward_input=FunctionInput(make_input(4)), + reference_fn=no_batch_dim_reference_fn, + desc='no_batch_dim'), + ModuleInput(constructor_input=FunctionInput(), + forward_input=FunctionInput(make_input((2, 3, 4, 5))), + desc='channels_last_mem_format'), + ModuleInput(constructor_input=FunctionInput(), + forward_input=FunctionInput(make_input((2, 3, 3, 4, 5))), + desc='channels_last_3d_mem_format')] + + +def module_inputs_torch_nn_ReLU6(module_info, device, dtype, requires_grad, training, **kwargs): + make_input = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad) + + return [ + ModuleInput(constructor_input=FunctionInput(), + forward_input=FunctionInput(make_input(())), + desc='scalar'), + ModuleInput(constructor_input=FunctionInput(), + forward_input=FunctionInput(make_input(4)), + reference_fn=no_batch_dim_reference_fn, + desc='no_batch_dim'), + ModuleInput(constructor_input=FunctionInput(), + forward_input=FunctionInput(make_input((2, 3, 4, 5))), + desc='channels_last_mem_format'), + ModuleInput(constructor_input=FunctionInput(), + forward_input=FunctionInput(make_input((2, 3, 3, 4, 5))), + desc='channels_last_3d_mem_format')] + + +def module_inputs_torch_nn_LeakyReLU(module_info, device, dtype, requires_grad, training, **kwargs): + make_input = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad) + + return [ + ModuleInput(constructor_input=FunctionInput(), + forward_input=FunctionInput(make_input((3, 2, 5)))), + ModuleInput(constructor_input=FunctionInput(), + forward_input=FunctionInput(make_input(4)), + 
reference_fn=no_batch_dim_reference_fn, + desc='no_batch_dim'), + ModuleInput(constructor_input=FunctionInput(0.5), + forward_input=FunctionInput(make_input((3, 2, 5))), + desc='with_negval'), + ModuleInput(constructor_input=FunctionInput(0.0), + forward_input=FunctionInput(make_input((10, 10))), + desc='with_zero_negval'), + ModuleInput(constructor_input=FunctionInput(0.5), + forward_input=FunctionInput(make_input(())), + desc='with_negval_scalar')] + + +def module_inputs_torch_nn_PReLU(module_info, device, dtype, requires_grad, training, **kwargs): + make_input = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad) + + return [ + ModuleInput(constructor_input=FunctionInput(), + forward_input=FunctionInput(make_input(())), + desc='scalar'), + ModuleInput(constructor_input=FunctionInput(), + forward_input=FunctionInput(make_input(4)), + reference_fn=no_batch_dim_reference_fn, + desc='no_batch_dim'), + ModuleInput(constructor_input=FunctionInput(), + forward_input=FunctionInput(make_input((2, 3, 4))), + reference_fn=lambda m, p, i: torch.clamp(i, min=0) + torch.clamp(i, max=0) * p[0][0], + desc='1d'), + ModuleInput(constructor_input=FunctionInput(3), + forward_input=FunctionInput(make_input((2, 3, 4))), + reference_fn=lambda m, p, i: torch.clamp(i, min=0) + torch.clamp(i, max=0) * p[0][0], + desc='1d_multiparam'), + ModuleInput(constructor_input=FunctionInput(), + forward_input=FunctionInput(make_input((2, 3, 4, 5))), + reference_fn=lambda m, p, i: torch.clamp(i, min=0) + torch.clamp(i, max=0) * p[0][0], + desc='2d'), + ModuleInput(constructor_input=FunctionInput(3), + forward_input=FunctionInput(make_input((2, 3, 4, 5))), + reference_fn=lambda m, p, i: torch.clamp(i, min=0) + torch.clamp(i, max=0) * p[0][0], + desc='2d_multiparam'), + ModuleInput(constructor_input=FunctionInput(), + forward_input=FunctionInput(make_input((2, 3, 4, 5, 6))), + reference_fn=lambda m, p, i: torch.clamp(i, min=0) + torch.clamp(i, max=0) * p[0][0], + desc='3d'), + ModuleInput(constructor_input=FunctionInput(3), + forward_input=FunctionInput(make_input((2, 3, 4, 5, 6))), + reference_fn=lambda m, p, i: torch.clamp(i, min=0) + torch.clamp(i, max=0) * p[0][0], + desc='3d_multiparam')] + + +def module_inputs_torch_nn_SELU(module_info, device, dtype, requires_grad, training, **kwargs): + make_input = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad) + + return [ + ModuleInput(constructor_input=FunctionInput(), + forward_input=FunctionInput(make_input((3, 2, 5)))), + ModuleInput(constructor_input=FunctionInput(), + forward_input=FunctionInput(make_input(4)), + reference_fn=no_batch_dim_reference_fn, + desc='no_batch_dim'), + ModuleInput(constructor_input=FunctionInput(), + forward_input=FunctionInput(make_input(())), + desc='scalar')] + + +def module_inputs_torch_nn_SiLU(module_info, device, dtype, requires_grad, training, **kwargs): + make_input = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad) + + return [ + ModuleInput(constructor_input=FunctionInput(), + forward_input=FunctionInput(make_input(())), + reference_fn=lambda m, p, x, *_: x * torch.sigmoid(x), + desc='scalar'), + ModuleInput(constructor_input=FunctionInput(), + forward_input=FunctionInput(make_input(4)), + reference_fn=no_batch_dim_reference_fn, + desc='no_batch_dim'), + ModuleInput(constructor_input=FunctionInput(), + forward_input=FunctionInput(make_input((5, 6, 7))), + reference_fn=lambda m, p, x, *_: x * torch.sigmoid(x))] + + +def 
module_inputs_torch_nn_Softmax(module_info, device, dtype, requires_grad, training, **kwargs): + make_input = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad) + + return [ + ModuleInput(constructor_input=FunctionInput(1), + forward_input=FunctionInput(make_input((10, 20))), + reference_fn=lambda m, p, i: torch.exp(i).div(torch.exp(i).sum(1, True).expand(10, 20))), + ModuleInput(constructor_input=FunctionInput(0), + forward_input=FunctionInput(make_input(())), + reference_fn=lambda m, p, i: torch.exp(i).div(torch.exp(i).sum(0, True)), + desc='scalar'), + ModuleInput(constructor_input=FunctionInput(-1), + forward_input=FunctionInput(make_input((4, 5))), + reference_fn=no_batch_dim_reference_fn, + desc='no_batch_dim')] + + +def module_inputs_torch_nn_Softmax2d(module_info, device, dtype, requires_grad, training, **kwargs): + make_input = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad) + + return [ + ModuleInput(constructor_input=FunctionInput(), + forward_input=FunctionInput(make_input((1, 3, 10, 20))), + reference_fn=lambda m, p, i: torch.exp(i).div(torch.exp(i).sum(1, False))), + ModuleInput(constructor_input=FunctionInput(), + forward_input=FunctionInput(make_input((3, 4, 5))), + reference_fn=no_batch_dim_reference_fn, + desc='no_batch_dim')] + + +def module_inputs_torch_nn_LogSoftmax(module_info, device, dtype, requires_grad, training, **kwargs): + make_input = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad) + + return [ + ModuleInput(constructor_input=FunctionInput(1), + forward_input=FunctionInput(make_input((10, 20))), + reference_fn=lambda m, p, i: torch.exp(i).div_(torch.exp(i).sum(1, True).expand(10, 20)).log_()), + ModuleInput(constructor_input=FunctionInput(1), + forward_input=FunctionInput(make_input((1, 3, 10, 20))), + reference_fn=lambda m, p, i: torch.exp(i).div_(torch.exp(i).sum(1, False)).log_(), + desc='multiparam'), + ModuleInput(constructor_input=FunctionInput(0), + forward_input=FunctionInput(make_input(())), + reference_fn=lambda m, p, i: torch.exp(i).div_(torch.exp(i).sum(0, False)).log_(), + desc='multiparam_scalar'), + ModuleInput(constructor_input=FunctionInput(-1), + forward_input=FunctionInput(make_input((4, 5))), + reference_fn=no_batch_dim_reference_fn, + desc='no_batch_dim')] + + +def module_inputs_torch_nn_Softmin(module_info, device, dtype, requires_grad, training, **kwargs): + make_input = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad) + + return [ + ModuleInput(constructor_input=FunctionInput(1), + forward_input=FunctionInput(make_input((10, 20)))), + ModuleInput(constructor_input=FunctionInput(1), + forward_input=FunctionInput(make_input((2, 3, 5, 10))), + desc='multidim'), + ModuleInput(constructor_input=FunctionInput(0), + forward_input=FunctionInput(make_input(())), + desc='scalar'), + ModuleInput(constructor_input=FunctionInput(-1), + forward_input=FunctionInput(make_input((3, 4, 10))), + reference_fn=no_batch_dim_reference_fn, + desc='no_batch_dim')] + + +def module_inputs_torch_nn_Softplus(module_info, device, dtype, requires_grad, training, **kwargs): + make_input = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad) + + return [ + ModuleInput(constructor_input=FunctionInput(), + forward_input=FunctionInput(make_input((10, 20))), + reference_fn=lambda m, p, i: torch.log(1 + torch.exp(i))), + ModuleInput(constructor_input=FunctionInput(2), + forward_input=FunctionInput(make_input((10, 20))), + 
reference_fn=lambda m, p, i: 1. / 2. * torch.log(1 + torch.exp(2 * i)), + desc='beta'), + ModuleInput(constructor_input=FunctionInput(2, -100), + forward_input=FunctionInput(make_input((10, 20))), + reference_fn=( + lambda m, p, i: ((i * 2) > -100).type_as(i) * i + + ((i * 2) <= -100).type_as(i) * 1. / 2. * torch.log(1 + torch.exp(2 * i))), + desc='beta_threshold'), + ModuleInput(constructor_input=FunctionInput(2, -100), + forward_input=FunctionInput(make_input(())), + reference_fn=( + lambda m, p, i: ((i * 2) > -100).type_as(i) * i + + ((i * 2) <= -100).type_as(i) * 1. / 2. * torch.log(1 + torch.exp(2 * i))), + desc='beta_threshold_scalar'), + ModuleInput(constructor_input=FunctionInput(), + forward_input=FunctionInput(make_input(4)), + reference_fn=no_batch_dim_reference_fn, + desc='no_batch_dim')] + + +def module_inputs_torch_nn_Softshrink(module_info, device, dtype, requires_grad, training, **kwargs): + make_input = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad) + + return [ + ModuleInput(constructor_input=FunctionInput(), + forward_input=FunctionInput(make_input((3, 2, 5)))), + ModuleInput(constructor_input=FunctionInput(1,), + forward_input=FunctionInput(make_input((3, 2, 5))), + desc='lambda'), + ModuleInput(constructor_input=FunctionInput(1,), + forward_input=FunctionInput(make_input(())), + desc='lambda_scalar'), + ModuleInput(constructor_input=FunctionInput(), + forward_input=FunctionInput(make_input(4)), + reference_fn=no_batch_dim_reference_fn, + desc='no_batch_dim')] + + +def module_inputs_torch_nn_Softsign(module_info, device, dtype, requires_grad, training, **kwargs): + make_input = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad) + + return [ + ModuleInput(constructor_input=FunctionInput(), + forward_input=FunctionInput(make_input((3, 2, 5))), + reference_fn=lambda m, p, i: i.div(1 + torch.abs(i))), + ModuleInput(constructor_input=FunctionInput(), + forward_input=FunctionInput(make_input(())), + reference_fn=lambda m, p, i: i.div(1 + torch.abs(i)), + desc='scalar'), + ModuleInput(constructor_input=FunctionInput(), + forward_input=FunctionInput(make_input(4)), + reference_fn=no_batch_dim_reference_fn, + desc='no_batch_dim')] + + +def module_inputs_torch_nn_Tanh(module_info, device, dtype, requires_grad, training, **kwargs): + make_input = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad) + + return [ + ModuleInput(constructor_input=FunctionInput(), + forward_input=FunctionInput(make_input((2, 3, 4, 5)))), + ModuleInput(constructor_input=FunctionInput(), + forward_input=FunctionInput(make_input(())), + desc='scalar'), + ModuleInput(constructor_input=FunctionInput(), + forward_input=FunctionInput(make_input(4)), + reference_fn=no_batch_dim_reference_fn, + desc='no_batch_dim')] + + + +def module_inputs_torch_nn_Tanhshrink(module_info, device, dtype, requires_grad, training, **kwargs): + make_input = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad) + + return [ + ModuleInput(constructor_input=FunctionInput(), + forward_input=FunctionInput(make_input((2, 3, 4, 5)))), + ModuleInput(constructor_input=FunctionInput(), + forward_input=FunctionInput(make_input(())), + desc='scalar'), + ModuleInput(constructor_input=FunctionInput(), + forward_input=FunctionInput(make_input(4)), + reference_fn=no_batch_dim_reference_fn, + desc='no_batch_dim')] + + +def module_inputs_torch_nn_Threshold(module_info, device, dtype, requires_grad, training, **kwargs): + make_input = 
partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad) + + return [ + ModuleInput(constructor_input=FunctionInput(2., 1.), + forward_input=FunctionInput(make_input((2, 3, 4, 5))), + desc='threshold_value'), + ModuleInput(constructor_input=FunctionInput(2., 10.), + forward_input=FunctionInput(make_input((2, 3, 4, 5))), + desc='large_value'), + ModuleInput(constructor_input=FunctionInput(2., 1.), + forward_input=FunctionInput(make_input(())), + desc='threshold_value_scalar'), + ModuleInput(constructor_input=FunctionInput(2., 1.), + forward_input=FunctionInput(make_input(4)), + reference_fn=no_batch_dim_reference_fn, + desc='no_batch_dim')] + + +def module_inputs_torch_nn_Mish(module_info, device, dtype, requires_grad, training, **kwargs): + make_input = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad) + + return [ + ModuleInput(constructor_input=FunctionInput(), + forward_input=FunctionInput(make_input((5, 6, 7))), + reference_fn=lambda m, p, i: i * torch.tanh(F.softplus(i))), + ModuleInput(constructor_input=FunctionInput(), + forward_input=FunctionInput(make_input(())), + reference_fn=lambda m, p, i: i * torch.tanh(F.softplus(i)), + desc='scalar'), + ModuleInput(constructor_input=FunctionInput(), + forward_input=FunctionInput(make_input(4)), + reference_fn=no_batch_dim_reference_fn, + desc='no_batch_dim')] + + +def module_inputs_torch_nn_L1Loss(module_info, device, dtype, requires_grad, training, **kwargs): + make_input = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad) + + return [ + ModuleInput(constructor_input=FunctionInput(), + forward_input=FunctionInput(make_input((2, 3, 4)), + make_input((2, 3, 4))), + reference_fn=lambda m, p, i, t: 1. / i.numel() * sum((a - b).abs().sum() + for a, b in zip(i, t))), + ModuleInput(constructor_input=FunctionInput(), + forward_input=FunctionInput(make_input(()), make_input(())), + reference_fn=lambda m, p, i, t: 1. / i.numel() * (i - t).abs().sum(), + desc='scalar')] + generate_regression_criterion_inputs(make_input) + + +def module_inputs_torch_nn_CrossEntropyLoss(module_info, device, dtype, requires_grad, training, **kwargs): + make_input = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad) + make_target = partial(make_tensor, device=device, dtype=torch.long, requires_grad=False) + make_weight = partial(make_tensor, device=device, dtype=dtype, requires_grad=False) + + reductions = ['sum', 'mean', 'none'] + samples = [] + # Samples below are for validating the no-batch-dim support. 
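+    # Each case below pairs an unbatched 9-class input of shape (9,) with a scalar
+    # class-index target drawn from [0, 9), optionally adding a class `weight` and/or
+    # `label_smoothing`, and uses no_batch_dim_reference_fn with is_criterion=True so the
+    # reference is computed by unsqueezing to a batch of one and squeezing the result back.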
+ for reduction in reductions: + samples.append( + ModuleInput(constructor_input=FunctionInput(reduction=reduction), + forward_input=FunctionInput(make_input((9,)), make_target((), low=0, high=9)), + reference_fn=partial(no_batch_dim_reference_fn, is_criterion=True)) + ) + samples.append( + ModuleInput(constructor_input=FunctionInput(reduction=reduction, weight=make_weight((9,))), + forward_input=FunctionInput(make_input((9,)), make_target((), low=0, high=9)), + reference_fn=partial(no_batch_dim_reference_fn, is_criterion=True)) + ) + samples.append( + ModuleInput(constructor_input=FunctionInput(reduction=reduction, label_smoothing=0.5), + forward_input=FunctionInput(make_input((9,)), make_target((), low=0, high=9)), + reference_fn=partial(no_batch_dim_reference_fn, is_criterion=True)) + ) + samples.append( + ModuleInput(constructor_input=FunctionInput(reduction=reduction, label_smoothing=0.5, + weight=make_weight((9,))), + forward_input=FunctionInput(make_input((9,)), make_target((), low=0, high=9)), + reference_fn=partial(no_batch_dim_reference_fn, is_criterion=True)) + ) + + return samples + + +def module_inputs_torch_nn_GroupNorm(module_info, device, dtype, requires_grad, training, **kwargs): + make_input = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad) + + return [ + ModuleInput( + constructor_input=FunctionInput(3, 6, 1e-3), + forward_input=FunctionInput(make_input((4, 6, 5))), + desc='1d_affine'), + ModuleInput( + constructor_input=FunctionInput(3, 12, 1e-3), + forward_input=FunctionInput(make_input((4, 12))), + desc='1d_affine_GN'), + ModuleInput( + constructor_input=FunctionInput(1, 6, 1e-3), + forward_input=FunctionInput(make_input((150, 6))), + desc='1d_affine_large_batch'), + ModuleInput( + constructor_input=FunctionInput(5, 5, 1e-3, False), + forward_input=FunctionInput(make_input((4, 5, 5))), + desc='1d_no_affine_IN'), + ModuleInput( + constructor_input=FunctionInput(1, 10, 1e-3, False), + forward_input=FunctionInput(make_input((4, 10))), + desc='1d_no_affine_LN'), + ModuleInput( + constructor_input=FunctionInput(3, 6, 1e-3), + forward_input=FunctionInput(make_input((4, 6, 2, 3))), + desc='2d_affine'), + ModuleInput( + constructor_input=FunctionInput(3, 6, 1e-3), + forward_input=FunctionInput(make_input((4, 6, 28, 28))), + desc='2d_affine_large_feature'), + ModuleInput( + constructor_input=FunctionInput(3, 51, 1e-5, False), + forward_input=FunctionInput(make_input((2, 51, 28, 28))), + desc='2d_no_affine_large_feature'), + ModuleInput( + constructor_input=FunctionInput(3, 3, 1e-3, False), + forward_input=FunctionInput(make_input((4, 3, 2, 3))), + desc='2d_no_affine_IN'), + ModuleInput( + constructor_input=FunctionInput(1, 3, 1e-3, False), + forward_input=FunctionInput(make_input((4, 3, 2, 3))), + desc='2d_no_affine_LN'), + ] + + +def module_inputs_torch_nn_Hardshrink(module_info, device, dtype, requires_grad, training, **kwargs): + make_input = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad) + + return [ + ModuleInput( + constructor_input=FunctionInput(2.), + forward_input=FunctionInput(make_input((4, 3, 2, 4))), + ), + ModuleInput( + constructor_input=FunctionInput(2.), + forward_input=FunctionInput(make_input(())), + desc='scalar', + ), + ModuleInput( + constructor_input=FunctionInput(), + forward_input=FunctionInput(make_input(4)), + reference_fn=no_batch_dim_reference_fn, + desc='no_batch_dim', + ) + ] + + +def module_inputs_torch_nn_Hardswish(module_info, device, dtype, requires_grad, training, **kwargs): + 
make_input = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad) + + return [ + ModuleInput( + constructor_input=FunctionInput(), + forward_input=FunctionInput(make_input(4)), + reference_fn=no_batch_dim_reference_fn, + desc='no_batch_dim', + ), + ModuleInput( + constructor_input=FunctionInput(), + forward_input=FunctionInput(make_input((2, 3, 2, 5))), + desc='4d_input') + ] + + +def module_inputs_torch_nn_Hardtanh(module_info, device, dtype, requires_grad, training, **kwargs): + make_input = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad) + + return [ + ModuleInput( + constructor_input=FunctionInput(), + forward_input=FunctionInput(make_input((3, 2, 5))), + reference_fn=lambda m, p, i: i.clamp(-1, 1), + ), + ModuleInput( + constructor_input=FunctionInput(), + forward_input=FunctionInput(make_input(())), + reference_fn=lambda m, p, i: i.clamp(-1, 1), + desc='scalar', + ), + ModuleInput( + constructor_input=FunctionInput(), + forward_input=FunctionInput(make_input(4)), + reference_fn=no_batch_dim_reference_fn, + desc='no_batch_dim', + ) + ] + + +def module_inputs_torch_nn_InstanceNormNd(module_info, device, dtype, requires_grad, training, **kwargs): + make_input = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad) + lazy = kwargs.get('lazy', False) + N = kwargs['N'] + num_features, eps, momentum, affine, track_running_stats = 3, 1e-3, 0.3, False, True + input_no_batch_shape_dict = {1: (3, 15), 2: (3, 6, 6), 3: (3, 4, 4, 4)} + input_no_batch_shape = input_no_batch_shape_dict[N] + input_batch_shape = (4,) + input_no_batch_shape + + return [ + ModuleInput( + constructor_input=( + FunctionInput(eps, momentum) if lazy else FunctionInput(num_features, eps, momentum) + ), + forward_input=FunctionInput(make_input(input_batch_shape))), + ModuleInput( + constructor_input=( + FunctionInput(eps, momentum, affine, track_running_stats) if lazy else + FunctionInput(num_features, eps, momentum, affine, track_running_stats) + ), + forward_input=FunctionInput(make_input(input_batch_shape)), + desc='tracking_stats'), + ModuleInput( + constructor_input=( + FunctionInput(eps, momentum) if lazy else FunctionInput(num_features, eps, momentum) + ), + forward_input=FunctionInput(make_input(input_no_batch_shape)), + reference_fn=no_batch_dim_reference_fn, + desc='tracking_stats_no_batch_dim'), + ModuleInput( + constructor_input=( + FunctionInput(eps, momentum, affine, track_running_stats) if lazy else + FunctionInput(num_features, eps, momentum, affine, track_running_stats) + ), + forward_input=FunctionInput(make_input(input_no_batch_shape)), + reference_fn=no_batch_dim_reference_fn, + desc='no_batch_dim') + ] + +def module_inputs_torch_nn_LayerNorm(module_info, device, dtype, requires_grad, training, **kwargs): + make_input = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad) + + return [ + ModuleInput( + constructor_input=FunctionInput([5], 1e-3), + forward_input=FunctionInput(make_input((4, 5, 5))), + desc='1d_elementwise_affine'), + ModuleInput( + constructor_input=FunctionInput([5], 1e-3), + forward_input=FunctionInput(make_input((128, 5, 5))), + desc='1d_elementwise_affine_large_batch'), + ModuleInput( + constructor_input=FunctionInput([5], 1e-3, False), + forward_input=FunctionInput(make_input((4, 5, 5))), + desc='1d_no_elementwise_affine'), + ModuleInput( + constructor_input=FunctionInput([2, 2, 5], 1e-3), + forward_input=FunctionInput(make_input((4, 2, 2, 5))), + desc='3d_elementwise_affine'), + 
ModuleInput( + constructor_input=FunctionInput([2, 2, 5], 1e-3, False), + forward_input=FunctionInput(make_input((4, 2, 2, 5))), + desc='3d_no_elementwise_affine'), + ModuleInput( + constructor_input=FunctionInput([5], 1e-3), + forward_input=FunctionInput(make_input((0, 5))), + desc='1d_empty_elementwise_affine'), + ModuleInput( + constructor_input=FunctionInput([2, 2, 5], 1e-3, elementwise_affine=True, bias=False), + forward_input=FunctionInput(make_input((4, 2, 2, 5))), + desc='3d_elementwise_affine_no_bias'), + ] + + +def module_inputs_torch_nn_LocalResponseNorm(module_info, device, dtype, requires_grad, training, **kwargs): + make_input = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad) + + return [ + ModuleInput( + constructor_input=FunctionInput(3,), + forward_input=FunctionInput(make_input((1, 5, 7))), + desc='1d'), + ModuleInput( + constructor_input=FunctionInput(2,), + forward_input=FunctionInput(make_input((1, 5, 7, 7))), + desc='2d_uneven_pad'), + ModuleInput( + constructor_input=FunctionInput(1, 1., 0.5, 2.), + forward_input=FunctionInput(make_input((1, 5, 7, 7, 7))), + desc='3d_custom_params'), + ] + + +def module_inputs_torch_nn_LPPool1d(module_info, device, dtype, requires_grad, training, **kwargs): + make_input = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad) + + return [ + ModuleInput( + constructor_input=FunctionInput(1.5, 2), + forward_input=FunctionInput(make_input((1, 3, 7))), + desc='norm'), + ModuleInput( + constructor_input=FunctionInput(2, 2, 3), + forward_input=FunctionInput(make_input((1, 3, 7)))), + ModuleInput( + constructor_input=FunctionInput(2, 2, 3), + forward_input=FunctionInput(make_input((3, 7))), + reference_fn=no_batch_dim_reference_fn, + desc='no_batch_dim'), + ] + + + +def module_inputs_torch_nn_LPPool2d(module_info, device, dtype, requires_grad, training, **kwargs): + make_input = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad) + + return [ + ModuleInput( + constructor_input=FunctionInput(2, 2, 2), + forward_input=FunctionInput(make_input((1, 3, 7, 7)))), + ModuleInput( + constructor_input=FunctionInput(1.5, 2), + forward_input=FunctionInput(make_input((1, 3, 7, 7))), + desc='norm'), + ] + + +def module_inputs_torch_nn_MaxPool1d(module_info, device, dtype, requires_grad, training, **kwargs): + make_input = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad) + + return [ + ModuleInput( + constructor_input=FunctionInput(4), + forward_input=FunctionInput(make_input((2, 10, 4))), + desc='3d_input'), + ModuleInput( + constructor_input=FunctionInput(4, 4), + forward_input=FunctionInput(make_input((2, 10, 4))), + desc='stride'), + ModuleInput( + constructor_input=FunctionInput(4, return_indices=True), + forward_input=FunctionInput(make_input((2, 10, 4))), + desc='return_indices'), + ] + + +def module_inputs_torch_nn_MaxPool2d(module_info, device, dtype, requires_grad, training, **kwargs): + make_input = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad) + + return [ + ModuleInput( + constructor_input=FunctionInput((3, 3), (2, 2), (1, 1)), + forward_input=FunctionInput(make_input((3, 7, 7))), + desc='3d_input'), + ModuleInput( + constructor_input=FunctionInput((3, 3), (2, 2), (1, 1)), + forward_input=FunctionInput(make_input((1, 3, 7, 7))), + desc='4d_input'), + ModuleInput( + constructor_input=FunctionInput((3, 3), (2, 2), (1, 1), return_indices=True), + forward_input=FunctionInput(make_input((1, 3, 7, 7))), + 
desc='return_indices'), + ] + +def module_inputs_torch_nn_MaxPool3d(module_info, device, dtype, requires_grad, training, **kwargs): + make_input = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad) + + return [ + ModuleInput( + constructor_input=FunctionInput((2, 2, 2)), + forward_input=FunctionInput(make_input((2, 3, 5, 5, 5)))), + ModuleInput( + constructor_input=FunctionInput(2, (2, 2, 2)), + forward_input=FunctionInput(make_input((2, 3, 5, 5, 5))), + desc='stride'), + ModuleInput( + constructor_input=FunctionInput(2, 2, (1, 1, 1)), + forward_input=FunctionInput(make_input((2, 3, 5, 5, 5))), + desc='stride_padding'), + ModuleInput( + constructor_input=FunctionInput(2, 2, (1, 1, 1), return_indices=True), + forward_input=FunctionInput(make_input((2, 3, 5, 5, 5))), + desc='return_indices'), + ] + + +def module_inputs_torch_nn_FractionalMaxPool2d(module_info, device, dtype, requires_grad, training, **kwargs): + make_input = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad) + + def make_random_samples(): + return torch.empty((1, 3, 2), dtype=torch.double, device=device).uniform_() + + return [ + ModuleInput( + constructor_input=FunctionInput(2, output_ratio=0.5, _random_samples=make_random_samples()), + forward_input=FunctionInput(make_input((1, 3, 5, 7))), + desc='ratio'), + ModuleInput( + constructor_input=FunctionInput((2, 3), output_size=(4, 3), _random_samples=make_random_samples()), + forward_input=FunctionInput(make_input((1, 3, 7, 6))), + desc='size'), + ModuleInput( + constructor_input=FunctionInput( + 2, output_ratio=0.5, _random_samples=make_random_samples(), return_indices=True + ), + forward_input=FunctionInput(make_input((1, 3, 5, 7))), + desc='ratio_return_indices'), + ModuleInput( + constructor_input=FunctionInput(2, output_ratio=0.5, _random_samples=make_random_samples()), + forward_input=FunctionInput(make_input((3, 5, 7))), + reference_fn=no_batch_dim_reference_fn, + desc='ratio_no_batch_dim'), + ModuleInput( + constructor_input=FunctionInput((2, 3), output_size=(4, 3), _random_samples=make_random_samples()), + forward_input=FunctionInput(make_input((3, 7, 6))), + reference_fn=no_batch_dim_reference_fn, + desc='size_no_batch_dim'), + ] + + +def module_inputs_torch_nn_FractionalMaxPool3d(module_info, device, dtype, requires_grad, training, **kwargs): + make_input = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad) + + def make_random_samples(): + return torch.empty((2, 4, 3), dtype=torch.double, device=device).uniform_() + + return [ + ModuleInput( + constructor_input=FunctionInput(2, output_ratio=0.5, _random_samples=make_random_samples()), + forward_input=FunctionInput(make_input((2, 4, 5, 5, 5))), + desc='ratio'), + ModuleInput( + constructor_input=FunctionInput((2, 2, 2), output_size=(4, 4, 4), _random_samples=make_random_samples()), + forward_input=FunctionInput(make_input((2, 4, 7, 7, 7))), + desc='size'), + ModuleInput( + constructor_input=FunctionInput((4, 2, 3), output_size=(10, 3, 2), _random_samples=make_random_samples()), + forward_input=FunctionInput(make_input((2, 4, 16, 7, 5))), + desc='asymsize'), + ModuleInput( + constructor_input=FunctionInput( + 2, output_ratio=0.5, _random_samples=make_random_samples(), return_indices=True + ), + forward_input=FunctionInput(make_input((2, 4, 5, 5, 5))), + desc='ratio_return_indices'), + ModuleInput( + constructor_input=FunctionInput(2, output_ratio=0.5, _random_samples=make_random_samples()), + forward_input=FunctionInput(make_input((4, 5, 
5, 5))), + reference_fn=no_batch_dim_reference_fn, + desc='ratio_no_batch_dim'), + ModuleInput( + constructor_input=FunctionInput((2, 2, 2), output_size=(4, 4, 4), _random_samples=make_random_samples()), + forward_input=FunctionInput(make_input((4, 7, 7, 7))), + reference_fn=no_batch_dim_reference_fn, + desc='size_no_batch_dim'), + ] + + +def module_inputs_torch_nn_Sigmoid(module_info, device, dtype, requires_grad, training, **kwargs): + make_input = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad) + + return [ + ModuleInput( + constructor_input=FunctionInput(), + forward_input=FunctionInput(make_input(())), + desc='scalar' + ), + ModuleInput( + constructor_input=FunctionInput(), + forward_input=FunctionInput(make_input(4)), + reference_fn=no_batch_dim_reference_fn, + desc='no_batch_dim', + ), + ModuleInput( + constructor_input=FunctionInput(), + forward_input=FunctionInput(make_input((2, 3, 4, 5))), + desc='channels_last_mem_format' + ), + ModuleInput( + constructor_input=FunctionInput(), + forward_input=FunctionInput(make_input((2, 3, 3, 4, 5))), + desc='channels_last_3d_mem_format' + ) + ] + + +def module_inputs_torch_nn_LogSigmoid(module_info, device, dtype, requires_grad, training, **kwargs): + make_input = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad) + + return [ + ModuleInput( + constructor_input=FunctionInput(), + forward_input=FunctionInput(make_input(())), + reference_fn=lambda m, p, i: i.sigmoid().log(), + desc='scalar' + ), + ModuleInput( + constructor_input=FunctionInput(), + forward_input=FunctionInput(make_input((2, 3, 4))), + reference_fn=lambda m, p, i: i.sigmoid().log(), + ), + ModuleInput( + constructor_input=FunctionInput(), + forward_input=FunctionInput(make_input(4)), + reference_fn=no_batch_dim_reference_fn, + desc='no_batch_dim', + ), + ] + + +def module_inputs_torch_nn_TransformerEncoder(module_info, device, dtype, requires_grad, training, **kwargs): + # Reuse the TransformerEncoderLayer samples since the forward args are nearly the same. + samples = [] + for layer_module_input in module_inputs_torch_nn_TransformerEncoderLayer( + None, device, dtype, requires_grad, training): + # Construct a TransformerEncoderLayer object to pass to TransformerEncoder. + l_args, l_kwargs = (layer_module_input.constructor_input.args, + layer_module_input.constructor_input.kwargs) + l_kwargs['device'] = device + l_kwargs['dtype'] = dtype + encoder_layer = torch.nn.TransformerEncoderLayer(*l_args, **l_kwargs) + num_layers = 2 + # Note: TransformerEncoderLayer takes a "src_mask" while + # TransformerEncoder takes a "mask"; rename kwarg appropriately. 
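# Reviewer sketch (illustrative, not part of the patch): the kwarg rename handled
# just below mirrors the public API difference -- TransformerEncoderLayer.forward
# takes `src_mask`, while TransformerEncoder.forward takes `mask`.
import torch
layer = torch.nn.TransformerEncoderLayer(d_model=4, nhead=2, dim_feedforward=8,
                                          dropout=0.0, batch_first=True)
encoder = torch.nn.TransformerEncoder(layer, num_layers=2)
src = torch.randn(2, 3, 4)                         # (batch, seq, d_model)
attn_mask = torch.zeros(3, 3, dtype=torch.bool)    # True entries would be masked out
out_layer = layer(src, src_mask=attn_mask)         # layer kwarg: src_mask
out_stack = encoder(src, mask=attn_mask)           # stack kwarg:  mask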
+ forward_input = layer_module_input.forward_input + if 'src_mask' in forward_input.kwargs: + forward_input.kwargs['mask'] = forward_input.kwargs['src_mask'] + del forward_input.kwargs['src_mask'] + samples.append(ModuleInput( + constructor_input=FunctionInput(encoder_layer, num_layers), + forward_input=forward_input, + desc=layer_module_input.desc + )) + return samples + +def module_inputs_torch_nn_TransformerEncoderLayer(module_info, device, dtype, requires_grad, training, **kwargs): + make_input = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad) + + samples = [ + ModuleInput( + constructor_input=FunctionInput(4, 2, 16, 0.0), + forward_input=FunctionInput( + make_input((2, 3, 4)) + ), + desc='relu_activation' + ), + ModuleInput( + constructor_input=FunctionInput(4, 2, 8, 0.0, F.gelu), + forward_input=FunctionInput( + make_input((2, 3, 4)) + ), + desc='gelu_activation' + ), ] + + # Samples below are for validating the no-batch-dim support. + key_padding_masks = (None, torch.tensor([False, False, True], device=device, dtype=torch.bool)) + attn_masks = (None, torch.tensor([False, False, True], device=device, dtype=torch.bool).expand((3, 3))) + for src_mask, src_key_padding_mask, norm_first in itertools.product(attn_masks, key_padding_masks, (True, False)): + samples.append( + ModuleInput( + constructor_input=FunctionInput(d_model=4, nhead=2, dim_feedforward=8, + dropout=0.0, batch_first=True, norm_first=norm_first), + forward_input=FunctionInput( + make_input((3, 4)), src_mask=src_mask, src_key_padding_mask=src_key_padding_mask + ), + reference_fn=partial(no_batch_dim_reference_fn, + batch_first=True, kwargs_to_batchify={'src_key_padding_mask': 0}), + desc='no_batch_dim_batch_first' + )) + + samples.append( + ModuleInput( + constructor_input=FunctionInput(4, 2, 8, dropout=0.0, batch_first=False, norm_first=norm_first), + forward_input=FunctionInput( + make_input((3, 4)), src_mask=src_mask, src_key_padding_mask=src_key_padding_mask + ), + reference_fn=partial(no_batch_dim_reference_fn, + batch_first=False, kwargs_to_batchify={'src_key_padding_mask': 0}), + desc='no_batch_dim' + )) + + def fast_path_reference_fn(module, parameters, *args, **kwargs): + assert not module.training + module = module.train(True) + output = module(*args, **kwargs) + module = module.train(False) + return output + + if not training: + for norm_first in (True, False): + samples.append( + ModuleInput( + constructor_input=FunctionInput(4, 2, 8, dropout=0.0, batch_first=True, norm_first=norm_first), + forward_input=FunctionInput( + make_input((2, 3, 4)), + ), + reference_fn=fast_path_reference_fn, + desc="fast_path_norm_first" if norm_first else "fast_path" + ) + ) + + return samples + + +def module_inputs_torch_nn_TransformerDecoderLayer(module_info, device, dtype, requires_grad, training, **kwargs): + make_input = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad) + + samples = [ + ModuleInput( + constructor_input=FunctionInput(4, 2, 16, 0.0), + forward_input=FunctionInput( + make_input((2, 3, 4)), make_input((2, 3, 4)) + ), + desc='relu_activation' + ), + ModuleInput( + constructor_input=FunctionInput(4, 2, 8, 0.0, F.gelu), + forward_input=FunctionInput( + make_input((2, 3, 4)), make_input((2, 3, 4)) + ), + desc='gelu_activation' + ), ] + + # Samples below are for validating the no-batch-dim support. 
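# Reviewer sketch (illustrative, not part of the patch): the no-batch-dim samples
# built below call the decoder layer with unbatched (seq, d_model) tensors.
import torch
layer = torch.nn.TransformerDecoderLayer(d_model=4, nhead=2, dim_feedforward=8,
                                          dropout=0.0, batch_first=True)
tgt = torch.randn(3, 4)        # (seq, d_model), no batch dimension
memory = torch.randn(3, 4)
out = layer(tgt, memory)
assert out.shape == (3, 4)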
+ key_padding_masks = (None, torch.tensor([False, False, True], device=device, dtype=torch.bool)) + attn_masks = (None, torch.tensor([False, False, True], device=device, dtype=torch.bool).expand((3, 3))) + for tgt_mask, tgt_key_padding_mask, norm_first in itertools.product(attn_masks, key_padding_masks, (True, False)): + # Using same mask for tgt and memory + memory_mask = tgt_mask + memory_key_padding_mask = tgt_key_padding_mask + samples.append( + ModuleInput( + constructor_input=FunctionInput(d_model=4, nhead=2, dim_feedforward=8, + dropout=0.0, batch_first=True, norm_first=norm_first), + forward_input=FunctionInput( + make_input((3, 4)), make_input((3, 4)), tgt_mask=tgt_mask, memory_mask=memory_mask, + tgt_key_padding_mask=tgt_key_padding_mask, memory_key_padding_mask=memory_key_padding_mask + ), + reference_fn=partial(no_batch_dim_reference_fn, + batch_first=True, + kwargs_to_batchify={'tgt_key_padding_mask': 0, 'memory_key_padding_mask': 0}), + desc='no_batch_dim_batch_first' + )) + + samples.append( + ModuleInput( + constructor_input=FunctionInput(4, 2, 8, dropout=0.0, batch_first=False, norm_first=norm_first), + forward_input=FunctionInput( + make_input((3, 4)), make_input((3, 4)), tgt_mask=tgt_mask, memory_mask=memory_mask, + tgt_key_padding_mask=tgt_key_padding_mask, memory_key_padding_mask=memory_key_padding_mask + ), + reference_fn=partial(no_batch_dim_reference_fn, + batch_first=False, + kwargs_to_batchify={'tgt_key_padding_mask': 0, 'memory_key_padding_mask': 0}), + desc='no_batch_dim' + )) + + return samples + + +def module_inputs_torch_nn_Transformer(module_info, device, dtype, requires_grad, training, **kwargs): + make_input = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad) + samples = [] + # Samples below are for validating the no-batch-dim support. 
+ key_padding_masks = (None, torch.tensor([False, False, True], device=device, dtype=torch.bool)) + attn_masks = (None, torch.tensor([False, False, True], device=device, dtype=torch.bool).expand((3, 3))) + for mask, key_padding_mask, norm_first, bias in \ + itertools.product(attn_masks, key_padding_masks, (True, False), (True, False)): + # Using same mask for tgt and memory + src_mask , tgt_mask = (mask,) * 2 + src_key_padding_mask, tgt_key_padding_mask = (key_padding_mask,) * 2 + samples.append( + ModuleInput( + constructor_input=FunctionInput(d_model=4, nhead=2, dim_feedforward=8, + num_encoder_layers=1, num_decoder_layers=1, + dropout=0.0, batch_first=True, norm_first=norm_first, bias=bias), + forward_input=FunctionInput( + make_input((3, 4)), make_input((3, 4)), tgt_mask=tgt_mask, src_mask=src_mask, + tgt_key_padding_mask=tgt_key_padding_mask, src_key_padding_mask=src_key_padding_mask + ), + reference_fn=partial(no_batch_dim_reference_fn, + batch_first=True, + kwargs_to_batchify={'tgt_key_padding_mask': 0, 'src_key_padding_mask': 0}), + desc='no_batch_dim_batch_first' + )) + + samples.append( + ModuleInput( + constructor_input=FunctionInput(d_model=4, nhead=2, dim_feedforward=8, + num_encoder_layers=1, num_decoder_layers=1, + dropout=0.0, batch_first=False, norm_first=norm_first), + forward_input=FunctionInput( + make_input((3, 4)), make_input((3, 4)), tgt_mask=tgt_mask, src_mask=src_mask, + tgt_key_padding_mask=tgt_key_padding_mask, src_key_padding_mask=src_key_padding_mask + ), + reference_fn=partial(no_batch_dim_reference_fn, + batch_first=False, + kwargs_to_batchify={'tgt_key_padding_mask': 0, 'src_key_padding_mask': 0}), + desc='no_batch_dim' + )) + + return samples + + +def module_inputs_torch_nn_Embedding(module_info, device, dtype, requires_grad, training, **kwargs): + make_empty = partial(torch.empty, device=device, dtype=torch.long, requires_grad=False) + return [ + ModuleInput( + constructor_input=FunctionInput(num_embeddings=4, embedding_dim=3), + forward_input=FunctionInput(make_empty(2, 3).random_(4)) + ), + ModuleInput( + constructor_input=FunctionInput(num_embeddings=4, embedding_dim=3), + forward_input=FunctionInput(make_empty(1, 512).random_(4).expand(7, 512)), + desc='discontiguous' + ), + ] + + +def module_inputs_torch_nn_MultiheadAttention(module_info, device, dtype, requires_grad, training, **kwargs): + # Currently all samples below are for validating the no-batch-dim support. 
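# Reviewer sketch (illustrative, not part of the patch): the unbatched call path
# these samples validate -- MultiheadAttention on (seq, embed_dim) inputs.
import torch
mha = torch.nn.MultiheadAttention(embed_dim=3, num_heads=3, batch_first=True)
q = torch.randn(3, 3)                   # (seq, embed_dim), no batch dimension
out, attn_weights = mha(q, q, q)
assert out.shape == (3, 3)              # (seq, embed_dim)
assert attn_weights.shape == (3, 3)     # (seq, seq), averaged over heads by default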
+ make_input = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad) + samples = [] + bool_vals = (True, False) + key_padding_masks = (None, torch.tensor([False, False, True], device=device, dtype=torch.bool)) + attn_masks = (None, torch.tensor([False, False, True], device=device, dtype=torch.bool).expand((3, 3, 3))) + products = itertools.product(bool_vals, bool_vals, bool_vals, key_padding_masks, attn_masks) + for bias, add_bias_kv, add_zero_attn, key_padding_mask, attn_mask in products: + samples.append( + ModuleInput( + constructor_input=FunctionInput(embed_dim=3, num_heads=3, batch_first=True, + bias=bias, add_bias_kv=add_bias_kv, add_zero_attn=add_zero_attn), + forward_input=FunctionInput(make_input((3, 3)), make_input((3, 3)), make_input((3, 3)), + key_padding_mask=key_padding_mask, attn_mask=attn_mask), + reference_fn=no_batch_dim_reference_mha, + ) + ) + samples.append( + ModuleInput( + constructor_input=FunctionInput(embed_dim=3, num_heads=3, batch_first=False, + bias=bias, add_bias_kv=add_bias_kv, add_zero_attn=add_zero_attn), + forward_input=FunctionInput(make_input((3, 3)), make_input((3, 3)), make_input((3, 3)), + key_padding_mask=key_padding_mask, attn_mask=attn_mask), + reference_fn=partial(no_batch_dim_reference_mha, batch_first=False), + ) + ) + + return samples + + +def module_inputs_torch_nn_RNN_GRU_Cell(module_info, device, dtype, requires_grad, training, **kwargs): + # Currently all samples below are for validating the no-batch-dim support. + make_input = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad) + samples = [ + ModuleInput( + constructor_input=FunctionInput(5, 10), + forward_input=FunctionInput(make_input(5), make_input(10)), + reference_fn=no_batch_dim_reference_fn, + ), + ModuleInput( + constructor_input=FunctionInput(5, 10, bias=True), + forward_input=FunctionInput(make_input(5), make_input(10)), + reference_fn=no_batch_dim_reference_fn, + ) + ] + + is_rnn = kwargs.get('is_rnn', False) + if is_rnn: + # RNN also supports `nonlinearity` argument. + # `tanh` is the default, so we check with `relu` + samples.append( + ModuleInput( + constructor_input=FunctionInput(5, 10, bias=True, nonlinearity='relu'), + forward_input=FunctionInput(make_input(5), make_input(10)), + reference_fn=no_batch_dim_reference_fn, + ) + ) + + return samples + + +def module_inputs_torch_nn_LSTMCell(module_info, device, dtype, requires_grad, training, **kwargs): + # Currently all samples below are for validating the no-batch-dim support. + make_input = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad) + samples = ( + ModuleInput( + constructor_input=FunctionInput(5, 10), + forward_input=FunctionInput(make_input(5), (make_input(10), make_input(10))), + reference_fn=no_batch_dim_reference_lstmcell, + ), + ModuleInput( + constructor_input=FunctionInput(5, 10, bias=True), + forward_input=FunctionInput(make_input(5), (make_input(10), make_input(10))), + reference_fn=no_batch_dim_reference_lstmcell, + ), + ) + + return samples + +def make_packed_sequence(inp, batch_sizes): + required_grad = inp.requires_grad + inp.requires_grad_(False) # user won't have access to inp so won't be able to get its grads + seq = pack_padded_sequence(inp, batch_sizes) + seq.data.requires_grad_(required_grad) + return seq + + +def module_inputs_torch_nn_RNN_GRU(module_info, device, dtype, requires_grad, training, with_packed_sequence=False, **kwargs): + # Currently all samples below are for validating the no-batch-dim support. 
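# Reviewer sketch (illustrative, not part of the patch): the unbatched RNN/GRU call
# these samples validate -- input of shape (seq, input_size) with no batch dimension.
import torch
gru = torch.nn.GRU(input_size=2, hidden_size=2, num_layers=2)
x = torch.randn(3, 2)                   # (seq, input_size)
out, h_n = gru(x)
assert out.shape == (3, 2)              # (seq, hidden_size)
assert h_n.shape == (2, 2)              # (num_layers * num_directions, hidden_size)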
+ make_input = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad) + is_rnn = kwargs['is_rnn'] + nonlinearity = ('relu', 'tanh') + bias = (False, True) + batch_first = (False, True) + bidirectional = (False, True) + + samples = [] + if is_rnn: + prod_gen = product(nonlinearity, bias, batch_first, bidirectional) + else: + prod_gen = product(bias, batch_first, bidirectional) + + for args in prod_gen: + if is_rnn: + nl, b, b_f, bidir = args + else: + b, b_f, bidir = args + + cons_args = {'input_size': 2, 'hidden_size': 2, 'num_layers': 2, + 'batch_first': b_f, 'bias': b, 'bidirectional': bidir} + cons_args_hidden = {'input_size': 2, 'hidden_size': 3, 'num_layers': 2, + 'batch_first': b_f, 'bias': b, 'bidirectional': bidir} + + if is_rnn: + cons_args['nonlinearity'] = nl + cons_args_hidden['nonlinearity'] = nl + samples.append( + ModuleInput( + constructor_input=FunctionInput(**cons_args), + forward_input=FunctionInput(make_input((3, 2))), + reference_fn=partial(no_batch_dim_reference_rnn_gru, batch_first=b_f), + ) + ) + samples.append( + ModuleInput( + constructor_input=FunctionInput(**cons_args_hidden), + forward_input=FunctionInput(make_input((3, 2)), make_input((4 if bidir else 2, 3))), + reference_fn=partial(no_batch_dim_reference_rnn_gru, batch_first=b_f), + ) + ) + if with_packed_sequence: + samples.append( + ModuleInput( + constructor_input=FunctionInput(**cons_args), + forward_input=FunctionInput(make_packed_sequence(make_input((5, 2, 2)), torch.tensor([5, 3]))), + reference_fn=partial(no_batch_dim_reference_rnn_gru, batch_first=b_f), + ) + ) + samples.append( + ModuleInput( + constructor_input=FunctionInput(**cons_args), + forward_input=FunctionInput(make_packed_sequence(make_input((5, 5, 2)), torch.tensor([5, 3, 3, 2, 2]))), + reference_fn=partial(no_batch_dim_reference_rnn_gru, batch_first=b_f), + ) + ) + + return samples + + +def module_inputs_torch_nn_LSTM(module_info, device, dtype, requires_grad, training, **kwargs): + # Currently all samples below are for validating the no-batch-dim support. 
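# Reviewer sketch (illustrative, not part of the patch): the unbatched LSTM call with
# proj_size > 0 that the hx construction below mirrors -- h_0 uses proj_size while
# c_0 uses hidden_size.
import torch
lstm = torch.nn.LSTM(input_size=2, hidden_size=3, num_layers=2, proj_size=2)
x = torch.randn(3, 2)                               # (seq, input_size), no batch dim
hx = (torch.randn(2, 2), torch.randn(2, 3))         # (h_0, c_0) for 2 layers
out, (h_n, c_n) = lstm(x, hx)
assert out.shape == (3, 2)                          # last dim is proj_size when proj_size > 0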
+ make_input = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad) + bias = (False, True) + batch_first = (False, True) + bidirectional = (False, True) + proj_sizes = (0, 2) + + samples = [] + prod_gen = product(bias, batch_first, bidirectional, proj_sizes) + + for args in prod_gen: + b, b_f, bidir, proj_size = args + hidden_size = 3 + cons_args = {'input_size': 2, 'hidden_size': hidden_size, 'num_layers': 2, 'proj_size': proj_size, + 'batch_first': b_f, 'bias': b, 'bidirectional': bidir} + cons_args_hidden = {'input_size': 2, 'hidden_size': hidden_size, 'num_layers': 2, 'proj_size': proj_size, + 'batch_first': b_f, 'bias': b, 'bidirectional': bidir} + + samples.append( + ModuleInput( + constructor_input=FunctionInput(**cons_args), + forward_input=FunctionInput(make_input((2, 2))), + reference_fn=partial(no_batch_dim_reference_lstm, batch_first=b_f), + ) + ) + + h_out = proj_size if proj_size > 0 else hidden_size + hx = (make_input((4 if bidir else 2, h_out)), make_input((4 if bidir else 2, hidden_size))) + samples.append( + ModuleInput( + constructor_input=FunctionInput(**cons_args_hidden), + forward_input=FunctionInput(make_input((3, 2)), hx), + reference_fn=partial(no_batch_dim_reference_lstm, batch_first=b_f), + ) + ) + + + return samples + + + +def module_inputs_torch_nn_ReflectionPad1d(module_info, device, dtype, requires_grad, training, **kwargs): + make_input = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad) + + return [ + ModuleInput( + constructor_input=FunctionInput(1), + forward_input=FunctionInput(make_input((2, 3))), + reference_fn=no_batch_dim_reference_fn, + ), + ModuleInput( + constructor_input=FunctionInput((1, 2)), + forward_input=FunctionInput(make_input((2, 3, 4))), + ), + ] + +def module_inputs_torch_nn_ReflectionPad2d(module_info, device, dtype, requires_grad, training, **kwargs): + make_input = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad) + + return [ + ModuleInput( + constructor_input=FunctionInput(1), + forward_input=FunctionInput(make_input((3, 4, 5))), + reference_fn=no_batch_dim_reference_fn, + ), + ModuleInput( + constructor_input=FunctionInput((1, 2, 3, 4)), + forward_input=FunctionInput(make_input((3, 4, 5, 6))), + ), + ] + +def module_inputs_torch_nn_ReflectionPad3d(module_info, device, dtype, requires_grad, training, **kwargs): + make_input = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad) + + return [ + ModuleInput( + constructor_input=FunctionInput(1), + forward_input=FunctionInput(make_input((2, 3, 4, 5))), + reference_fn=no_batch_dim_reference_fn + ), + ModuleInput( + constructor_input=FunctionInput((1, 2, 1, 2, 1, 2)), + forward_input=FunctionInput(make_input((3, 3, 3, 3, 3))), + ), + ] + +def module_inputs_torch_nn_ReplicationPad1d(module_info, device, dtype, requires_grad, training, **kwargs): + make_input = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad) + + return [ + ModuleInput( + constructor_input=FunctionInput(1), + forward_input=FunctionInput(make_input((3, 4))), + reference_fn=no_batch_dim_reference_fn + ), + ModuleInput( + constructor_input=FunctionInput((1, 2)), + forward_input=FunctionInput(make_input((3, 4, 5))), + ), + ] + +def module_inputs_torch_nn_ReplicationPad2d(module_info, device, dtype, requires_grad, training, **kwargs): + make_input = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad) + + return [ + ModuleInput( + constructor_input=FunctionInput(1), + 
forward_input=FunctionInput(make_input((3, 4, 5))), + reference_fn=no_batch_dim_reference_fn, + ), + ModuleInput( + constructor_input=FunctionInput((1, 2, 3, 4)), + forward_input=FunctionInput(make_input((3, 4, 5, 6))), + ), + ] + +def module_inputs_torch_nn_ReplicationPad3d(module_info, device, dtype, requires_grad, training, **kwargs): + make_input = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad) + + return [ + ModuleInput( + constructor_input=FunctionInput(1), + forward_input=FunctionInput(make_input((3, 4, 5, 6))), + reference_fn=no_batch_dim_reference_fn, + ), + ModuleInput( + constructor_input=FunctionInput((1, 2, 3, 4, 5, 6)), + forward_input=FunctionInput(make_input((3, 4, 5, 6, 7))), + ), + ] + +def module_inputs_torch_nn_ZeroPad1d(module_info, device, dtype, requires_grad, training, **kwargs): + make_input = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad) + + return [ + ModuleInput( + constructor_input=FunctionInput(1), + forward_input=FunctionInput(make_input((3, 4))), + reference_fn=no_batch_dim_reference_fn, + ), + ModuleInput( + constructor_input=FunctionInput((1, 2)), + forward_input=FunctionInput(make_input((3, 4, 5))), + ), + ] + +def module_inputs_torch_nn_ZeroPad2d(module_info, device, dtype, requires_grad, training, **kwargs): + make_input = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad) + + return [ + ModuleInput( + constructor_input=FunctionInput(1), + forward_input=FunctionInput(make_input((1, 2, 3))), + reference_fn=no_batch_dim_reference_fn + ), + ModuleInput( + constructor_input=FunctionInput((1, 2, 3, 4)), + forward_input=FunctionInput(make_input((1, 2, 3, 4))), + ), + ] + +def module_inputs_torch_nn_ZeroPad3d(module_info, device, dtype, requires_grad, training, **kwargs): + make_input = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad) + + return [ + ModuleInput( + constructor_input=FunctionInput(1), + forward_input=FunctionInput(make_input((3, 4, 5, 6))), + reference_fn=no_batch_dim_reference_fn, + ), + ModuleInput( + constructor_input=FunctionInput((1, 2, 3, 4, 5, 6)), + forward_input=FunctionInput(make_input((1, 2, 3, 4, 5))), + ), + ] + +def module_inputs_torch_nn_ConstantPad1d(module_info, device, dtype, requires_grad, training, **kwargs): + make_input = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad) + + return [ + ModuleInput( + constructor_input=FunctionInput(1, 2), + forward_input=FunctionInput(make_input((3, 4))), + reference_fn=no_batch_dim_reference_fn, + ), + ModuleInput( + constructor_input=FunctionInput((1, 2), 3), + forward_input=FunctionInput(make_input((3, 4, 5))), + ), + ] + +def module_inputs_torch_nn_ConstantPad2d(module_info, device, dtype, requires_grad, training, **kwargs): + make_input = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad) + + return [ + ModuleInput( + constructor_input=FunctionInput(1, 3), + forward_input=FunctionInput(make_input((3, 4, 5))), + reference_fn=no_batch_dim_reference_fn + ), + ModuleInput( + constructor_input=FunctionInput((1, 2, 3, 4), 5), + forward_input=FunctionInput(make_input((1, 2, 3, 4))), + ), + ] + +def module_inputs_torch_nn_ConstantPad3d(module_info, device, dtype, requires_grad, training, **kwargs): + make_input = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad) + + return [ + ModuleInput( + constructor_input=FunctionInput(1, 3), + forward_input=FunctionInput(make_input((3, 4, 5, 6))), + 
reference_fn=no_batch_dim_reference_fn, + ), + ModuleInput( + constructor_input=FunctionInput((1, 2, 3, 4, 5, 6), 7), + forward_input=FunctionInput(make_input((1, 2, 1, 2, 1))), + ), + ] + +def module_inputs_torch_nn_CircularPad1d(module_info, device, dtype, requires_grad, training, **kwargs): + make_input = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad) + + def padding1d_circular_ref(inp, pad): + r""" input: + [[[0., 1., 2.], + [3., 4., 5.]]] + pad: (1, 2) + output: + [[[2., 0., 1., 2., 0., 1.], + [5., 3., 4., 5., 3., 4.]]] + """ + return torch.cat([inp[:, :, -pad[0]:], inp, inp[:, :, :pad[1]]], dim=2) + + return [ + ModuleInput( + constructor_input=FunctionInput(1), + forward_input=FunctionInput(make_input((3, 4))), + reference_fn=no_batch_dim_reference_fn + ), + ModuleInput( + constructor_input=FunctionInput((1, 2)), + forward_input=FunctionInput(make_input((1, 2, 3))), + reference_fn=lambda m, p, i: padding1d_circular_ref(i, m.padding), + ), + ModuleInput( + constructor_input=FunctionInput((3, 1)), + forward_input=FunctionInput(make_input((1, 2, 3))), + reference_fn=lambda m, p, i: padding1d_circular_ref(i, m.padding), + ), + ModuleInput( + constructor_input=FunctionInput((3, 3)), + forward_input=FunctionInput(make_input((1, 2, 3))), + reference_fn=lambda m, p, i: padding1d_circular_ref(i, m.padding), + ), + ] + +def module_inputs_torch_nn_CircularPad2d(module_info, device, dtype, requires_grad, training, **kwargs): + make_input = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad) + + def padding2d_circular_ref(inp, pad): + r"""input: + [[[[0., 1., 2], + [3., 4., 5.]]]] + pad: (1, 2, 2, 1) + output: + [[[[2., 0., 1., 2., 0., 1.], + [5., 3., 4., 5., 3., 4.], + [2., 0., 1., 2., 0., 1.], + [5., 3., 4., 5., 3., 4.], + [2., 0., 1., 2., 0., 1.]]]] + """ + inp = torch.cat([inp[:, :, -pad[2]:], inp, inp[:, :, :pad[3]]], dim=2) + return torch.cat([inp[:, :, :, -pad[0]:], inp, inp[:, :, :, :pad[1]]], dim=3) + + return [ + ModuleInput( + constructor_input=FunctionInput(1), + forward_input=FunctionInput(make_input((3, 4, 5))), + reference_fn=no_batch_dim_reference_fn, + ), + ModuleInput( + constructor_input=FunctionInput((1, 2, 2, 1)), + forward_input=FunctionInput(make_input((1, 1, 2, 3))), + reference_fn=lambda m, p, i: padding2d_circular_ref(i, m.padding), + ), + ModuleInput( + constructor_input=FunctionInput((2, 3, 2, 2)), + forward_input=FunctionInput(make_input((1, 1, 2, 3))), + reference_fn=lambda m, p, i: padding2d_circular_ref(i, m.padding), + ), + ModuleInput( + constructor_input=FunctionInput((3, 3, 3, 1)), + forward_input=FunctionInput(make_input((1, 1, 3, 3))), + reference_fn=lambda m, p, i: padding2d_circular_ref(i, m.padding), + ), + ] + +def module_inputs_torch_nn_CircularPad3d(module_info, device, dtype, requires_grad, training, **kwargs): + make_input = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad) + + + def padding3d_circular_ref(inp, pad): + r"""input: + [[[[[ 0., 1., 2.], + [ 3., 4., 5.]], + [[ 6., 7., 8.], + [ 9., 10., 11.]]]]] + pad: (1, 2, 2, 1, 1, 2) + output: [[[[[ 8., 6., 7., 8., 6., 7.], + [11., 9., 10., 11., 9., 10.], + [ 8., 6., 7., 8., 6., 7.], + [11., 9., 10., 11., 9., 10.], + [ 8., 6., 7., 8., 6., 7.]], + + [[ 2., 0., 1., 2., 0., 1.], + [ 5., 3., 4., 5., 3., 4.], + [ 2., 0., 1., 2., 0., 1.], + [ 5., 3., 4., 5., 3., 4.], + [ 2., 0., 1., 2., 0., 1.]], + + [[ 8., 6., 7., 8., 6., 7.], + [11., 9., 10., 11., 9., 10.], + [ 8., 6., 7., 8., 6., 7.], + [11., 9., 10., 11., 9., 10.], + [ 
8., 6., 7., 8., 6., 7.]], + + [[ 2., 0., 1., 2., 0., 1.], + [ 5., 3., 4., 5., 3., 4.], + [ 2., 0., 1., 2., 0., 1.], + [ 5., 3., 4., 5., 3., 4.], + [ 2., 0., 1., 2., 0., 1.]], + + [[ 8., 6., 7., 8., 6., 7.], + [11., 9., 10., 11., 9., 10.], + [ 8., 6., 7., 8., 6., 7.], + [11., 9., 10., 11., 9., 10.], + [ 8., 6., 7., 8., 6., 7.]]]]] + """ + inp = torch.cat([inp[:, :, -pad[4]:], inp, inp[:, :, :pad[5]]], dim=2) + inp = torch.cat([inp[:, :, :, -pad[2]:], inp, inp[:, :, :, :pad[3]]], dim=3) + return torch.cat([inp[:, :, :, :, -pad[0]:], inp, inp[:, :, :, :, :pad[1]]], dim=4) + + return [ + ModuleInput( + constructor_input=FunctionInput(1), + forward_input=FunctionInput(make_input((3, 4, 5, 6))), + reference_fn=no_batch_dim_reference_fn, + ), + ModuleInput( + constructor_input=FunctionInput((1, 2, 1, 2, 1, 2)), + forward_input=FunctionInput(make_input((1, 1, 2, 2, 3))), + reference_fn=lambda m, p, i: padding3d_circular_ref(i, m.padding) + ), + ModuleInput( + constructor_input=FunctionInput((3, 2, 2, 1, 1, 2)), + forward_input=FunctionInput(make_input((1, 1, 2, 2, 3))), + reference_fn=lambda m, p, i: padding3d_circular_ref(i, m.padding) + ), + ModuleInput( + constructor_input=FunctionInput((3, 3, 2, 1, 2, 2)), + forward_input=FunctionInput(make_input((1, 1, 2, 2, 3))), + reference_fn=lambda m, p, i: padding3d_circular_ref(i, m.padding) + ), + ] + + +# All these operators share similar issues on cuDNN and MIOpen +rnn_gru_lstm_module_info_decorators = ( + # RuntimeError: Batching rule not implemented for aten::_cudnn_rnn_backward. + # We could not generate a fallback + DecorateInfo( + unittest.expectedFailure, "TestModule", "test_grad", + active_if=(TEST_CUDNN and not TEST_WITH_ROCM), device_type='cuda' + ), + # NotImplementedError: the derivative for '_cudnn_rnn_backward' is not implemented. + # Double backwards is not supported for CuDNN RNNs due to limitations in the CuDNN API + DecorateInfo( + unittest.expectedFailure, "TestModule", "test_gradgrad", + active_if=(TEST_CUDNN and not TEST_WITH_ROCM), device_type='cuda' + ), + # CUDNN GRU doesn't accept non-contiguous hx + DecorateInfo( + unittest.expectedFailure, "TestModule", "test_non_contiguous_tensors", + active_if=(TEST_CUDNN and not TEST_WITH_ROCM), device_type='cuda' + ), + # MIOPEN GRU doesn't accept non-contiguous hx (this is dispatched to miopen only for float). + DecorateInfo( + unittest.expectedFailure, "TestModule", "test_non_contiguous_tensors", + active_if=(TEST_CUDNN and TEST_WITH_ROCM), dtypes=(torch.float,), device_type='cuda' + ), + DecorateInfo( + skipCUDAVersionIn([(11, 7)]), "TestExpandedWeightModule", "test_module", + device_type='cuda' + ), + DecorateInfo( + skipCUDAVersionIn([(11, 7)]), "TestDecomp", "test_rnn_decomp_module", + device_type='cuda' + ) +) + +# Start of module error inputs functions. 
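# Reviewer sketch (illustrative, not part of the patch): each ErrorModuleInput below
# pairs a ModuleInput with the exception the harness expects -- roughly: build the
# module from constructor_input, call it with forward_input, and match error_regex
# against the raised message. For the first RNNCell case this amounts to:
import re
import torch
cell = torch.nn.RNNCell(10, 20)
bad_input = torch.randn(3, 11)          # input_size 11 instead of the expected 10
hx = torch.randn(3, 20)
try:
    cell(bad_input, hx)
except RuntimeError as err:
    assert re.search("input has inconsistent input_size: got 11 expected 10", str(err))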
+ +def module_error_inputs_torch_nn_RNN_GRU_Cell(module_info, device, dtype, requires_grad, training, **kwargs): + make_input = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad) + samples = [ + ErrorModuleInput( + ModuleInput( + constructor_input=FunctionInput(10, 20), + forward_input=FunctionInput(make_input(3, 11), make_input(3, 20)), + ), + error_on=ModuleErrorEnum.FORWARD_ERROR, + error_type=RuntimeError, + error_regex="input has inconsistent input_size: got 11 expected 10" + ), + ErrorModuleInput( + ModuleInput( + constructor_input=FunctionInput(10, 20), + forward_input=FunctionInput(make_input(3, 10), make_input(3, 21)), + ), + error_on=ModuleErrorEnum.FORWARD_ERROR, + error_type=RuntimeError, + error_regex="hidden0 has inconsistent hidden_size: got 21, expected 20" + ), + ErrorModuleInput( + ModuleInput( + constructor_input=FunctionInput(10, 20), + forward_input=FunctionInput(make_input(3, 10), make_input(5, 20)), + ), + error_on=ModuleErrorEnum.FORWARD_ERROR, + error_type=RuntimeError, + error_regex="Input batch size 3 doesn't match hidden0 batch size 5" + ), + ErrorModuleInput( + ModuleInput( + constructor_input=FunctionInput(10, 20), + forward_input=FunctionInput(make_input(3, 10), make_input(3, 1, 1, 20)), + ), + error_on=ModuleErrorEnum.FORWARD_ERROR, + error_type=ValueError, + error_regex="Expected hidden to be 1D or 2D, got 4D instead" + ), + ErrorModuleInput( + ModuleInput( + constructor_input=FunctionInput(10, 20, 'relu'), + forward_input=FunctionInput(make_input(3, 10), make_input(3, 21)), + ), + error_on=ModuleErrorEnum.FORWARD_ERROR, + error_type=RuntimeError, + error_regex="hidden0 has inconsistent hidden_size: got 21, expected 20" + ), + ErrorModuleInput( + ModuleInput( + constructor_input=FunctionInput(10, 20, 'tanh'), + forward_input=FunctionInput(make_input(3, 10), make_input(3, 21)), + ), + error_on=ModuleErrorEnum.FORWARD_ERROR, + error_type=RuntimeError, + error_regex="hidden0 has inconsistent hidden_size: got 21, expected 20" + ), + ] + return samples + +def module_error_inputs_torch_nn_LSTMCell(module_info, device, dtype, requires_grad, training, **kwargs): + make_input = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad) + samples = [ + ErrorModuleInput( + ModuleInput( + constructor_input=FunctionInput(10, 20), + forward_input=FunctionInput(make_input(3, 11), (make_input(3, 20), make_input(3, 20))), + ), + error_on=ModuleErrorEnum.FORWARD_ERROR, + error_type=RuntimeError, + error_regex="input has inconsistent input_size: got 11 expected 10" + ), + ErrorModuleInput( + ModuleInput( + constructor_input=FunctionInput(10, 20), + forward_input=FunctionInput(make_input(3, 10), (make_input(3, 21), make_input(3, 21))), + ), + error_on=ModuleErrorEnum.FORWARD_ERROR, + error_type=RuntimeError, + error_regex="hidden0 has inconsistent hidden_size: got 21, expected 20" + ), + ErrorModuleInput( + ModuleInput( + constructor_input=FunctionInput(10, 20), + forward_input=FunctionInput(make_input(3, 10), (make_input(5, 20), make_input(5, 20))), + ), + error_on=ModuleErrorEnum.FORWARD_ERROR, + error_type=RuntimeError, + error_regex="Input batch size 3 doesn't match hidden0 batch size 5" + ), + ErrorModuleInput( + ModuleInput( + constructor_input=FunctionInput(10, 20), + forward_input=FunctionInput(make_input(3, 10), (make_input(3, 1, 1, 20), make_input(3, 1, 1, 20))), + ), + error_on=ModuleErrorEnum.FORWARD_ERROR, + error_type=ValueError, + error_regex="Expected hx\\[0\\] to be 1D or 2D, got 4D instead" + ), + ] + return 
samples + + +def module_error_inputs_torch_nn_RNN_GRU(module_info, device, dtype, requires_grad, training, **kwargs): + samples = [ + ErrorModuleInput( + ModuleInput(constructor_input=FunctionInput(10, 0, 1)), + error_on=ModuleErrorEnum.CONSTRUCTION_ERROR, + error_type=ValueError, + error_regex="hidden_size must be greater than zero" + ), + ErrorModuleInput( + ModuleInput(constructor_input=FunctionInput(10, 10, 0)), + error_on=ModuleErrorEnum.CONSTRUCTION_ERROR, + error_type=ValueError, + error_regex="num_layers must be greater than zero" + ), + ] + return samples + +def module_error_inputs_torch_nn_Pad1d(module_info, device, dtype, requires_grad, training, **kwargs): + make_input = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad) + + is_constant = kwargs.get('is_constant', False) + + return [ + ErrorModuleInput( + ModuleInput( + constructor_input=FunctionInput(1, 3) if is_constant else FunctionInput(3), + forward_input=FunctionInput(make_input((2, 3, 4, 5))), + ), + error_on=ModuleErrorEnum.FORWARD_ERROR, + error_type=ValueError, + error_regex=r"expected 2D or 3D input \(got 4D input\)", + + ), + ] + +def module_error_inputs_torch_nn_Pad2d(module_info, device, dtype, requires_grad, training, **kwargs): + make_input = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad) + + is_constant = kwargs.get('is_constant', False) + + return [ + ErrorModuleInput( + ModuleInput( + constructor_input=FunctionInput(1, 3) if is_constant else FunctionInput(3), + forward_input=FunctionInput(make_input((2, 3))), + ), + error_on=ModuleErrorEnum.FORWARD_ERROR, + error_type=ValueError, + error_regex=r"expected 3D or 4D input \(got 2D input\)", + + ), + ] + +def module_error_inputs_torch_nn_Pad3d(module_info, device, dtype, requires_grad, training, **kwargs): + make_input = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad) + + is_constant = kwargs.get('is_constant', False) + + return [ + ErrorModuleInput( + ModuleInput( + constructor_input=FunctionInput(1, 3) if is_constant else FunctionInput(3), + forward_input=FunctionInput(make_input((2, 3))), + ), + error_on=ModuleErrorEnum.FORWARD_ERROR, + error_type=ValueError, + error_regex=r"expected 4D or 5D input \(got 2D input\)", + + ), + ] + + +# Database of ModuleInfo entries in alphabetical order. 
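# Reviewer sketch (illustrative, not part of the patch; assumes the ModuleInfo /
# ModuleInput attribute names used in this file): a rough picture of how one
# module_db entry is consumed -- its module_inputs_func yields ModuleInput samples
# that are instantiated from constructor_input and run on forward_input.
import torch
from torch.testing._internal.common_modules import module_db
info = next(mi for mi in module_db if mi.module_cls is torch.nn.ELU)
for sample in info.module_inputs_func(info, device='cpu', dtype=torch.float32,
                                      requires_grad=False, training=True):
    m = info.module_cls(*sample.constructor_input.args, **sample.constructor_input.kwargs)
    m(*sample.forward_input.args, **sample.forward_input.kwargs)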
+module_db: List[ModuleInfo] = [ + ModuleInfo(torch.nn.AdaptiveAvgPool1d, + module_inputs_func=module_inputs_torch_nn_AdaptiveAvgPool1d, + skips=( + # Fails on MPS backend if input/output sizes are not divisible + DecorateInfo(skipMPS),) + ), + ModuleInfo(torch.nn.AdaptiveAvgPool2d, + gradcheck_nondet_tol=GRADCHECK_NONDET_TOL, + module_inputs_func=module_inputs_torch_nn_AdaptiveAvgPool2d, + skips=( + # Fails on MPS backend if input/output sizes are not divisible + DecorateInfo(skipMPS), + # Fails on backward check if output size is 1x1 + DecorateInfo( + unittest.expectedFailure, + 'TestModule', + 'test_memory_format', + active_if=lambda p: p['training'], + ),) + ), + ModuleInfo(torch.nn.AdaptiveAvgPool3d, + gradcheck_nondet_tol=GRADCHECK_NONDET_TOL, + module_inputs_func=module_inputs_torch_nn_AdaptiveAvgPool3d, + skips=( + DecorateInfo(unittest.skip("Skipped!"), 'TestModule', 'test_memory_format'), + # not supported on MPS backend + DecorateInfo(skipMPS),) + ), + ModuleInfo(torch.nn.AdaptiveMaxPool1d, + module_inputs_func=module_inputs_torch_nn_AdaptiveMaxPool1d, + skips=( + DecorateInfo(skipIfMps, 'TestModule', dtypes=[torch.float64]),) + ), + ModuleInfo(torch.nn.AdaptiveMaxPool2d, + gradcheck_nondet_tol=GRADCHECK_NONDET_TOL, + module_inputs_func=module_inputs_torch_nn_AdaptiveMaxPool2d, + skips=( + DecorateInfo(skipIfMps, 'TestModule', dtypes=[torch.float64]),) + ), + ModuleInfo(torch.nn.AdaptiveMaxPool3d, + gradcheck_nondet_tol=GRADCHECK_NONDET_TOL, + module_inputs_func=module_inputs_torch_nn_AdaptiveMaxPool3d, + skips=( + DecorateInfo(unittest.skip("Skipped!"), 'TestModule', 'test_memory_format'), + # not supported on MPS backend + DecorateInfo(skipMPS),) + ), + ModuleInfo(torch.nn.AvgPool1d, + module_inputs_func=module_inputs_torch_nn_AvgPool1d, + skips=( + DecorateInfo(skipIfMps, 'TestModule', dtypes=[torch.float64]),) + ), + ModuleInfo(torch.nn.AvgPool2d, + module_inputs_func=module_inputs_torch_nn_AvgPool2d, + skips=( + DecorateInfo(skipIfMps, 'TestModule', dtypes=[torch.float64]), + # The difference between channels last backward and + # channels first backward of AvgPool2d on CUDA is too large + # See https://github.com/pytorch/pytorch/issues/107201 + DecorateInfo( + unittest.expectedFailure, + 'TestModule', + 'test_memory_format', + active_if=lambda p: p['training'], + device_type='cuda', + ),), + ), + ModuleInfo(torch.nn.AvgPool3d, + module_inputs_func=module_inputs_torch_nn_AvgPool3d, + gradcheck_nondet_tol=GRADCHECK_NONDET_TOL, + skips=( + # No channels_last support for AvgPool1d as it does not take 4D inputs + DecorateInfo(unittest.skip("Skipped!"), 'TestModule', 'test_memory_format'), + # not supported on MPS backend + DecorateInfo(skipMPS),) + ), + ModuleInfo(torch.nn.BatchNorm1d, + train_and_eval_differ=True, + module_inputs_func=module_inputs_torch_nn_BatchNorm1d, + skips=( + # test fails on MPS backend and is being investigated. 
+ # See https://github.com/pytorch/pytorch/issues/100914 + DecorateInfo(skipMPS), + # tracking here rather than in the list in test_aotdispatch.py as eval mode passes + # RuntimeError: tried to get Double out of SymInt + DecorateInfo( + unittest.expectedFailure, 'TestEagerFusionModuleInfo', + 'test_aot_autograd_symbolic_module_exhaustive', + active_if=lambda p: p['training'] + ), + # torch._subclasses.fake_tensor.DataDependentOutputException: aten._local_scalar_dense.default + DecorateInfo( + unittest.expectedFailure, 'TestEagerFusionModuleInfo', + 'test_aot_autograd_module_exhaustive', + active_if=lambda p: p['training'] + )) + ), + ModuleInfo(torch.nn.BatchNorm2d, + train_and_eval_differ=True, + module_inputs_func=module_inputs_torch_nn_BatchNorm2d, + skips=( + # test fails on MPS backend and is being investigated. + # See https://github.com/pytorch/pytorch/issues/100914 + DecorateInfo(skipMPS), + # tracking here rather than in the list in test_aotdispatch.py as eval mode passes + # RuntimeError: tried to get Double out of SymInt + DecorateInfo( + unittest.expectedFailure, 'TestEagerFusionModuleInfo', + 'test_aot_autograd_symbolic_module_exhaustive', + active_if=lambda p: p['training'] + ), + # torch._subclasses.fake_tensor.DataDependentOutputException: aten._local_scalar_dense.default + DecorateInfo( + unittest.expectedFailure, 'TestEagerFusionModuleInfo', + 'test_aot_autograd_module_exhaustive', + active_if=lambda p: p['training'] + ),) + ), + ModuleInfo(torch.nn.BatchNorm3d, + train_and_eval_differ=True, + module_inputs_func=module_inputs_torch_nn_BatchNorm3d, + skips=( + # not supported on MPS backend + DecorateInfo(skipMPS), + # tracking here rather than in the list in test_aotdispatch.py as eval mode passes + # RuntimeError: tried to get Double out of SymInt + DecorateInfo( + unittest.expectedFailure, 'TestEagerFusionModuleInfo', + 'test_aot_autograd_symbolic_module_exhaustive', + active_if=lambda p: p['training'] + ), + # torch._subclasses.fake_tensor.DataDependentOutputException: aten._local_scalar_dense.default + DecorateInfo( + unittest.expectedFailure, 'TestEagerFusionModuleInfo', + 'test_aot_autograd_module_exhaustive', + active_if=lambda p: p['training'] + ),) + ), + ModuleInfo(torch.nn.CELU, + module_inputs_func=module_inputs_torch_nn_CELU, + skips=( + DecorateInfo(skipIfMps, 'TestModule', dtypes=[torch.float64]),) + ), + ModuleInfo(torch.nn.Conv1d, + module_inputs_func=partial(module_inputs_torch_nn_ConvNd, N=1, lazy=False), + gradcheck_nondet_tol=GRADCHECK_NONDET_TOL, + module_memformat_affects_out=True, + skips=( + # channels_last support on cuda requires cudnn >= 7603 + DecorateInfo(skipCUDAIfCudnnVersionLessThan(version=7603), 'TestModule', 'test_memory_format'), + # Failure on ROCM for float32 issue #70125 + DecorateInfo(skipCUDAIfRocm, 'TestModule', 'test_memory_format', dtypes=[torch.float32]), + DecorateInfo(skipIfMps, 'TestModule', dtypes=[torch.float64]) + ), + decorators=( + DecorateInfo(precisionOverride({torch.float32: 1e-04}), 'TestModule', 'test_memory_format'), + )), + ModuleInfo(torch.nn.Conv2d, + module_inputs_func=partial(module_inputs_torch_nn_ConvNd, N=2, lazy=False), + gradcheck_nondet_tol=GRADCHECK_NONDET_TOL, + module_memformat_affects_out=True, + skips=( + # channels_last support on cuda requires cudnn >= 7603 + DecorateInfo(skipCUDAIfCudnnVersionLessThan(version=7603), 'TestModule', 'test_memory_format'), + # Failure on ROCM for float32 issue #70125 + DecorateInfo(skipCUDAIfRocm, 'TestModule', 'test_memory_format', dtypes=[torch.float32]), + 
DecorateInfo(skipIfMps, 'TestModule', dtypes=[torch.float64]), + # This was wrongly being skipped before and needs investigation. + # See https://github.com/pytorch/pytorch/issues/80247 + DecorateInfo(unittest.expectedFailure, "TestModule", "test_memory_format", + device_type='cuda', dtypes=[torch.float64]), + # Fails with channels last test on MPS backend + DecorateInfo(unittest.expectedFailure, "TestModule", "test_memory_format", + device_type='mps', dtypes=[torch.float32]), + ), + decorators=( + DecorateInfo(precisionOverride({torch.float32: 1e-04}), 'TestModule', 'test_memory_format'), + )), + ModuleInfo(torch.nn.Conv3d, + module_inputs_func=partial(module_inputs_torch_nn_ConvNd, N=3, lazy=False), + gradcheck_nondet_tol=GRADCHECK_NONDET_TOL, + module_memformat_affects_out=True, + skips=( + # channels_last support on cuda requires cudnn >= 8005 + DecorateInfo(skipCUDAIfCudnnVersionLessThan(version=8005), 'TestModule', 'test_memory_format'), + # Failure on ROCM for float32 issue #70125 + DecorateInfo(skipCUDAIfRocm, 'TestModule', 'test_memory_format', dtypes=[torch.float32]), + # Conv3d is not supported on MPS backend + DecorateInfo(skipMPS), + # This was wrongly being skipped before and needs investigation. + # See https://github.com/pytorch/pytorch/issues/80247 + DecorateInfo(unittest.expectedFailure, "TestModule", "test_memory_format"), + ), + decorators=( + DecorateInfo(precisionOverride({torch.float32: 1e-04}), 'TestModule', 'test_memory_format'), + )), + ModuleInfo(torch.nn.ConvTranspose1d, + module_inputs_func=partial(module_inputs_torch_nn_ConvNd, N=1, lazy=False, transposed=True), + gradcheck_nondet_tol=GRADCHECK_NONDET_TOL, + module_memformat_affects_out=True, + dtypes=floating_and_complex_types_and(torch.chalf), + skips=( + # channels_last support on cuda requires cudnn >= 7603 + DecorateInfo(skipCUDAIfCudnnVersionLessThan(version=7603), 'TestModule', 'test_memory_format'), + # Failure on ROCM for float32 issue #70125 + DecorateInfo(skipCUDAIfRocm, 'TestModule', 'test_memory_format', dtypes=[torch.float32]), + DecorateInfo(skipIfMps, 'TestModule', + dtypes=complex_types_and(torch.chalf, torch.float64, torch.complex128)), + # Not implmented for chalf on CPU + DecorateInfo(unittest.expectedFailure, 'TestModule', 'test_cpu_gpu_parity', + dtypes=(torch.chalf,), device_type='cuda'), + # Ref: https://github.com/pytorch/pytorch/issues/73502 + DecorateInfo(unittest.expectedFailure, 'TestModule', 'test_pickle', dtypes=(torch.chalf,)), + ), + decorators=( + DecorateInfo(precisionOverride({torch.float32: 1e-04}), 'TestModule', 'test_memory_format'), + DecorateInfo(precisionOverride({torch.chalf: 5e-03}), 'TestModule', 'test_memory_format'), + )), + ModuleInfo(torch.nn.ConvTranspose2d, + module_inputs_func=partial(module_inputs_torch_nn_ConvNd, N=2, lazy=False, transposed=True), + gradcheck_nondet_tol=GRADCHECK_NONDET_TOL, + module_memformat_affects_out=True, + dtypes=floating_and_complex_types_and(torch.chalf), + skips=( + # channels_last support on cuda requires cudnn >= 7603 + DecorateInfo(skipCUDAIfCudnnVersionLessThan(version=7603), 'TestModule', 'test_memory_format'), + # Failure on ROCM for float32 issue #70125 + DecorateInfo(skipCUDAIfRocm, 'TestModule', 'test_memory_format', dtypes=[torch.float32]), + DecorateInfo(skipIfMps, 'TestModule', + dtypes=complex_types_and(torch.chalf, torch.float64, torch.complex128)), + # Fails on backward check because ViewAsRealBackward apply contiguous for grad + DecorateInfo(unittest.expectedFailure, 'TestModule', 'test_memory_format', + 
dtypes=(torch.complex32, torch.complex64, torch.complex128)), + # This was wrongly being skipped before and needs investigation. + # See https://github.com/pytorch/pytorch/issues/80247 + DecorateInfo(unittest.expectedFailure, "TestModule", "test_memory_format", device_type='cuda', + dtypes=[torch.float64, torch.complex128]), + # Fails with channels last test on MPS backend + DecorateInfo(unittest.expectedFailure, "TestModule", "test_memory_format", + device_type='mps', dtypes=[torch.float32]), + # Not implemented for chalf on CPU + DecorateInfo(unittest.expectedFailure, 'TestModule', 'test_cpu_gpu_parity', + dtypes=(torch.chalf,), device_type='cuda'), + # Ref: https://github.com/pytorch/pytorch/issues/73502 + DecorateInfo(unittest.expectedFailure, 'TestModule', 'test_pickle', dtypes=(torch.chalf,)), + ), + decorators=( + DecorateInfo(precisionOverride({torch.float32: 1e-04}), 'TestModule', 'test_memory_format'), + DecorateInfo(precisionOverride({torch.chalf: 5e-03}), 'TestModule', 'test_memory_format'), + )), + ModuleInfo(torch.nn.ConvTranspose3d, + module_inputs_func=partial(module_inputs_torch_nn_ConvNd, N=3, lazy=False, transposed=True), + dtypes=floating_and_complex_types_and(torch.chalf), + gradcheck_nondet_tol=GRADCHECK_NONDET_TOL, + module_memformat_affects_out=True, + skips=( + # channels_last support on cuda requires cudnn >= 8005 + DecorateInfo(skipCUDAIfCudnnVersionLessThan(version=8005), 'TestModule', 'test_memory_format'), + # Failure on ROCM for float32 issue #70125 + DecorateInfo(skipCUDAIfRocm, 'TestModule', 'test_memory_format', dtypes=[torch.float32]), + # ConvTranspose3d is not supported on MPS backend + DecorateInfo(skipMPS), + # This was wrongly being skipped before and needs investigation. + # See https://github.com/pytorch/pytorch/issues/80247 + DecorateInfo(unittest.expectedFailure, "TestModule", "test_memory_format"), + # These fail only on ROCm + DecorateInfo(unittest.expectedFailure, "TestModule", "test_memory_format", device_type='cuda', + dtypes=[torch.complex32, torch.complex64], active_if=TEST_WITH_ROCM), + # Not implmented for chalf on CPU + DecorateInfo(unittest.expectedFailure, 'TestModule', 'test_cpu_gpu_parity', + dtypes=(torch.chalf,), device_type='cuda'), + # Ref: https://github.com/pytorch/pytorch/issues/73502 + DecorateInfo(unittest.expectedFailure, 'TestModule', 'test_pickle', dtypes=(torch.chalf,)), + ), + decorators=( + DecorateInfo(precisionOverride({torch.float32: 1e-04}), 'TestModule', 'test_memory_format'), + DecorateInfo(precisionOverride({torch.complex64: 1e-04}), 'TestModule', 'test_cpu_gpu_parity'), + DecorateInfo(precisionOverride({torch.chalf: 5e-03}), 'TestModule', 'test_memory_format'), + )), + ModuleInfo(torch.nn.ELU, + module_inputs_func=module_inputs_torch_nn_ELU, + skips=( + DecorateInfo(skipIfMps, 'TestModule', dtypes=[torch.float64]),) + ), + ModuleInfo(torch.nn.FractionalMaxPool2d, + module_inputs_func=module_inputs_torch_nn_FractionalMaxPool2d, + gradcheck_nondet_tol=GRADCHECK_NONDET_TOL, + skips=( + # not supported on MPS backend + DecorateInfo(skipMPS), + DecorateInfo(unittest.skip("Skipped!"), 'TestModule', 'test_memory_format'),) + ), + ModuleInfo(torch.nn.FractionalMaxPool3d, + module_inputs_func=module_inputs_torch_nn_FractionalMaxPool3d, + gradcheck_nondet_tol=GRADCHECK_NONDET_TOL, + skips=( + # not supported on MPS backend + DecorateInfo(skipMPS), + DecorateInfo(unittest.skip("Skipped!"), 'TestModule', 'test_memory_format'),) + ), + ModuleInfo(torch.nn.L1Loss, + module_inputs_func=module_inputs_torch_nn_L1Loss, + 
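Most of the skips and tolerance overrides in the convolution entries target test_memory_format, i.e. the channels_last path. A minimal sketch of the property that test exercises, assuming a backend where channels_last convolution is supported; the sizes are arbitrary and the real test additionally compares results and gradients against the contiguous run.

import torch

conv = torch.nn.Conv2d(3, 8, kernel_size=3).to(memory_format=torch.channels_last)
x = torch.randn(2, 3, 16, 16).to(memory_format=torch.channels_last)
out = conv(x)
# the channels_last layout is expected to propagate through the module
assert out.is_contiguous(memory_format=torch.channels_last)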
skips=( + # No channels_last support for loss functions. + DecorateInfo(unittest.skip("Skipped!"), 'TestModule', 'test_memory_format'), + DecorateInfo(skipIfMps, 'TestModule', dtypes=[torch.float64]),) + ), + ModuleInfo(torch.nn.LazyConv1d, + module_inputs_func=partial(module_inputs_torch_nn_ConvNd, N=1, lazy=True), + gradcheck_nondet_tol=GRADCHECK_NONDET_TOL, + module_memformat_affects_out=True, + skips=( + # channels_last support on cuda requires cudnn >= 7603 + DecorateInfo(skipCUDAIfCudnnVersionLessThan(version=7603), 'TestModule', 'test_memory_format'), + # Failure on ROCM for float32 issue #70125 + DecorateInfo(skipCUDAIfRocm, 'TestModule', 'test_memory_format', dtypes=[torch.float32]), + # Lazy modules don't currently play well with ModuleInfo tests on the meta device. + # See https://github.com/pytorch/pytorch/issues/70505 for more info. + DecorateInfo(skipMeta), + DecorateInfo(skipIfMps, 'TestModule', dtypes=[torch.float64]), + ), + decorators=( + DecorateInfo(precisionOverride({torch.float32: 1e-04}), 'TestModule', 'test_memory_format'), + )), + ModuleInfo(torch.nn.LazyConv2d, + module_inputs_func=partial(module_inputs_torch_nn_ConvNd, N=2, lazy=True), + gradcheck_nondet_tol=GRADCHECK_NONDET_TOL, + module_memformat_affects_out=True, + skips=( + DecorateInfo(skipIfMps, 'TestModule', dtypes=[torch.float64]), + # channels_last support on cuda requires cudnn >= 7603 + DecorateInfo(skipCUDAIfCudnnVersionLessThan(version=7603), 'TestModule', 'test_memory_format'), + # Failure on ROCM for float32 issue #70125 + DecorateInfo(skipCUDAIfRocm, 'TestModule', 'test_memory_format', dtypes=[torch.float32]), + # Lazy modules don't currently play well with ModuleInfo tests on the meta device. + # See https://github.com/pytorch/pytorch/issues/70505 for more info. + DecorateInfo(skipMeta), + # This was wrongly being skipped before and needs investigation. + # See https://github.com/pytorch/pytorch/issues/80247 + DecorateInfo(unittest.expectedFailure, "TestModule", "test_memory_format", + device_type='cuda', dtypes=[torch.float64]), + # Fails with channels last test on MPS backend + DecorateInfo(unittest.expectedFailure, "TestModule", "test_memory_format", + device_type='mps', dtypes=[torch.float32]), + ), + decorators=( + DecorateInfo(precisionOverride({torch.float32: 1e-04}), 'TestModule', 'test_memory_format'), + )), + ModuleInfo(torch.nn.LazyConv3d, + module_inputs_func=partial(module_inputs_torch_nn_ConvNd, N=3, lazy=True), + gradcheck_nondet_tol=GRADCHECK_NONDET_TOL, + module_memformat_affects_out=True, + skips=( + # channels_last support on cuda requires cudnn >= 8005 + DecorateInfo(skipCUDAIfCudnnVersionLessThan(version=8005), 'TestModule', 'test_memory_format'), + # Failure on ROCM for float32 issue #70125 + DecorateInfo(skipCUDAIfRocm, 'TestModule', 'test_memory_format', dtypes=[torch.float32]), + # Lazy modules don't currently play well with ModuleInfo tests on the meta device. + # See https://github.com/pytorch/pytorch/issues/70505 for more info. + DecorateInfo(skipMeta), + # LazyConv3d is not supported on MPS backend + DecorateInfo(skipMPS), + # This was wrongly being skipped before and needs investigation. 
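The Lazy* entries carry skipMeta because lazy modules only materialize their parameters on the first real forward pass, which currently clashes with the meta-device ModuleInfo tests (issue 70505 above). A small self-contained illustration of that materialization:

import torch

m = torch.nn.LazyConv2d(out_channels=8, kernel_size=3)
assert isinstance(m.weight, torch.nn.parameter.UninitializedParameter)
m(torch.randn(1, 3, 16, 16))            # first forward infers in_channels=3
assert tuple(m.weight.shape) == (8, 3, 3, 3)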
+ # See https://github.com/pytorch/pytorch/issues/80247 + DecorateInfo(unittest.expectedFailure, "TestModule", "test_memory_format"), + ), + decorators=( + DecorateInfo(precisionOverride({torch.float32: 1e-04}), 'TestModule', 'test_memory_format'), + )), + ModuleInfo(torch.nn.LazyConvTranspose1d, + module_inputs_func=partial(module_inputs_torch_nn_ConvNd, N=1, lazy=True, transposed=True), + gradcheck_nondet_tol=GRADCHECK_NONDET_TOL, + module_memformat_affects_out=True, + skips=( + # channels_last support on cuda requires cudnn >= 7603 + DecorateInfo(skipCUDAIfCudnnVersionLessThan(version=7603), 'TestModule', 'test_memory_format'), + # Failure on ROCM for float32 issue #70125 + DecorateInfo(skipCUDAIfRocm, 'TestModule', 'test_memory_format', dtypes=[torch.float32]), + # Lazy modules don't currently play well with ModuleInfo tests on the meta device. + # See https://github.com/pytorch/pytorch/issues/70505 for more info. + DecorateInfo(skipMeta), + DecorateInfo(skipIfMps, 'TestModule', dtypes=[torch.float64]), + ), + decorators=( + DecorateInfo(precisionOverride({torch.float32: 1e-04}), 'TestModule', 'test_memory_format'), + )), + ModuleInfo(torch.nn.LazyConvTranspose2d, + module_inputs_func=partial(module_inputs_torch_nn_ConvNd, N=2, lazy=True, transposed=True), + gradcheck_nondet_tol=GRADCHECK_NONDET_TOL, + module_memformat_affects_out=True, + skips=( + # channels_last support on cuda requires cudnn >= 7603 + DecorateInfo(skipCUDAIfCudnnVersionLessThan(version=7603), 'TestModule', 'test_memory_format'), + # Failure on ROCM for float32 issue #70125 + DecorateInfo(skipCUDAIfRocm, 'TestModule', 'test_memory_format', dtypes=[torch.float32]), + # Lazy modules don't currently play well with ModuleInfo tests on the meta device. + # See https://github.com/pytorch/pytorch/issues/70505 for more info. + DecorateInfo(skipMeta), + DecorateInfo(skipIfMps, 'TestModule', dtypes=[torch.float64]), + # This was wrongly being skipped before and needs investigation. + # See https://github.com/pytorch/pytorch/issues/80247 + DecorateInfo(unittest.expectedFailure, "TestModule", "test_memory_format", device_type='cuda', + dtypes=[torch.float64]), + # Fails with channels last test on MPS backend + DecorateInfo(unittest.expectedFailure, "TestModule", "test_memory_format", + device_type='mps', dtypes=[torch.float32]), + ), + decorators=( + DecorateInfo(precisionOverride({torch.float32: 1e-04}), 'TestModule', 'test_memory_format'), + )), + ModuleInfo(torch.nn.LazyConvTranspose3d, + module_inputs_func=partial(module_inputs_torch_nn_ConvNd, N=3, lazy=True, transposed=True), + gradcheck_nondet_tol=GRADCHECK_NONDET_TOL, + module_memformat_affects_out=True, + skips=( + # channels_last support on cuda requires cudnn >= 8005 + DecorateInfo(skipCUDAIfCudnnVersionLessThan(version=8005), 'TestModule', 'test_memory_format'), + # Failure on ROCM for float32 issue #70125 + DecorateInfo(skipCUDAIfRocm, 'TestModule', 'test_memory_format', dtypes=[torch.float32]), + # Lazy modules don't currently play well with ModuleInfo tests on the meta device. + # See https://github.com/pytorch/pytorch/issues/70505 for more info. + DecorateInfo(skipMeta), + # LazyConvTranspose3d is not supported on MPS backend + DecorateInfo(skipMPS), + # This was wrongly being skipped before and needs investigation. 
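The gradcheck_nondet_tol=GRADCHECK_NONDET_TOL field on the convolution entries is forwarded to the nondet_tol argument of torch.autograd.gradcheck, which tolerates tiny run-to-run differences from nondeterministic backward kernels such as cuDNN's. A hedged standalone illustration; the 1e-12 literal is only a stand-in for the constant this file imports.

import torch

x = torch.randn(1, 2, 6, 6, dtype=torch.double, requires_grad=True)
conv = torch.nn.Conv2d(2, 3, kernel_size=3).double()
# nondet_tol lets repeated evaluations of the backward pass differ slightly
torch.autograd.gradcheck(lambda t: conv(t), (x,), nondet_tol=1e-12)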
+ # See https://github.com/pytorch/pytorch/issues/80247 + DecorateInfo(unittest.expectedFailure, "TestModule", "test_memory_format"), + ), + decorators=( + DecorateInfo(precisionOverride({torch.float32: 1e-04}), 'TestModule', 'test_memory_format'), + )), + ModuleInfo(torch.nn.Linear, + module_inputs_func=module_inputs_torch_nn_Linear, + skips=( + DecorateInfo(skipIfMps, 'TestModule', dtypes=[torch.float64]), + # No channels_last support for Linear currently. + DecorateInfo(unittest.skip("Skipped!"), 'TestModule', 'test_memory_format'),) + ), + ModuleInfo(torch.nn.Bilinear, + module_inputs_func=module_inputs_torch_nn_Bilinear, + decorators=[ + DecorateInfo( + toleranceOverride({ + torch.float32: tol(atol=1e-4, rtol=1e-4), + torch.float64: tol(atol=1e-4, rtol=1e-4)}), + 'TestModule', 'test_forward', device_type='cpu') + ], + skips=( + DecorateInfo(skipIfMps, 'TestModule', dtypes=[torch.float64]), + # No channels_last support for Bilinear currently. + DecorateInfo(unittest.skip("Skipped!"), 'TestModule', 'test_memory_format'),) + ), + ModuleInfo(torch.nn.LPPool1d, + module_inputs_func=module_inputs_torch_nn_LPPool1d, + skips=( + DecorateInfo(unittest.skip("Skipped!"), 'TestModule', 'test_grad'), + DecorateInfo(unittest.skip("Skipped!"), 'TestModule', 'test_gradgrad'), + DecorateInfo(skipIfMps, 'TestModule', dtypes=[torch.float64]),) + ), + ModuleInfo(torch.nn.LPPool2d, + module_inputs_func=module_inputs_torch_nn_LPPool2d, + skips=( + DecorateInfo(unittest.skip("Skipped!"), 'TestModule', 'test_grad'), + DecorateInfo(unittest.skip("Skipped!"), 'TestModule', 'test_gradgrad'), + DecorateInfo(skipIfMps, 'TestModule', dtypes=[torch.float64]), + # Fails on backward check on MPS + # See https://github.com/pytorch/pytorch/issues/107214 + DecorateInfo( + unittest.expectedFailure, + 'TestModule', + 'test_memory_format', + active_if=lambda p: p['training'], + device_type='mps', + ),) + ), + ModuleInfo(torch.nn.MaxPool1d, + module_inputs_func=module_inputs_torch_nn_MaxPool1d, + skips=( + DecorateInfo(skipIfMps, 'TestModule', dtypes=[torch.float64]),) + ), + ModuleInfo(torch.nn.MaxPool2d, + module_inputs_func=module_inputs_torch_nn_MaxPool2d, + skips=( + DecorateInfo(skipIfMps, 'TestModule', dtypes=[torch.float64]),) + ), + ModuleInfo(torch.nn.MaxPool3d, + module_inputs_func=module_inputs_torch_nn_MaxPool3d, + gradcheck_nondet_tol=GRADCHECK_NONDET_TOL, + skips=( + # not supported on MPS backend + DecorateInfo(skipMPS),) + ), + ModuleInfo(torch.nn.NLLLoss, + module_inputs_func=module_inputs_torch_nn_NLLLoss, + skips=( + # No channels_last support for loss functions. + DecorateInfo(unittest.skip("Skipped!"), 'TestModule', 'test_memory_format'), + DecorateInfo(skipIfMps, 'TestModule', dtypes=[torch.float64]),) + ), + ModuleInfo(torch.nn.GaussianNLLLoss, + module_inputs_func=module_inputs_torch_nn_GaussianNLLLoss, + skips=( + # No channels_last support for loss functions. 
+ DecorateInfo(skipIfMps, 'TestModule', dtypes=[torch.float64]), + DecorateInfo(unittest.skip("Skipped!"), 'TestModule', 'test_memory_format'),)), + ModuleInfo(torch.nn.CrossEntropyLoss, + module_inputs_func=module_inputs_torch_nn_CrossEntropyLoss, + skips=( + DecorateInfo(skipIfMps, 'TestModule', dtypes=[torch.float64]),) + ), + ModuleInfo(torch.nn.GELU, + module_inputs_func=module_inputs_torch_nn_GELU, + skips=( + DecorateInfo(skipIfMps, 'TestModule', dtypes=[torch.float64]),) + ), + ModuleInfo(torch.nn.GLU, + module_inputs_func=module_inputs_torch_nn_GLU, + skips=( + DecorateInfo(skipIfMps, 'TestModule', dtypes=[torch.float64]),) + ), + ModuleInfo(torch.nn.GroupNorm, + module_inputs_func=module_inputs_torch_nn_GroupNorm, + dtypes=get_all_fp_dtypes(include_bfloat16=True, include_half=True), + skips=( + DecorateInfo(skipIfMps, 'TestModule', dtypes=[torch.float64, torch.bfloat16]), + # Tracking at https://github.com/pytorch/pytorch/issues/98089 + DecorateInfo(unittest.skip("Skipped!"), 'TestModule', 'test_cpu_gpu_parity'), + DecorateInfo(toleranceOverride({torch.float32: tol(atol=1e-4, rtol=1e-4)}), + 'TestModule', 'test_memory_format', device_type='cpu'), + # No channels_last support for GroupNorm currently. + DecorateInfo(unittest.skip("Skipped!"), 'TestModule', 'test_memory_format', device_type='cuda'), + DecorateInfo(unittest.skip("Skipped!"), 'TestModule', 'test_memory_format', device_type='mps'), + DecorateInfo(unittest.skip("Skipped!"), "TestModule", "test_grad", + active_if=TEST_WITH_ROCM, device_type='cuda'),) + ), + ModuleInfo(torch.nn.Hardshrink, + module_inputs_func=module_inputs_torch_nn_Hardshrink, + skips=( + # not supported on MPS backend + DecorateInfo(skipMPS),), + ), + ModuleInfo(torch.nn.Hardswish, + module_inputs_func=module_inputs_torch_nn_Hardswish, + skips=( + DecorateInfo(skipIfMps, 'TestModule', dtypes=[torch.float64]), + # Fails on backward check on MPS + # See https://github.com/pytorch/pytorch/issues/107214 + DecorateInfo( + unittest.expectedFailure, + 'TestModule', + 'test_memory_format', + active_if=lambda p: p['training'], + device_type='mps', + ),), + supports_gradgrad=False), + ModuleInfo(torch.nn.Hardtanh, + module_inputs_func=module_inputs_torch_nn_Hardtanh, + skips=( + DecorateInfo(skipIfMps, 'TestModule', dtypes=[torch.float64]),), + ), + ModuleInfo(torch.nn.InstanceNorm1d, + module_inputs_func=partial(module_inputs_torch_nn_InstanceNormNd, N=1), + train_and_eval_differ=True, + skips=( + DecorateInfo(skipIfMps, 'TestModule', dtypes=[torch.float64]), + # No channels_last support for InstanceNorm1d currently. + DecorateInfo(unittest.skip("Skipped!"), 'TestModule', 'test_memory_format'),) + ), + ModuleInfo(torch.nn.InstanceNorm2d, + module_inputs_func=partial(module_inputs_torch_nn_InstanceNormNd, N=2), + train_and_eval_differ=True, + skips=( + DecorateInfo(skipIfMps, 'TestModule', dtypes=[torch.float64]), + # No channels_last support for InstanceNorm2d currently. + DecorateInfo(unittest.skip("Skipped!"), 'TestModule', 'test_memory_format'),) + ), + ModuleInfo(torch.nn.InstanceNorm3d, + module_inputs_func=partial(module_inputs_torch_nn_InstanceNormNd, N=3), + train_and_eval_differ=True, + skips=( + # not supported on MPS backend + DecorateInfo(skipMPS), + # No channels_last support for InstanceNorm3d currently. 
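train_and_eval_differ=True on the InstanceNorm entries (and on the BatchNorm entries earlier in this table) makes the ModuleInfo tests parametrize over training mode, because the two modes compute genuinely different things once running statistics are tracked. A small illustration with arbitrary sizes:

import torch

m = torch.nn.InstanceNorm1d(4, affine=True, track_running_stats=True)
x = torch.randn(2, 4, 8)
m.train()
y_train = m(x)   # normalizes with per-instance statistics and updates running stats
m.eval()
y_eval = m(x)    # normalizes with the tracked running statistics instead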
+ DecorateInfo(unittest.skip("Skipped!"), 'TestModule', 'test_memory_format'),) + ), + ModuleInfo(torch.nn.LocalResponseNorm, + module_inputs_func=module_inputs_torch_nn_LocalResponseNorm, + skips=( + # uses avg_pool3d which is not supported on MPS backend + DecorateInfo(skipMPS),) + ), + ModuleInfo(torch.nn.LayerNorm, + module_inputs_func=module_inputs_torch_nn_LayerNorm, + skips=( + DecorateInfo(skipIfMps, 'TestModule', dtypes=[torch.float64]), + # No channels_last support for LayerNorm currently. + DecorateInfo(unittest.skip("Skipped!"), 'TestModule', 'test_memory_format'),) + ), + # TransformerEncoder takes the same inputs as TransformerEncoderLayer + ModuleInfo(torch.nn.TransformerEncoder, + train_and_eval_differ=True, + module_inputs_func=module_inputs_torch_nn_TransformerEncoder, + decorators=[ + # Not implemented for SDPA backward derivative + DecorateInfo(unittest.skip("Skipped!"), 'TestModule', 'test_gradgrad', + device_type='cpu'), + ], + skips=( + # No channels_last support for TransformerEncoderLayer currently. + DecorateInfo(unittest.skip("Skipped!"), 'TestModule', 'test_memory_format'), + # Doesn't support device / dtype kwargs directly because it is just a + # container of TransformerEncoderLayers. + DecorateInfo(unittest.expectedFailure, 'TestModule', 'test_factory_kwargs'), + DecorateInfo(skipIfMps, 'TestModule', dtypes=[torch.float64]),) + ), + ModuleInfo(torch.nn.TransformerEncoderLayer, + train_and_eval_differ=True, + module_inputs_func=module_inputs_torch_nn_TransformerEncoderLayer, + decorators=[ + DecorateInfo(toleranceOverride({torch.float32: tol(atol=1e-4, rtol=1e-4)}), + 'TestModule', 'test_non_contiguous_tensors', + device_type='cpu', active_if=IS_WINDOWS), + # Not implemented for SDPA backward derivative + DecorateInfo(unittest.skip("Skipped!"), 'TestModule', 'test_gradgrad', + device_type='cpu'), + ], + skips=( + # No channels_last support for TransformerEncoderLayer currently. + DecorateInfo(unittest.skip("Skipped!"), 'TestModule', 'test_memory_format'), + DecorateInfo(skipIfMps, 'TestModule', dtypes=[torch.float64]),) + ), + ModuleInfo(torch.nn.TransformerDecoderLayer, + module_inputs_func=module_inputs_torch_nn_TransformerDecoderLayer, + decorators=[ + # Not implemented for SDPA backward derivative + DecorateInfo(unittest.skip("Skipped!"), 'TestModule', 'test_gradgrad', + device_type='cpu'), + ], + skips=( + # No channels_last support for TransformerDecoderLayer currently. + DecorateInfo(unittest.skip("Skipped!"), 'TestModule', 'test_memory_format'), + DecorateInfo(skipIfMps, 'TestModule', dtypes=[torch.float64]),) + ), + ModuleInfo(torch.nn.Transformer, + module_inputs_func=module_inputs_torch_nn_Transformer, + decorators=[ + # Not implemented for SDPA backward derivative + DecorateInfo(unittest.skip("Skipped!"), 'TestModule', 'test_gradgrad', + device_type='cpu'), + ], + skips=( + # No channels_last support for Transformer currently. + DecorateInfo(unittest.skip("Skipped!"), 'TestModule', 'test_memory_format'), + DecorateInfo(skipIfMps, 'TestModule', dtypes=[torch.float64]),) + ), + ModuleInfo(torch.nn.MultiheadAttention, + train_and_eval_differ=True, + module_inputs_func=module_inputs_torch_nn_MultiheadAttention, + skips=( + # No channels_last support for MultiheadAttention currently. 
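The test_factory_kwargs expected failure on TransformerEncoder reflects that it only stacks already-constructed layers rather than creating parameters itself, so it has no device= / dtype= constructor arguments of its own. A hedged sketch of the pattern that test checks, with sizes chosen purely for illustration:

import torch

# a leaf module accepts the factory kwargs directly ...
layer = torch.nn.TransformerEncoderLayer(d_model=8, nhead=2, device="cpu", dtype=torch.float32)
# ... while TransformerEncoder merely wraps copies of an existing layer
encoder = torch.nn.TransformerEncoder(layer, num_layers=2)
out = encoder(torch.randn(5, 3, 8))   # (seq_len, batch, d_model)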
+ DecorateInfo(unittest.skip("Skipped!"), 'TestModule', 'test_memory_format'), + DecorateInfo(skipIfMps, 'TestModule', dtypes=[torch.float64]),) + ), + ModuleInfo(torch.nn.Embedding, + module_inputs_func=module_inputs_torch_nn_Embedding, + skips=( + DecorateInfo(unittest.skip("Skipped!"), 'TestModule', 'test_memory_format'), + DecorateInfo(skipIfMps, 'TestModule', dtypes=[torch.float64]),) + ), + ModuleInfo(torch.nn.ReLU, + module_inputs_func=module_inputs_torch_nn_ReLU, + skips=( + DecorateInfo(skipIfMps, 'TestModule', dtypes=[torch.float64]), + # Fails on backward check on MPS + # See https://github.com/pytorch/pytorch/issues/107214 + DecorateInfo( + unittest.expectedFailure, + 'TestModule', + 'test_memory_format', + active_if=lambda p: p['training'], + device_type='mps', + ),) + ), + ModuleInfo(torch.nn.LeakyReLU, + module_inputs_func=module_inputs_torch_nn_LeakyReLU, + skips=( + DecorateInfo(skipIfMps, 'TestModule', dtypes=[torch.float64]),) + ), + ModuleInfo(torch.nn.ReLU6, + module_inputs_func=module_inputs_torch_nn_ReLU6, + skips=( + # test fails on MPS backend and is being investigated. + # See https://github.com/pytorch/pytorch/issues/100914 + DecorateInfo(skipMPS),) + ), + ModuleInfo(torch.nn.PReLU, + module_inputs_func=module_inputs_torch_nn_PReLU, + skips=( + # test fails on MPS backend and is being investigated. + # See https://github.com/pytorch/pytorch/issues/100914 + DecorateInfo(skipMPS),) + ), + ModuleInfo(torch.nn.RNNCell, + module_inputs_func=partial(module_inputs_torch_nn_RNN_GRU_Cell, is_rnn=True), + module_error_inputs_func=module_error_inputs_torch_nn_RNN_GRU_Cell, + skips=( + DecorateInfo(skipIfMps, 'TestModule', dtypes=[torch.float64]),) + ), + ModuleInfo(torch.nn.GRUCell, + module_inputs_func=module_inputs_torch_nn_RNN_GRU_Cell, + module_error_inputs_func=module_error_inputs_torch_nn_RNN_GRU_Cell, + skips=( + DecorateInfo(skipIfMps, 'TestModule', dtypes=[torch.float64]),) + ), + ModuleInfo(torch.nn.LSTMCell, + module_inputs_func=module_inputs_torch_nn_LSTMCell, + module_error_inputs_func=module_error_inputs_torch_nn_LSTMCell, + skips=( + DecorateInfo(skipIfMps, 'TestModule', dtypes=[torch.float64]),) + ), + ModuleInfo(torch.nn.Sigmoid, + module_inputs_func=module_inputs_torch_nn_Sigmoid, + skips=( + DecorateInfo(skipIfMps, 'TestModule', dtypes=[torch.float64]), + # Fails on backward check on MPS + # See https://github.com/pytorch/pytorch/issues/107214 + DecorateInfo( + unittest.expectedFailure, + 'TestModule', + 'test_memory_format', + active_if=lambda p: p['training'], + device_type='mps', + ),) + ), + ModuleInfo(torch.nn.LogSigmoid, + module_inputs_func=module_inputs_torch_nn_LogSigmoid, + skips=( + DecorateInfo(skipIfMps, 'TestModule', dtypes=[torch.float64]),) + ), + ModuleInfo(torch.nn.SiLU, + module_inputs_func=module_inputs_torch_nn_SiLU, + skips=( + DecorateInfo(skipIfMps, 'TestModule', dtypes=[torch.float64]),) + ), + ModuleInfo(torch.nn.Softmax, + module_inputs_func=module_inputs_torch_nn_Softmax, + skips=( + DecorateInfo(skipIfMps, 'TestModule', dtypes=[torch.float64]),) + ), + ModuleInfo(torch.nn.Softmax2d, + module_inputs_func=module_inputs_torch_nn_Softmax2d, + skips=( + DecorateInfo(skipIfMps, 'TestModule', dtypes=[torch.float64]), + # no channels last support for Softmax2d currently + DecorateInfo(unittest.skip("Skipped!"), 'TestModule', 'test_memory_format'),) + ), + ModuleInfo(torch.nn.LogSoftmax, + module_inputs_func=module_inputs_torch_nn_LogSoftmax, + skips=( + DecorateInfo(skipIfMps, 'TestModule', dtypes=[torch.float64]), + # no channels 
last support for LogSoftmax currently + DecorateInfo(unittest.skip("Skipped!"), 'TestModule', 'test_memory_format'),) + ), + ModuleInfo(torch.nn.Softmin, + module_inputs_func=module_inputs_torch_nn_Softmin, + skips=( + DecorateInfo(skipIfMps, 'TestModule', dtypes=[torch.float64]), + # no channels last support for Softmin currently + DecorateInfo(unittest.skip("Skipped!"), 'TestModule', 'test_memory_format'),) + ), + ModuleInfo(torch.nn.Softplus, + module_inputs_func=module_inputs_torch_nn_Softplus, + skips=( + # test fails on MPS backend and is being investigated. + # See https://github.com/pytorch/pytorch/issues/100914 + DecorateInfo(skipMPS),) + ), + ModuleInfo(torch.nn.Softshrink, + module_inputs_func=module_inputs_torch_nn_Softshrink, + skips=( + # not supported on MPS backend + DecorateInfo(skipMPS),) + ), + ModuleInfo(torch.nn.Softsign, + module_inputs_func=module_inputs_torch_nn_Softsign, + skips=( + DecorateInfo(skipIfMps, 'TestModule', dtypes=[torch.float64]),) + ), + ModuleInfo(torch.nn.Tanh, + module_inputs_func=module_inputs_torch_nn_Tanh, + skips=( + DecorateInfo(skipIfMps, 'TestModule', dtypes=[torch.float64]), + # Fails on backward check on MPS + # See https://github.com/pytorch/pytorch/issues/107214 + DecorateInfo( + unittest.expectedFailure, + 'TestModule', + 'test_memory_format', + active_if=lambda p: p['training'], + device_type='mps', + ),) + ), + ModuleInfo(torch.nn.Tanhshrink, + module_inputs_func=module_inputs_torch_nn_Tanhshrink, + skips=( + DecorateInfo(skipIfMps, 'TestModule', dtypes=[torch.float64]), + # Fails on backward check on MPS + # See https://github.com/pytorch/pytorch/issues/107214 + DecorateInfo( + unittest.expectedFailure, + 'TestModule', + 'test_memory_format', + active_if=lambda p: p['training'], + device_type='mps', + ),) + ), + ModuleInfo(torch.nn.Threshold, + module_inputs_func=module_inputs_torch_nn_Threshold, + skips=( + # test fails on MPS backend and is being investigated. 
+ # See https://github.com/pytorch/pytorch/issues/100914 + DecorateInfo(skipMPS),) + ), + ModuleInfo(torch.nn.Mish, + module_inputs_func=module_inputs_torch_nn_Mish, + skips=( + # not supported on MPS backend + DecorateInfo(skipMPS),) + ), + ModuleInfo(torch.nn.RNN, + train_and_eval_differ=True, + module_inputs_func=partial(module_inputs_torch_nn_RNN_GRU, is_rnn=True), + module_error_inputs_func=module_error_inputs_torch_nn_RNN_GRU, + skips=( + DecorateInfo(skipIfMps, 'TestModule', dtypes=[torch.float64]),), + decorators=rnn_gru_lstm_module_info_decorators + ), + ModuleInfo(torch.nn.GRU, + train_and_eval_differ=True, + module_inputs_func=partial(module_inputs_torch_nn_RNN_GRU, is_rnn=False), + module_error_inputs_func=module_error_inputs_torch_nn_RNN_GRU, + skips=( + DecorateInfo(skipIfMps, 'TestModule', dtypes=[torch.float64]),), + decorators=rnn_gru_lstm_module_info_decorators), + ModuleInfo(torch.nn.LSTM, + train_and_eval_differ=True, + module_inputs_func=module_inputs_torch_nn_LSTM, + module_error_inputs_func=module_error_inputs_torch_nn_RNN_GRU, + skips=( + # LSTM with projections is not currently supported with MPS + DecorateInfo(skipMPS),), + decorators=rnn_gru_lstm_module_info_decorators), + ModuleInfo(torch.nn.ReflectionPad1d, + module_inputs_func=module_inputs_torch_nn_ReflectionPad1d, + skips=( + DecorateInfo(skipIfMps, 'TestModule', dtypes=[torch.float64]),) + ), + ModuleInfo(torch.nn.ReflectionPad2d, + module_inputs_func=module_inputs_torch_nn_ReflectionPad2d, + gradcheck_nondet_tol=GRADCHECK_NONDET_TOL, + skips=( + DecorateInfo(unittest.skip("Skipped!"), 'TestModule', 'test_memory_format', + device_type='cuda'), + DecorateInfo(unittest.skip("Skipped!"), 'TestModule', 'test_memory_format', + device_type='mps'), + DecorateInfo(skipIfMps, 'TestModule', dtypes=[torch.float64]),) + ), + ModuleInfo(torch.nn.ReflectionPad3d, + module_inputs_func=module_inputs_torch_nn_ReflectionPad3d, + gradcheck_nondet_tol=GRADCHECK_NONDET_TOL, + skips=( + DecorateInfo(unittest.skip("Skipped!"), 'TestModule', 'test_memory_format', + device_type='cuda'), + DecorateInfo(unittest.skip("Skipped!"), 'TestModule', 'test_memory_format', + device_type='mps'), + DecorateInfo(skipIfMps, 'TestModule', dtypes=[torch.float64]),) + ), + ModuleInfo(torch.nn.ReplicationPad1d, + module_inputs_func=module_inputs_torch_nn_ReplicationPad1d, + skips=( + DecorateInfo(skipIfMps, 'TestModule', dtypes=[torch.float64]),) + ), + ModuleInfo(torch.nn.ReplicationPad2d, + module_inputs_func=module_inputs_torch_nn_ReplicationPad2d, + gradcheck_nondet_tol=GRADCHECK_NONDET_TOL, + skips=( + DecorateInfo(unittest.skip("Skipped!"), 'TestModule', 'test_memory_format', + device_type='cuda'), + DecorateInfo(unittest.skip("Skipped!"), 'TestModule', 'test_memory_format', + device_type='mps'), + DecorateInfo(skipIfMps, 'TestModule', dtypes=[torch.float64]),) + ), + ModuleInfo(torch.nn.ReplicationPad3d, + module_inputs_func=module_inputs_torch_nn_ReplicationPad3d, + gradcheck_nondet_tol=GRADCHECK_NONDET_TOL, + skips=( + DecorateInfo(unittest.skip("Skipped!"), 'TestModule', 'test_memory_format', + device_type='cuda'), + DecorateInfo(unittest.skip("Skipped!"), 'TestModule', 'test_memory_format', + device_type='mps'), + DecorateInfo(skipIfMps, 'TestModule', dtypes=[torch.float64]),) + ), + ModuleInfo(torch.nn.SELU, + module_inputs_func=module_inputs_torch_nn_SELU, + skips=( + # test fails on MPS backend and is being investigated. 
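Besides module_inputs_func, the recurrent entries also register a module_error_inputs_func that supplies inputs expected to raise. A standalone example of the kind of misuse those error inputs cover; the exact error message comes from PyTorch, not from this file:

import torch

rnn = torch.nn.RNN(input_size=4, hidden_size=3, num_layers=1)
try:
    rnn(torch.randn(5, 2, 8))   # feature dim 8 does not match input_size=4
except RuntimeError as err:
    print("rejected as expected:", err)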
+ # See https://github.com/pytorch/pytorch/issues/100914 + DecorateInfo(skipMPS),) + ), + ModuleInfo(torch.nn.ZeroPad1d, + module_inputs_func=module_inputs_torch_nn_ZeroPad1d, + skips=( + DecorateInfo(skipIfMps, 'TestModule', dtypes=[torch.float64]),) + ), + ModuleInfo(torch.nn.ZeroPad2d, + module_inputs_func=module_inputs_torch_nn_ZeroPad2d, + skips=( + DecorateInfo(skipIfMps, 'TestModule', dtypes=[torch.float64]), + # Fails with channels last test on MPS backend + DecorateInfo(unittest.expectedFailure, "TestModule", "test_memory_format", device_type='mps'),) + ), + ModuleInfo(torch.nn.ZeroPad3d, + module_inputs_func=module_inputs_torch_nn_ZeroPad3d, + skips=( + DecorateInfo(skipIfMps, 'TestModule', dtypes=[torch.float64]), + # Fails with channels last test on MPS backend + DecorateInfo(unittest.expectedFailure, "TestModule", "test_memory_format", device_type='mps'),) + ), + ModuleInfo(torch.nn.CircularPad1d, + module_inputs_func=module_inputs_torch_nn_CircularPad1d, + module_error_inputs_func=module_error_inputs_torch_nn_Pad1d, + skips=( + DecorateInfo(skipIfMps, 'TestModule', dtypes=[torch.float64]),) + ), + ModuleInfo(torch.nn.CircularPad2d, + module_inputs_func=module_inputs_torch_nn_CircularPad2d, + module_error_inputs_func=module_error_inputs_torch_nn_Pad2d, + skips=( + DecorateInfo(skipIfMps, 'TestModule', dtypes=[torch.float64]),) + ), + ModuleInfo(torch.nn.CircularPad3d, + module_inputs_func=module_inputs_torch_nn_CircularPad3d, + module_error_inputs_func=module_error_inputs_torch_nn_Pad3d, + skips=( + DecorateInfo(skipIfMps, 'TestModule', dtypes=[torch.float64]), + # Fails with channels last test on MPS backend + DecorateInfo(unittest.expectedFailure, "TestModule", "test_memory_format"),) + ), + ModuleInfo(torch.nn.ConstantPad1d, + module_inputs_func=module_inputs_torch_nn_ConstantPad1d, + skips=( + DecorateInfo(skipIfMps, 'TestModule', dtypes=[torch.float64]),) + ), + ModuleInfo(torch.nn.ConstantPad2d, + module_inputs_func=module_inputs_torch_nn_ConstantPad2d, + skips=( + DecorateInfo(skipIfMps, 'TestModule', dtypes=[torch.float64]), + # Fails with channels last test on MPS backend + DecorateInfo(unittest.expectedFailure, "TestModule", "test_memory_format", device_type='mps'),) + ), + ModuleInfo(torch.nn.ConstantPad3d, + module_inputs_func=module_inputs_torch_nn_ConstantPad3d, + skips=( + DecorateInfo(skipIfMps, 'TestModule', dtypes=[torch.float64]), + # Fails with channels last test on MPS backend + DecorateInfo(unittest.expectedFailure, "TestModule", "test_memory_format", device_type='mps'),) + ) +] diff --git a/env-llmeval/lib/python3.10/site-packages/torch/testing/_internal/common_pruning.py b/env-llmeval/lib/python3.10/site-packages/torch/testing/_internal/common_pruning.py new file mode 100644 index 0000000000000000000000000000000000000000..b6cbd92105f3f46fa10ed74a5be45f7338c8b992 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torch/testing/_internal/common_pruning.py @@ -0,0 +1,384 @@ +# Owner(s): ["module: unknown"] + +from torch.ao.pruning import BaseSparsifier +import torch +import torch.nn.functional as F +from torch import nn + +class ImplementedSparsifier(BaseSparsifier): + def __init__(self, **kwargs): + super().__init__(defaults=kwargs) + + def update_mask(self, module, **kwargs): + module.parametrizations.weight[0].mask[0] = 0 + linear_state = self.state['linear1.weight'] + linear_state['step_count'] = linear_state.get('step_count', 0) + 1 + + +class MockSparseLinear(nn.Linear): + """ + This class is a MockSparseLinear class to check convert 
functionality. + It is the same as a normal Linear layer, except with a different type, as + well as an additional from_dense method. + """ + @classmethod + def from_dense(cls, mod): + """ + """ + linear = cls(mod.in_features, + mod.out_features) + return linear + + +def rows_are_subset(subset_tensor, superset_tensor) -> bool: + """ + Checks to see if all rows in subset tensor are present in the superset tensor + """ + i = 0 + for row in subset_tensor: + while i < len(superset_tensor): + if not torch.equal(row, superset_tensor[i]): + i += 1 + else: + break + else: + return False + return True + + +class SimpleLinear(nn.Module): + r"""Model with only Linear layers without biases, some wrapped in a Sequential, + some following the Sequential. Used to test basic pruned Linear-Linear fusion.""" + + def __init__(self): + super().__init__() + self.seq = nn.Sequential( + nn.Linear(7, 5, bias=False), + nn.Linear(5, 6, bias=False), + nn.Linear(6, 4, bias=False), + ) + self.linear1 = nn.Linear(4, 4, bias=False) + self.linear2 = nn.Linear(4, 10, bias=False) + + def forward(self, x): + x = self.seq(x) + x = self.linear1(x) + x = self.linear2(x) + return x + + +class LinearBias(nn.Module): + r"""Model with only Linear layers, alternating layers with biases, + wrapped in a Sequential. Used to test pruned Linear-Bias-Linear fusion.""" + + def __init__(self): + super().__init__() + self.seq = nn.Sequential( + nn.Linear(7, 5, bias=True), + nn.Linear(5, 6, bias=False), + nn.Linear(6, 3, bias=True), + nn.Linear(3, 3, bias=True), + nn.Linear(3, 10, bias=False), + ) + + def forward(self, x): + x = self.seq(x) + return x + + +class LinearActivation(nn.Module): + r"""Model with only Linear layers, some with bias, some in a Sequential and some following. + Activation functions modules in between each Linear in the Sequential, and each outside layer. + Used to test pruned Linear(Bias)-Activation-Linear fusion.""" + + def __init__(self): + super().__init__() + self.seq = nn.Sequential( + nn.Linear(7, 5, bias=True), + nn.ReLU(), + nn.Linear(5, 6, bias=False), + nn.Tanh(), + nn.Linear(6, 4, bias=True), + ) + self.linear1 = nn.Linear(4, 3, bias=True) + self.act1 = nn.ReLU() + self.linear2 = nn.Linear(3, 10, bias=False) + self.act2 = nn.Tanh() + + def forward(self, x): + x = self.seq(x) + x = self.linear1(x) + x = self.act1(x) + x = self.linear2(x) + x = self.act2(x) + return x + + +class LinearActivationFunctional(nn.Module): + r"""Model with only Linear layers, some with bias, some in a Sequential and some following. + Activation functions modules in between each Linear in the Sequential, and functional + activationals are called in between each outside layer. + Used to test pruned Linear(Bias)-Activation-Linear fusion.""" + + def __init__(self): + super().__init__() + self.seq = nn.Sequential( + nn.Linear(7, 5, bias=True), + nn.ReLU(), + nn.Linear(5, 6, bias=False), + nn.ReLU(), + nn.Linear(6, 4, bias=True), + ) + self.linear1 = nn.Linear(4, 3, bias=True) + self.linear2 = nn.Linear(3, 8, bias=False) + self.linear3 = nn.Linear(8, 10, bias=False) + self.act1 = nn.ReLU() + + def forward(self, x): + x = self.seq(x) + x = self.linear1(x) + x = F.relu(x) + x = self.linear2(x) + x = F.relu(x) + x = self.linear3(x) + x = F.relu(x) + return x + + +class SimpleConv2d(nn.Module): + r"""Model with only Conv2d layers, all without bias, some in a Sequential and some following. 
+ Used to test pruned Conv2d-Conv2d fusion.""" + + def __init__(self): + super().__init__() + self.seq = nn.Sequential( + nn.Conv2d(1, 32, 3, 1, bias=False), + nn.Conv2d(32, 64, 3, 1, bias=False), + ) + self.conv2d1 = nn.Conv2d(64, 48, 3, 1, bias=False) + self.conv2d2 = nn.Conv2d(48, 52, 3, 1, bias=False) + + def forward(self, x): + x = self.seq(x) + x = self.conv2d1(x) + x = self.conv2d2(x) + return x + + +class Conv2dBias(nn.Module): + r"""Model with only Conv2d layers, some with bias, some in a Sequential and some outside. + Used to test pruned Conv2d-Bias-Conv2d fusion.""" + + def __init__(self): + super().__init__() + self.seq = nn.Sequential( + nn.Conv2d(1, 32, 3, 1, bias=True), + nn.Conv2d(32, 32, 3, 1, bias=True), + nn.Conv2d(32, 64, 3, 1, bias=False), + ) + self.conv2d1 = nn.Conv2d(64, 48, 3, 1, bias=True) + self.conv2d2 = nn.Conv2d(48, 52, 3, 1, bias=False) + + def forward(self, x): + x = self.seq(x) + x = self.conv2d1(x) + x = self.conv2d2(x) + return x + + +class Conv2dActivation(nn.Module): + r"""Model with only Conv2d layers, some with bias, some in a Sequential and some following. + Activation function modules in between each Sequential layer, functional activations called + in-between each outside layer. + Used to test pruned Conv2d-Bias-Activation-Conv2d fusion.""" + + def __init__(self): + super().__init__() + self.seq = nn.Sequential( + nn.Conv2d(1, 32, 3, 1, bias=True), + nn.ReLU(), + nn.Conv2d(32, 64, 3, 1, bias=True), + nn.Tanh(), + nn.Conv2d(64, 64, 3, 1, bias=False), + nn.ReLU(), + ) + self.conv2d1 = nn.Conv2d(64, 48, 3, 1, bias=False) + self.conv2d2 = nn.Conv2d(48, 52, 3, 1, bias=True) + + def forward(self, x): + x = self.seq(x) + x = self.conv2d1(x) + x = F.relu(x) + x = self.conv2d2(x) + x = F.hardtanh(x) + return x + + +class Conv2dPadBias(nn.Module): + r"""Model with only Conv2d layers, all with bias and some with padding > 0, + some in a Sequential and some following. Activation function modules in between each layer. + Used to test that bias is propagated correctly in the special case of + pruned Conv2d-Bias-(Activation)Conv2d fusion, when the second Conv2d layer has padding > 0.""" + + def __init__(self): + super().__init__() + self.seq = nn.Sequential( + nn.Conv2d(1, 32, 3, 1, padding=1, bias=True), + nn.ReLU(), + nn.Conv2d(32, 32, 3, 1, bias=False), + nn.ReLU(), + nn.Conv2d(32, 32, 3, 1, padding=1, bias=True), + nn.ReLU(), + nn.Conv2d(32, 32, 3, 1, padding=1, bias=True), + nn.ReLU(), + nn.Conv2d(32, 64, 3, 1, bias=True), + nn.Tanh(), + ) + self.conv2d1 = nn.Conv2d(64, 48, 3, 1, padding=1, bias=True) + self.act1 = nn.ReLU() + self.conv2d2 = nn.Conv2d(48, 52, 3, 1, padding=1, bias=True) + self.act2 = nn.Tanh() + + def forward(self, x): + x = self.seq(x) + x = self.conv2d1(x) + x = self.act1(x) + x = self.conv2d2(x) + x = self.act2(x) + return x + + +class Conv2dPool(nn.Module): + r"""Model with only Conv2d layers, all with bias, some in a Sequential and some following. + Activation function modules in between each layer, Pool2d modules in between each layer. 
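rows_are_subset, defined above, scans the superset left to right and never rewinds, so it is effectively an ordered row-subsequence check. A quick worked example, importing it from the path this file is installed under:

import torch
from torch.testing._internal.common_pruning import rows_are_subset

sup = torch.tensor([[1., 0.], [2., 2.], [3., 1.]])
assert rows_are_subset(torch.tensor([[1., 0.], [3., 1.]]), sup)        # present, in order
assert not rows_are_subset(torch.tensor([[3., 1.], [1., 0.]]), sup)    # out of order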
+ Used to test pruned Conv2d-Pool2d-Conv2d fusion.""" + + def __init__(self): + super().__init__() + self.seq = nn.Sequential( + nn.Conv2d(1, 32, kernel_size=3, padding=1, bias=True), + nn.MaxPool2d(kernel_size=2, stride=2, padding=1), + nn.ReLU(), + nn.Conv2d(32, 64, kernel_size=3, padding=1, bias=True), + nn.Tanh(), + nn.AvgPool2d(kernel_size=2, stride=2, padding=1), + ) + self.conv2d1 = nn.Conv2d(64, 48, kernel_size=3, padding=1, bias=True) + self.maxpool = nn.MaxPool2d(kernel_size=2, stride=2, padding=1) + self.af1 = nn.ReLU() + self.conv2d2 = nn.Conv2d(48, 52, kernel_size=3, padding=1, bias=True) + self.conv2d3 = nn.Conv2d(52, 52, kernel_size=3, padding=1, bias=True) + + def forward(self, x): + x = self.seq(x) + x = self.conv2d1(x) + x = self.maxpool(x) + x = self.af1(x) + x = self.conv2d2(x) + x = F.avg_pool2d(x, kernel_size=2, stride=2, padding=1) + x = F.relu(x) + x = self.conv2d3(x) + return x + + +class Conv2dPoolFlattenFunctional(nn.Module): + r"""Model with Conv2d layers, all with bias, some in a Sequential and some following, and then a Pool2d + and a functional Flatten followed by a Linear layer. + Activation functions and Pool2ds in between each layer also. + Used to test pruned Conv2d-Pool2d-Flatten-Linear fusion.""" + + def __init__(self): + super().__init__() + self.seq = nn.Sequential( + nn.Conv2d(1, 3, kernel_size=3, padding=1, bias=True), + nn.MaxPool2d(kernel_size=2, stride=2, padding=1), + nn.ReLU(), + nn.Conv2d(3, 5, kernel_size=3, padding=1, bias=True), + nn.Tanh(), + nn.AvgPool2d(kernel_size=2, stride=2, padding=1), + ) + self.conv2d1 = nn.Conv2d(5, 7, kernel_size=3, padding=1, bias=True) + self.af1 = nn.ReLU() + self.conv2d2 = nn.Conv2d(7, 11, kernel_size=3, padding=1, bias=True) + self.avg_pool = nn.AdaptiveAvgPool2d((1, 1)) + self.fc = nn.Linear(11, 13, bias=True) + + def forward(self, x): + x = self.seq(x) + x = self.conv2d1(x) + x = F.max_pool2d(x, kernel_size=2, stride=2, padding=1) + x = self.af1(x) + x = self.conv2d2(x) + x = self.avg_pool(x) + x = torch.flatten(x, 1) # test functional flatten + x = self.fc(x) + return x + + +class Conv2dPoolFlatten(nn.Module): + r"""Model with Conv2d layers, all with bias, some in a Sequential and some following, and then a Pool2d + and a Flatten module followed by a Linear layer. + Activation functions and Pool2ds in between each layer also. 
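A quick shape check for Conv2dPoolFlattenFunctional as defined above; the 28x28 input is arbitrary, since AdaptiveAvgPool2d((1, 1)) collapses the spatial dimensions and the flattened size is always the 11 channels feeding Linear(11, 13).

import torch
from torch.testing._internal.common_pruning import Conv2dPoolFlattenFunctional

model = Conv2dPoolFlattenFunctional()
out = model(torch.randn(2, 1, 28, 28))
print(out.shape)   # torch.Size([2, 13])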
+ Used to test pruned Conv2d-Pool2d-Flatten-Linear fusion.""" + + def __init__(self): + super().__init__() + self.seq = nn.Sequential( + nn.Conv2d(1, 3, kernel_size=3, padding=1, bias=True), + nn.MaxPool2d(kernel_size=2, stride=2, padding=1), + nn.ReLU(), + nn.Conv2d(3, 5, kernel_size=3, padding=1, bias=True), + nn.Tanh(), + nn.AvgPool2d(kernel_size=2, stride=2, padding=1), + ) + self.conv2d1 = nn.Conv2d(5, 7, kernel_size=3, padding=1, bias=True) + self.af1 = nn.ReLU() + self.conv2d2 = nn.Conv2d(7, 11, kernel_size=3, padding=1, bias=True) + self.avg_pool = nn.AdaptiveAvgPool2d((2, 2)) + self.flatten = nn.Flatten() + self.fc = nn.Linear(44, 13, bias=True) + + def forward(self, x): + x = self.seq(x) + x = self.conv2d1(x) + x = F.max_pool2d(x, kernel_size=2, stride=2, padding=1) + x = self.af1(x) + x = self.conv2d2(x) + x = self.avg_pool(x) + x = self.flatten(x) + x = self.fc(x) + return x + + +class LSTMLinearModel(nn.Module): + """Container module with an encoder, a recurrent module, and a linear.""" + + def __init__( + self, input_dim: int, hidden_dim: int, output_dim: int, num_layers: int + ): + super().__init__() + self.lstm = nn.LSTM(input_dim, hidden_dim, num_layers) + self.linear = nn.Linear(hidden_dim, output_dim) + + def forward(self, input): + output, hidden = self.lstm(input) + decoded = self.linear(output) + return decoded, output + + +class LSTMLayerNormLinearModel(nn.Module): + """Container module with an LSTM, a LayerNorm, and a linear.""" + + def __init__( + self, input_dim: int, hidden_dim: int, output_dim: int, num_layers: int + ): + super().__init__() + self.lstm = nn.LSTM(input_dim, hidden_dim, num_layers) + self.norm = nn.LayerNorm(hidden_dim) + self.linear = nn.Linear(hidden_dim, output_dim) + + def forward(self, x): + x, state = self.lstm(x) + x = self.norm(x) + x = self.linear(x) + return x, state diff --git a/env-llmeval/lib/python3.10/site-packages/torch/testing/_internal/common_quantization.py b/env-llmeval/lib/python3.10/site-packages/torch/testing/_internal/common_quantization.py new file mode 100644 index 0000000000000000000000000000000000000000..dacca2b0326d75d6d18dda32a7b0d1dd8ac0a1b3 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torch/testing/_internal/common_quantization.py @@ -0,0 +1,2685 @@ +r"""Importing this file includes common utility methods and base clases for +checking quantization api and properties of resulting modules. 
+""" + +import torch +import torch.nn as nn +import torch.nn.functional as F +import torch.ao.nn.intrinsic.quantized.dynamic as nniqd +import torch.ao.nn.quantized as nnq +import torch.ao.nn.quantized.dynamic as nnqd +from torch.ao.nn.intrinsic import _FusedModule +import torch.distributed as dist +from torch.testing._internal.common_utils import TestCase, TEST_WITH_ROCM + +from torch.ao.quantization import ( + QuantType, + default_dynamic_qat_qconfig, + default_embedding_qat_qconfig, + default_symmetric_qnnpack_qat_qconfig, +) +from torch.ao.quantization import QuantWrapper, QuantStub, DeQuantStub, \ + default_qconfig, default_dynamic_qconfig, default_per_channel_qconfig, QConfig, default_observer, default_weight_observer, \ + propagate_qconfig_, convert, get_default_qconfig, quantize_dynamic_jit, quantize_jit, float_qparams_weight_only_qconfig, \ + get_default_qat_qconfig, PerChannelMinMaxObserver, default_dynamic_quant_observer, quantize, \ + QConfigMapping, get_default_qconfig_mapping, get_default_qat_qconfig_mapping +from torch.ao.quantization.quantization_mappings import ( + get_default_dynamic_quant_module_mappings, + get_default_qconfig_propagation_list, + get_default_qat_module_mappings, +) +from torch.testing._internal.common_quantized import ( + override_quantized_engine, +) +from torch.jit.mobile import _load_for_lite_interpreter + +try: + # graph mode quantization based on fx + from torch.ao.quantization.quantize_fx import ( + prepare_fx, + prepare_qat_fx, + convert_fx, + convert_to_reference_fx, + ) + from torch.ao.ns.fx.ns_types import NSSingleResultValuesType, NSSubgraph + from torch.fx.graph import Node + from torch.fx import GraphModule + HAS_FX = True +except ImportError: + HAS_FX = False + +import copy +import io +import functools +import time +import os + +import unittest +import numpy as np +from torch.testing import FileCheck +from typing import Callable, Tuple, Dict, Any, Union, Type, Optional +import torch._dynamo as torchdynamo + +class NodeSpec: + ''' Used for checking GraphModule Node + ''' + def __init__(self, op, target): + ''' + op: call_function | call_module + target: + for call_function, target would be a function + for call_module, target would be the type of PyTorch module + ''' + self.op = op + self.target = target + + @classmethod + def call_function(cls, target): + return NodeSpec('call_function', target) + + @classmethod + def call_method(cls, target): + return NodeSpec('call_method', target) + + @classmethod + def call_module(cls, target): + return NodeSpec('call_module', target) + + def __hash__(self): + return hash((self.op, self.target)) + + def __eq__(self, other): + if not isinstance(other, NodeSpec): + return NotImplemented + + return self.op == other.op and self.target == other.target + + def __repr__(self): + return repr(self.op) + " " + repr(self.target) + +def get_supported_device_types(): + return ['cpu', 'cuda'] if torch.cuda.is_available() and not TEST_WITH_ROCM else ['cpu'] + +def test_only_eval_fn(model, calib_data): + r""" + Default evaluation function takes a torch.utils.data.Dataset or a list of + input Tensors and run the model on the dataset + """ + for inp in calib_data: + output = model(*inp) + +_default_loss_fn = torch.nn.CrossEntropyLoss() +def test_only_train_fn(model, train_data, loss_fn=_default_loss_fn): + r""" + Default train function takes a torch.utils.data.Dataset and train the model + on the dataset + """ + optimizer = torch.optim.Adam(model.parameters(), lr=0.001) + train_loss, correct, total = 0, 0, 0 + for i in 
range(10): + model.train() + + for data, target in train_data: + optimizer.zero_grad() + output = model(data) + loss = loss_fn(output, target) + loss.backward() + optimizer.step() + train_loss += loss.item() + _, predicted = torch.max(output, 1) + total += target.size(0) + correct += (predicted == target).sum().item() + return train_loss, correct, total + +class AverageMeter: + """Computes and stores the average and current value""" + def __init__(self, name, fmt=':f'): + self.name = name + self.fmt = fmt + self.reset() + + def reset(self): + self.val = 0 + self.avg = 0 + self.sum = 0 + self.count = 0 + + def update(self, val, n=1): + self.val = val + self.sum += val * n + self.count += n + self.avg = self.sum / self.count + + def __str__(self): + fmtstr = '{name} {val' + self.fmt + '} ({avg' + self.fmt + '})' + return fmtstr.format(**self.__dict__) + + +def accuracy(output, target, topk=(1,)): + """Computes the accuracy over the k top predictions for the specified values of k""" + with torch.no_grad(): + maxk = max(topk) + batch_size = target.size(0) + + _, pred = output.topk(maxk, 1, True, True) + pred = pred.t() + correct = pred.eq(target.view(1, -1).expand_as(pred)) + + res = [] + for k in topk: + correct_k = correct[:k].view(-1).float().sum(0, keepdim=True) + res.append(correct_k.mul_(100.0 / batch_size)) + return res + +def train_one_epoch(model, criterion, optimizer, data_loader, device, ntrain_batches): + model.train() + cnt = 0 + for image, target in data_loader: + start_time = time.time() + print('.', end='') + cnt += 1 + image, target = image.to(device), target.to(device) + output = model(image) + loss = criterion(output, target) + optimizer.zero_grad() + loss.backward() + optimizer.step() + acc1, acc5 = accuracy(output, target, topk=(1, 5)) + if cnt >= ntrain_batches: + return + return + +def ddp_setup(rank, world_size): + os.environ['MASTER_ADDR'] = 'localhost' + os.environ['MASTER_PORT'] = '12355' + + # initialize the process group + dist.init_process_group("gloo", rank=rank, world_size=world_size) + +def ddp_cleanup(): + dist.destroy_process_group() + +def run_ddp(rank, world_size, prepared): + ddp_setup(rank, world_size) + prepared.cuda() + prepared = torch.nn.parallel.DistributedDataParallel(prepared, device_ids=[rank]) + prepared.to(rank) + model_with_ddp = prepared + optimizer = torch.optim.SGD(model_with_ddp.parameters(), lr=0.0001) + train_one_epoch(model_with_ddp, criterion, optimizer, dataset, rank, 1) + ddp_cleanup() + + +def convert_dynamic(module): + convert(module, get_default_dynamic_quant_module_mappings(), inplace=True) + +def prepare_dynamic(model, qconfig_dict=None): + propagate_qconfig_(model, qconfig_dict) + +def _make_conv_test_input( + batch_size, in_channels_per_group, input_feature_map_size, + out_channels_per_group, groups, kernel_size, X_scale, X_zero_point, W_scale, + W_zero_point, use_bias, use_channelwise, +): + in_channels = in_channels_per_group * groups + out_channels = out_channels_per_group * groups + + (X_value_min, X_value_max) = (0, 4) + X_init = torch.randint( + X_value_min, X_value_max, + (batch_size, in_channels,) + input_feature_map_size) + X = X_scale * (X_init - X_zero_point).float() + X_q = torch.quantize_per_tensor( + X, scale=X_scale, zero_point=X_zero_point, dtype=torch.quint8) + + W_scale = W_scale * out_channels + W_zero_point = W_zero_point * out_channels + # Resize W_scale and W_zero_points arrays equal to out_channels + W_scale = W_scale[:out_channels] + W_zero_point = W_zero_point[:out_channels] + # For testing, we use 
small values for weights and for activations so that + # no overflow occurs in vpmaddubsw instruction. If the overflow occurs in + # qconv implementation and if there is no overflow. + # In reference we can't exactly match the results with reference. + # Please see the comment in qconv implementation file + # aten/src/ATen/native/quantized/cpu/qconv.cpp for more details. + (W_value_min, W_value_max) = (-5, 5) + # The operator expects them in the format + # (out_channels, in_channels/groups,) + kernel_size + W_init = torch.randint( + W_value_min, W_value_max, + (out_channels, in_channels_per_group,) + kernel_size) + b_init = torch.randint(0, 10, (out_channels,)) + + if use_channelwise: + W_shape = (-1, 1) + (1,) * len(kernel_size) + W_scales_tensor = torch.tensor(W_scale, dtype=torch.float) + W_zero_points_tensor = torch.tensor(W_zero_point, dtype=torch.float) + W = W_scales_tensor.reshape(*W_shape) * ( + W_init.float() - W_zero_points_tensor.reshape(*W_shape)).float() + b = X_scale * W_scales_tensor * b_init.float() + W_q = torch.quantize_per_channel( + W, W_scales_tensor.double(), W_zero_points_tensor.long(), 0, + dtype=torch.qint8) + else: + W = W_scale[0] * (W_init - W_zero_point[0]).float() + b = X_scale * W_scale[0] * b_init.float() + W_q = torch.quantize_per_tensor( + W, scale=W_scale[0], zero_point=W_zero_point[0], dtype=torch.qint8) + + return (X, X_q, W, W_q, b if use_bias else None) + +def _make_conv_add_extra_input_tensor(scale, zero_point, sizes): + (X_value_min, X_value_max) = (0, 4) + X_init = torch.randint( + X_value_min, + X_value_max, + sizes # Infer the size of tensor to do the add + ) + X = scale * (X_init - zero_point).float() + X_q = torch.quantize_per_tensor( + X, scale=scale, zero_point=zero_point, dtype=torch.quint8) + return X, X_q + +def skipIfNoFBGEMM(fn): + reason = 'Quantized operations require FBGEMM. FBGEMM is only optimized for CPUs with instruction set support AVX2 or newer.' + if isinstance(fn, type): + if 'fbgemm' not in torch.backends.quantized.supported_engines: + fn.__unittest_skip__ = True + fn.__unittest_skip_why__ = reason + return fn + + @functools.wraps(fn) + def wrapper(*args, **kwargs): + if 'fbgemm' not in torch.backends.quantized.supported_engines: + raise unittest.SkipTest(reason) + else: + fn(*args, **kwargs) + return wrapper + +def skipIfNoQNNPACK(fn): + reason = 'Quantized operations require QNNPACK.' + if isinstance(fn, type): + if 'qnnpack' not in torch.backends.quantized.supported_engines: + fn.__unittest_skip__ = True + fn.__unittest_skip_why__ = reason + return fn + + @functools.wraps(fn) + def wrapper(*args, **kwargs): + if 'qnnpack' not in torch.backends.quantized.supported_engines: + raise unittest.SkipTest(reason) + else: + fn(*args, **kwargs) + return wrapper + + @functools.wraps(fn) + def wrapper(*args, **kwargs): + if not torch.onnx._CAFFE2_ATEN_FALLBACK: + raise unittest.SkipTest(reason) + else: + fn(*args, **kwargs) + return wrapper + +def withQNNPACKBackend(fn): + # TODO(future PR): consider combining with skipIfNoQNNPACK, + # will require testing of existing callsites + reason = 'Quantized operations require QNNPACK.' 
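The engine-gating decorators above work both on TestCase classes (by setting __unittest_skip__) and on individual test methods (by raising unittest.SkipTest). A hedged usage sketch; the test class and method names are made up:

import unittest
import torch
from torch.testing._internal.common_quantization import skipIfNoFBGEMM, skipIfNoQNNPACK

@skipIfNoFBGEMM                      # the whole class is skipped without fbgemm
class TestEngineGated(unittest.TestCase):
    @skipIfNoQNNPACK                 # this particular test additionally needs qnnpack
    def test_both_engines_present(self):
        engines = torch.backends.quantized.supported_engines
        self.assertIn("fbgemm", engines)
        self.assertIn("qnnpack", engines)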
+ if isinstance(fn, type): + if 'qnnpack' not in torch.backends.quantized.supported_engines: + fn.__unittest_skip__ = True + fn.__unittest_skip_why__ = reason + return fn + + @functools.wraps(fn) + def wrapper(*args, **kwargs): + if 'qnnpack' not in torch.backends.quantized.supported_engines: + raise unittest.SkipTest(reason) + with override_quantized_engine('qnnpack'): + fn(*args, **kwargs) + + return wrapper + +def skipIfNoONEDNN(fn): + reason = 'Quantized operations require ONEDNN.' + if isinstance(fn, type): + if 'onednn' not in torch.backends.quantized.supported_engines: + fn.__unittest_skip__ = True + fn.__unittest_skip_why__ = reason + return fn + + @functools.wraps(fn) + def wrapper(*args, **kwargs): + if 'onednn' not in torch.backends.quantized.supported_engines: + raise unittest.SkipTest(reason) + else: + fn(*args, **kwargs) + return wrapper + +def skipIfNoONEDNNBF16(fn): + reason = 'Quantized operations require BF16 support.' + if isinstance(fn, type): + if not torch.ops.mkldnn._is_mkldnn_bf16_supported(): + fn.__unittest_skip__ = True + fn.__unittest_skip_why__ = reason + return fn + + @functools.wraps(fn) + def wrapper(*args, **kwargs): + if not torch.ops.mkldnn._is_mkldnn_bf16_supported(): + raise unittest.SkipTest(reason) + else: + fn(*args, **kwargs) + return wrapper + +def skipIfNoX86(fn): + reason = 'Quantized operations require X86.' + if isinstance(fn, type): + if 'x86' not in torch.backends.quantized.supported_engines: + fn.__unittest_skip__ = True + fn.__unittest_skip_why__ = reason + return fn + + @functools.wraps(fn) + def wrapper(*args, **kwargs): + if 'x86' not in torch.backends.quantized.supported_engines: + raise unittest.SkipTest(reason) + else: + fn(*args, **kwargs) + return wrapper + +def skipIfNoDynamoSupport(fn): + reason = "dynamo doesn't support." + if isinstance(fn, type): + if not torchdynamo.is_dynamo_supported(): + fn.__unittest_skip__ = True + fn.__unittest_skip_why__ = reason + return fn + + @functools.wraps(fn) + def wrapper(*args, **kwargs): + if not torchdynamo.is_dynamo_supported(): + raise unittest.SkipTest(reason) + else: + fn(*args, **kwargs) + return wrapper + +def skipIfNoInductorSupport(fn): + reason = "inductor doesn't support." 
+ if isinstance(fn, type): + if not torchdynamo.is_inductor_supported(): + fn.__unittest_skip__ = True + fn.__unittest_skip_why__ = reason + return fn + + @functools.wraps(fn) + def wrapper(*args, **kwargs): + if not torchdynamo.is_inductor_supported(): + raise unittest.SkipTest(reason) + else: + fn(*args, **kwargs) + return wrapper + +try: + import torchvision # noqa: F401 + HAS_TORCHVISION = True +except ImportError: + HAS_TORCHVISION = False +skip_if_no_torchvision = unittest.skipIf(not HAS_TORCHVISION, "no torchvision") + +def get_script_module(model, tracing, data): + return torch.jit.trace(model, data) if tracing else torch.jit.script(model) + +def lengths_to_offsets(t, offset_type=np.int64, use_begin_offset=True): + """ + Convert lengths to offsets for embedding_bag + """ + tt = np.zeros((t.shape[0] + 1,), dtype=offset_type) + tt[1:] = t + tt = torch.from_numpy(np.cumsum(tt, dtype=offset_type)) + if use_begin_offset: + return tt[:-1] + return tt[1:] + +# QuantizationTestCase used as a base class for testing quantization on modules +class QuantizationTestCase(TestCase): + def setUp(self): + super().setUp() + self.calib_data = [[torch.rand(2, 5, dtype=torch.float)] for _ in range(2)] + self.train_data = [[torch.rand(2, 5, dtype=torch.float), torch.randint(0, 1, (2,), dtype=torch.long)] for _ in range(2)] + self.img_data_1d = [[torch.rand(2, 3, 10, dtype=torch.float)] + for _ in range(2)] + self.img_data_2d = [[torch.rand(1, 3, 10, 10, dtype=torch.float)] + for _ in range(2)] + self.img_data_3d = [[torch.rand(1, 3, 5, 5, 5, dtype=torch.float)] + for _ in range(2)] + self.img_data_1d_train = [[torch.rand(2, 3, 10, dtype=torch.float), + torch.randint(0, 1, (1,), dtype=torch.long)] + for _ in range(2)] + self.img_data_2d_train = [[torch.rand(1, 3, 10, 10, dtype=torch.float), + torch.randint(0, 1, (1,), dtype=torch.long)] + for _ in range(2)] + self.img_data_3d_train = [[torch.rand(1, 3, 5, 5, 5, dtype=torch.float), + torch.randint(0, 1, (1,), dtype=torch.long)] + for _ in range(2)] + + self.img_data_dict = {1 : self.img_data_1d, + 2 : self.img_data_2d, + 3 : self.img_data_3d} + + # Quant types that produce statically quantized ops + self.static_quant_types = [QuantType.STATIC, QuantType.QAT] + # All quant types for (fx based) graph mode quantization + self.all_quant_types = [QuantType.DYNAMIC, QuantType.STATIC, QuantType.QAT] + + def checkNoPrepModules(self, module): + r"""Checks the module does not contain child + modules for quantization preparation, e.g. + quant, dequant and observer + """ + self.assertFalse(hasattr(module, 'quant')) + self.assertFalse(hasattr(module, 'dequant')) + + def checkNoQconfig(self, module): + r"""Checks the module does not contain qconfig + """ + self.assertFalse(hasattr(module, 'qconfig')) + + for child in module.children(): + self.checkNoQconfig(child) + + def checkHasPrepModules(self, module): + r"""Checks the module contains child + modules for quantization preparation, e.g. 
+ quant, dequant and observer
+ """
+ self.assertTrue(hasattr(module, 'module'))
+ self.assertTrue(hasattr(module, 'quant'))
+ self.assertTrue(hasattr(module, 'dequant'))
+
+ def checkObservers(self, module, propagate_qconfig_list=None, prepare_custom_config_dict=None):
+ r"""Checks the module or module's leaf descendants
+ have observers in preparation for quantization
+ """
+ if propagate_qconfig_list is None:
+ propagate_qconfig_list = get_default_qconfig_propagation_list()
+ if prepare_custom_config_dict is None:
+ prepare_custom_config_dict = {}
+ float_to_observed_module_class_mapping = prepare_custom_config_dict.get("float_to_observed_custom_module_class", {})
+
+ # check if a module is a leaf module, ignoring activation_post_process attribute
+ def is_leaf_module(module):
+ submodule_name_count = 0
+ for name, _ in module.named_children():
+ if name != 'activation_post_process':
+ submodule_name_count += 1
+ return submodule_name_count == 0
+
+ if hasattr(module, 'qconfig') and module.qconfig is not None and \
+ ((is_leaf_module(module) and not isinstance(module, torch.nn.Sequential)
+ and type(module) in propagate_qconfig_list) or
+ type(module) in float_to_observed_module_class_mapping.keys()) and \
+ not isinstance(module, torch.ao.quantization.DeQuantStub):
+ self.assertTrue(hasattr(module, 'activation_post_process'),
+ 'module: ' + str(type(module)) + ' does not have an observer')
+ # we don't need to check observers for child modules of the
+ # qat modules
+ if type(module) not in get_default_qat_module_mappings().values() and \
+ type(module) not in float_to_observed_module_class_mapping.values() and \
+ not isinstance(module, _FusedModule):
+ for child in module.children():
+ if type(child) in [nn.Dropout]:
+ continue
+ self.checkObservers(child, propagate_qconfig_list, prepare_custom_config_dict)
+
+ def checkQuantDequant(self, mod):
+ r"""Checks that mod has nn.Quantize and
+ nn.DeQuantize submodules inserted
+ """
+ self.assertEqual(type(mod.quant), nnq.Quantize)
+ self.assertEqual(type(mod.dequant), nnq.DeQuantize)
+
+ def checkWrappedQuantizedLinear(self, mod):
+ r"""Checks that mod has been swapped for an nnq.Linear
+ module, the bias is qint32, and that the module
+ has Quantize and DeQuantize submodules
+ """
+ self.assertEqual(type(mod.module), nnq.Linear)
+ self.checkQuantDequant(mod)
+
+ def checkQuantizedLinear(self, mod):
+ self.assertEqual(type(mod), nnq.Linear)
+
+ def checkDynamicQuantizedLinear(self, mod, dtype):
+ r"""Checks that mod has been swapped for an nnqd.Linear
+ module, the bias is float.
+ """
+ self.assertEqual(type(mod), nnqd.Linear)
+ self.assertEqual(mod._packed_params.dtype, dtype)
+
+ def checkDynamicQuantizedLinearRelu(self, mod, dtype):
+ r"""Checks that mod has been swapped for an nniqd.LinearReLU
+ module, the bias is float.
+ """ + self.assertEqual(type(mod), nniqd.LinearReLU) + self.assertEqual(mod._packed_params.dtype, dtype) + + def check_eager_serialization(self, ref_model, loaded_model, x): + # Check state dict serialization and torch.save APIs + model_dict = ref_model.state_dict() + b = io.BytesIO() + torch.save(model_dict, b) + b.seek(0) + loaded_dict = torch.load(b) + loaded_model.load_state_dict(loaded_dict) + ref_out = ref_model(*x) + load_out = loaded_model(*x) + + def check_outputs(ref_out, load_out): + self.assertEqual(ref_out[0], load_out[0]) + if isinstance(ref_out[1], tuple): + self.assertEqual(ref_out[1][0], load_out[1][0]) + self.assertEqual(ref_out[1][1], load_out[1][1]) + else: + self.assertEqual(ref_out[1], load_out[1]) + + check_outputs(ref_out, load_out) + b = io.BytesIO() + torch.save(ref_model, b) + b.seek(0) + loaded = torch.load(b) + load_out = loaded(*x) + check_outputs(ref_out, load_out) + + def check_weight_bias_api(self, ref_model, weight_keys, bias_keys): + weight = ref_model.get_weight() + bias = ref_model.get_bias() + self.assertEqual(weight_keys ^ weight.keys(), set()) + self.assertEqual(bias_keys ^ bias.keys(), set()) + + def checkDynamicQuantizedLSTM(self, mod, reference_module_type, dtype): + r"""Checks that mod has been swapped for an nnqd.LSTM type + module, the bias is float. + """ + wt_dtype_map = {torch.qint8: 'quantized_dynamic', torch.float16: 'quantized_fp16'} + self.assertEqual(type(mod), reference_module_type) + for packed_params in mod._all_weight_values: + self.assertEqual(packed_params.param.__getstate__()[0][0], wt_dtype_map[dtype]) + + def checkLinear(self, mod): + self.assertEqual(type(mod), torch.nn.Linear) + + def checkDynamicQuantizedModule(self, mod, reference_module_type, dtype): + r"""Checks that mod has been swapped for an nnqd.Linear + module, the bias is float. 
+ """ + wt_dtype_map = {torch.qint8: 'quantized_dynamic', torch.float16: 'quantized_fp16'} + self.assertEqual(type(mod), reference_module_type) + if hasattr(mod, '_all_weight_values'): + for packed_params in mod._all_weight_values: + self.assertEqual(packed_params.param.__getstate__()[0][0], wt_dtype_map[dtype]) + + def checkScriptable(self, orig_mod, calib_data, check_save_load=False): + scripted = torch.jit.script(orig_mod) + self._checkScriptable(orig_mod, scripted, calib_data, check_save_load) + + # Use first calib_data entry as trace input + traced = torch.jit.trace(orig_mod, calib_data[0]) + self._checkScriptable(orig_mod, traced, calib_data, check_save_load) + + # Call this twice: once for a scripted module and once for a traced module + def _checkScriptable(self, orig_mod, script_mod, calib_data, check_save_load): + self._checkModuleCorrectnessAgainstOrig(orig_mod, script_mod, calib_data) + + # Test save/load + buffer = io.BytesIO() + torch.jit.save(script_mod, buffer) + + buffer.seek(0) + loaded_mod = torch.jit.load(buffer) + # Pending __get_state_ and __set_state__ support + # See tracking task https://github.com/pytorch/pytorch/issues/23984 + if check_save_load: + self._checkModuleCorrectnessAgainstOrig(orig_mod, loaded_mod, calib_data) + + def _checkModuleCorrectnessAgainstOrig(self, orig_mod, test_mod, calib_data): + for inp in calib_data: + ref_output = orig_mod(*inp) + scripted_output = test_mod(*inp) + self.assertEqual(scripted_output, ref_output) + + + def checkGraphModeOp(self, module, inputs, quantized_op, tracing=False, debug=False, + check=True, eval_mode=True, dynamic=False, qconfig=None): + if debug: + print('Testing:', str(module)) + qconfig_dict = {'': get_default_qconfig(torch.backends.quantized.engine)} + + if eval_mode: + module = module.eval() + if dynamic: + qconfig_dict = {'': default_dynamic_qconfig if qconfig is None else qconfig} + model = get_script_module(module, tracing, inputs[0]).eval() + if debug: + print('input graph:', model.graph) + models = {} + outputs = {} + for debug in [True, False]: + if dynamic: + models[debug] = quantize_dynamic_jit(model, qconfig_dict, debug=debug) + # make sure it runs + outputs[debug] = models[debug](inputs) + else: + # module under test can contain in-place ops, and we depend on + # input data staying constant for comparisons + inputs_copy = copy.deepcopy(inputs) + models[debug] = quantize_jit( + model, qconfig_dict, test_only_eval_fn, [inputs_copy], inplace=False, + debug=debug) + # make sure it runs + outputs[debug] = models[debug](*inputs[0]) + + if debug: + print('debug graph:', models[True].graph) + print('non debug graph:', models[False].graph) + + if check: + # debug and non-debug option should have the same numerics + self.assertEqual(outputs[True], outputs[False]) + + # non debug graph should produce quantized op + FileCheck().check(quantized_op) \ + .run(models[False].graph) + + return models[False] + + def checkGraphModuleNodes( + self, graph_module, + expected_node=None, + expected_node_occurrence=None, + expected_node_list=None): + """ Check if GraphModule contains the target node + Args: + graph_module: the GraphModule instance we want to check + expected_node, expected_node_occurrence, expected_node_list: + see docs for checkGraphModeFxOp + """ + nodes_in_graph = {} + node_list = [] + modules = dict(graph_module.named_modules(remove_duplicate=False)) + for node in graph_module.graph.nodes: + n = None + if node.op == 'call_function' or node.op == 'call_method': + n = NodeSpec(node.op, node.target) + elif 
node.op == 'call_module': + n = NodeSpec(node.op, type(modules[node.target])) + + if n is not None: + node_list.append(n) + if n in nodes_in_graph: + nodes_in_graph[n] += 1 + else: + nodes_in_graph[n] = 1 + + if expected_node is not None: + self.assertTrue(expected_node in nodes_in_graph, 'node:' + str(expected_node) + + ' not found in the graph module') + + if expected_node_occurrence is not None: + for expected_node, occurrence in expected_node_occurrence.items(): + if occurrence != 0: + self.assertTrue( + expected_node in nodes_in_graph, + 'Check failed for node:' + str(expected_node) + + ' not found') + self.assertTrue( + nodes_in_graph[expected_node] == occurrence, + 'Check failed for node:' + str(expected_node) + + ' Expected occurrence:' + str(occurrence) + + ' Found occurrence:' + str(nodes_in_graph[expected_node])) + else: + self.assertTrue( + expected_node not in nodes_in_graph, + 'Check failed for node:' + str(expected_node) + + ' expected no occurrence but found') + + if expected_node_list is not None: + cur_index = 0 + for n in node_list: + if cur_index == len(expected_node_list): + return + if n == expected_node_list[cur_index]: + cur_index += 1 + self.assertTrue( + cur_index == len(expected_node_list), + "Check failed for graph:" + + self.printGraphModule(graph_module, print_str=False) + + "Expected ordered list:" + + str(expected_node_list)) + + def printGraphModule(self, graph_module, print_str=True): + modules = dict(graph_module.named_modules(remove_duplicate=False)) + node_infos = [] + for n in graph_module.graph.nodes: + node_info = ' '.join(map(repr, [n.op, n.name, n.target, n.args, n.kwargs])) + if n.op == 'call_module': + node_info += ' module type: ' + repr(type(modules[n.target])) + node_infos.append(node_info) + str_to_print = '\n'.join(node_infos) + if print_str: + print(str_to_print) + return str_to_print + + if HAS_FX: + + def assert_types_for_matched_subgraph_pairs( + self, + matched_subgraph_pairs: Dict[str, Tuple[NSSubgraph, NSSubgraph]], + expected_types: Dict[str, Tuple[Tuple[Callable, Callable], Tuple[Callable, Callable]]], + gm_a: GraphModule, + gm_b: GraphModule, + ) -> None: + """ + Verifies that the types specified in expected_types match + the underlying objects pointed to by the nodes in matched_subgraph_pairs. + + An example successful test case: + + matched_subgraph_pairs = {'x0': (graph_a_conv_0_node, graph_b_conv_0_node)} + expected_types = {'x0': (nn.Conv2d, nnq.Conv2d)} + + The function tests for key equivalence, and verifies types with + instance checks. 
+ """ + + def _get_underlying_op_type( + node: Node, gm: GraphModule + ) -> Union[Callable, str]: + if node.op == 'call_module': + mod = getattr(gm, node.target) + return type(mod) + else: + assert node.op in ('call_function', 'call_method') + return node.target + + self.assertTrue( + len(matched_subgraph_pairs) == len(expected_types), + f'Expected length of results to match, but got {len(matched_subgraph_pairs)} and {len(expected_types)}' + ) + for k, v in expected_types.items(): + expected_types_a, expected_types_b = v + exp_type_start_a, exp_type_end_a = expected_types_a + exp_type_start_b, exp_type_end_b = expected_types_b + subgraph_a, subgraph_b = matched_subgraph_pairs[k] + + act_type_start_a = _get_underlying_op_type(subgraph_a.start_node, gm_a) + act_type_start_b = _get_underlying_op_type(subgraph_b.start_node, gm_b) + act_type_end_a = _get_underlying_op_type(subgraph_a.end_node, gm_a) + act_type_end_b = _get_underlying_op_type(subgraph_b.end_node, gm_b) + types_match = (exp_type_start_a is act_type_start_a) and \ + (exp_type_end_a is act_type_end_a) and \ + (exp_type_start_b is act_type_start_b) and \ + (exp_type_end_b is act_type_end_b) + self.assertTrue( + types_match, + 'Type mismatch at {}: expected {}, got {}'.format( + k, + (exp_type_start_a, exp_type_end_a, exp_type_start_b, exp_type_end_b), + (act_type_start_a, act_type_end_a, act_type_start_b, act_type_end_b)) + ) + + def assert_ns_compare_dict_valid( + self, + act_compare_dict: Dict[str, Dict[str, Dict[str, Any]]], + ) -> None: + """ + Verifies that the act_compare_dict (output of Numeric Suite APIs) is valid: + 1. for each layer, results are recorded for two models + 2. number of seen tensors match + 3. shapes of each pair of seen tensors match + """ + for layer_name, result_type_to_data in act_compare_dict.items(): + for result_type, layer_data in result_type_to_data.items(): + self.assertTrue( + len(layer_data) == 2, + f"Layer {layer_name} does not have exactly two model results.") + model_name_0, model_name_1 = layer_data.keys() + for res_idx in range(len(layer_data[model_name_0])): + layer_data_0 = layer_data[model_name_0][res_idx] + layer_data_1 = layer_data[model_name_1][res_idx] + self.assertTrue( + layer_data_0['type'] == layer_data_0['type'], + f"Layer {layer_name}, {model_name_0} and {model_name_1} do not have the same type.") + + self.assertTrue( + len(layer_data_0['values']) == + len(layer_data_1['values']), + f"Layer {layer_name}, {model_name_0} and {model_name_1} do not have the same number of seen Tensors.") + + # F.conv1d weight has rank 3, and toq.conv1d unpacked weight + # has rank 4. For now, skip the length check for conv1d only. 
+ is_weight_functional_conv1d = ( + result_type == NSSingleResultValuesType.WEIGHT.value and + ( + 'conv1d' in layer_data_0['prev_node_target_type'] or + 'conv1d' in layer_data_1['prev_node_target_type'] + ) + ) + if not is_weight_functional_conv1d: + for idx in range(len(layer_data_0['values'])): + values_0 = layer_data_0['values'][idx] + values_1 = layer_data_1['values'][idx] + if isinstance(values_0, torch.Tensor): + self.assertTrue( + values_0.shape == values_1.shape, + f"Layer {layer_name}, {model_name_0} and {model_name_1} " + + f"have a shape mismatch at idx {idx}.") + elif isinstance(values_0, list): + values_0 = values_0[0] + values_1 = values_1[0] + self.assertTrue( + values_0.shape == values_1.shape, + f"Layer {layer_name}, {model_name_0} and {model_name_1} " + + f"have a shape mismatch at idx {idx}.") + else: + assert isinstance(values_0, tuple), \ + f"unhandled type {type(values_0)}" + assert len(values_0) == 2 + assert len(values_0[1]) == 2 + assert values_0[0].shape == values_1[0].shape + assert values_0[1][0].shape == values_1[1][0].shape + assert values_0[1][1].shape == values_1[1][1].shape + + # verify that ref_node_name is valid + ref_node_name_0 = layer_data_0['ref_node_name'] + ref_node_name_1 = layer_data_1['ref_node_name'] + prev_node_name_0 = layer_data_0['prev_node_name'] + prev_node_name_1 = layer_data_1['prev_node_name'] + if layer_data_0['type'] == NSSingleResultValuesType.NODE_OUTPUT.value: + self.assertTrue(ref_node_name_0 == prev_node_name_0) + self.assertTrue(ref_node_name_1 == prev_node_name_1) + elif layer_data_0['type'] == NSSingleResultValuesType.NODE_INPUT.value: + self.assertTrue(ref_node_name_0 != prev_node_name_0) + self.assertTrue(ref_node_name_1 != prev_node_name_1) + + def checkGraphModeFxOp( + self, + model, + inputs, + quant_type, + expected_node=None, + expected_node_occurrence=None, + expected_node_list=None, + is_reference=False, + print_debug_info=False, + custom_qconfig_dict=None, + prepare_expected_node=None, + prepare_expected_node_occurrence=None, + prepare_expected_node_list=None, + prepare_custom_config=None, + backend_config=None): + """ Quantizes model with graph mode quantization on fx and check if the + quantized model contains the quantized_node + + Args: + model: floating point torch.nn.Module + inputs: one positional sample input arguments for model + expected_node: NodeSpec + e.g. NodeSpec.call_function(torch.quantize_per_tensor) + expected_node_occurrence: a dict from NodeSpec to + expected number of occurrences (int) + e.g. {NodeSpec.call_function(torch.quantize_per_tensor) : 1, + NodeSpec.call_method('dequantize'): 1} + expected_node_list: a list of NodeSpec, used to check the order + of the occurrence of Node + e.g. 
[NodeSpec.call_function(torch.quantize_per_tensor), + NodeSpec.call_module(nnq.Conv2d), + NodeSpec.call_function(F.hardtanh_), + NodeSpec.call_method('dequantize')] + is_reference: if True, enables reference mode + print_debug_info: if True, prints debug info + custom_qconfig_dict: overrides default qconfig_dict + prepare_expected_node: same as expected_node, but for prepare + prepare_expected_node_occurrence: same as + expected_node_occurrence, but for prepare + prepare_expected_node_list: same as expected_node_list, but + for prepare + + Returns: + A dictionary with the following structure: + { + "prepared": ..., # the prepared model + "quantized": ..., # the quantized non-reference model + "quantized_reference": ..., # the quantized reference model + "result": ..., # the result for either quantized or + # quantized_reference model depending on the + # is_reference argument + } + """ + # TODO: make img_data a single example instead of a list + if type(inputs) == list: + inputs = inputs[0] + + if quant_type == QuantType.QAT: + qconfig_mapping = get_default_qat_qconfig_mapping(torch.backends.quantized.engine) + model.train() + elif quant_type == QuantType.STATIC: + qconfig_mapping = get_default_qconfig_mapping(torch.backends.quantized.engine) + model.eval() + else: + qconfig = default_dynamic_qconfig + qconfig_mapping = QConfigMapping().set_global(qconfig) + model.eval() + + if quant_type == QuantType.QAT: + prepare = prepare_qat_fx + else: + prepare = prepare_fx + + # overwrite qconfig_dict with custom_qconfig_dict + if custom_qconfig_dict is not None: + assert type(custom_qconfig_dict) in (QConfigMapping, dict), \ + 'custom_qconfig_dict should be a QConfigMapping or a dict' + if isinstance(custom_qconfig_dict, QConfigMapping): + qconfig_mapping = custom_qconfig_dict + else: + qconfig_mapping = QConfigMapping.from_dict(custom_qconfig_dict) + prepared = prepare( + model, qconfig_mapping, + example_inputs=inputs, + prepare_custom_config=prepare_custom_config, + backend_config=backend_config) + if not quant_type == QuantType.DYNAMIC: + prepared(*inputs) + + if print_debug_info: + print() + print('quant type:\n', quant_type) + print('original model:\n', model) + print() + print('prepared model:\n', prepared) + + self.checkGraphModuleNodes( + prepared, prepare_expected_node, + prepare_expected_node_occurrence, prepare_expected_node_list) + + prepared_copy = copy.deepcopy(prepared) + qgraph = convert_fx(copy.deepcopy(prepared)) + qgraph_reference = convert_to_reference_fx(copy.deepcopy(prepared)) + result = qgraph(*inputs) + result_reference = qgraph_reference(*inputs) + qgraph_copy = copy.deepcopy(qgraph) + qgraph_reference_copy = copy.deepcopy(qgraph_reference) + + qgraph_to_check = qgraph_reference if is_reference else qgraph + if print_debug_info: + print() + print('quantized model:\n', qgraph_to_check) + self.printGraphModule(qgraph_to_check) + print() + self.checkGraphModuleNodes( + qgraph_to_check, expected_node, expected_node_occurrence, expected_node_list) + return {"prepared": prepared_copy, + "quantized": qgraph_copy, + "quantized_reference": qgraph_reference_copy, + "quantized_output": result, + "quantized_reference_output": result_reference} + + + def checkEmbeddingSerialization(self, qemb, num_embeddings, embedding_dim, indices, offsets, + set_qconfig, is_emb_bag, dtype=torch.quint8): + # Test serialization of dynamic EmbeddingBag module using state_dict + if is_emb_bag: + inputs = [indices, offsets] + else: + inputs = [indices] + emb_dict = qemb.state_dict() + b = io.BytesIO() + 
torch.save(emb_dict, b) + b.seek(0) + loaded_dict = torch.load(b) + embedding_unpack = torch.ops.quantized.embedding_bag_unpack + # Check unpacked weight values explicitly + for key in emb_dict: + if isinstance(emb_dict[key], torch._C.ScriptObject): + assert isinstance(loaded_dict[key], torch._C.ScriptObject) + emb_weight = embedding_unpack(emb_dict[key]) + loaded_weight = embedding_unpack(loaded_dict[key]) + self.assertEqual(emb_weight, loaded_weight) + + # Check state dict serialization and torch.save APIs + if is_emb_bag: + loaded_qemb = nnq.EmbeddingBag(num_embeddings=num_embeddings, embedding_dim=embedding_dim, + include_last_offset=True, mode='sum', dtype=dtype) + else: + loaded_qemb = nnq.Embedding(num_embeddings=num_embeddings, embedding_dim=embedding_dim, dtype=dtype) + self.check_eager_serialization(qemb, loaded_qemb, inputs) + + loaded_qemb.load_state_dict(loaded_dict) + self.assertEqual(embedding_unpack(qemb._packed_params._packed_weight), + embedding_unpack(loaded_qemb._packed_params._packed_weight)) + + + # Test JIT serialization + self.checkScriptable(qemb, [inputs], check_save_load=True) + + # Test from_float call + if is_emb_bag: + float_embedding = torch.nn.EmbeddingBag(num_embeddings=num_embeddings, embedding_dim=embedding_dim, + include_last_offset=True, scale_grad_by_freq=False, mode='sum') + else: + float_embedding = torch.nn.Embedding(num_embeddings=num_embeddings, embedding_dim=embedding_dim) + + if set_qconfig: + float_qparams_observer = PerChannelMinMaxObserver.with_args(dtype=dtype, + qscheme=torch.per_channel_affine_float_qparams, + ch_axis=0) + float_embedding.qconfig = QConfig(activation=default_dynamic_quant_observer, + weight=float_qparams_observer) + + prepare_dynamic(float_embedding) + + float_embedding(*inputs) + if is_emb_bag: + q_embeddingbag = nnq.EmbeddingBag.from_float(float_embedding) + expected_name = "QuantizedEmbeddingBag" + else: + q_embeddingbag = nnq.Embedding.from_float(float_embedding) + expected_name = "QuantizedEmbedding" + + q_embeddingbag(*inputs) + + self.assertTrue(expected_name in str(q_embeddingbag)) + +class QuantizationLiteTestCase(QuantizationTestCase): + def _create_quantized_model(self, model_class: Type[torch.nn.Module], **kwargs): + # Creates quantized model for testing mobile script modules + qengine = "qnnpack" + with override_quantized_engine(qengine): + qconfig = torch.ao.quantization.get_default_qconfig(qengine) + model = model_class(**kwargs) + model = quantize(model, test_only_eval_fn, [self.calib_data]) + + return model + + def _compare_script_and_mobile(self, + model: torch.nn.Module, + input: torch.Tensor): + # Compares the numerical outputs for script and lite modules + qengine = "qnnpack" + with override_quantized_engine(qengine): + script_module = torch.jit.script(model) + script_module_result = script_module(input) + + max_retry = 5 + for retry in range(1, max_retry + 1): + # retries `max_retry` times; breaks iff succeeds else throws exception + try: + buffer = io.BytesIO(script_module._save_to_buffer_for_lite_interpreter()) + buffer.seek(0) + mobile_module = _load_for_lite_interpreter(buffer) + + mobile_module_result = mobile_module(input) + + torch.testing.assert_close(script_module_result, mobile_module_result) + mobile_module_forward_result = mobile_module.forward(input) + torch.testing.assert_close(script_module_result, mobile_module_forward_result) + + mobile_module_run_method_result = mobile_module.run_method("forward", input) + torch.testing.assert_close(script_module_result, 
mobile_module_run_method_result) + except AssertionError as e: + if retry == max_retry: + raise e + else: + continue + break + + +# Below are a series of toy models to use in testing quantization + +class SingleLayerLinearModel(torch.nn.Module): + def __init__(self): + super().__init__() + self.fc1 = torch.nn.Linear(5, 5).to(dtype=torch.float) + + def forward(self, x): + x = self.fc1(x) + return x + + def get_example_inputs(self) -> Tuple[Any, ...]: + return (torch.rand(1, 5),) + +class AnnotatedSingleLayerLinearModel(torch.nn.Module): + def __init__(self, qengine='fbgemm'): + super().__init__() + self.qconfig = torch.ao.quantization.get_default_qconfig(qengine) + self.fc1 = QuantWrapper(torch.nn.Linear(5, 5).to(dtype=torch.float)) + + def forward(self, x): + x = self.fc1(x) + return x + + def get_example_inputs(self) -> Tuple[Any, ...]: + return (torch.rand(1, 5),) + +class SingleLayerLinearDynamicModel(torch.nn.Module): + def __init__(self, qengine='fbgemm'): + super().__init__() + self.qconfig = torch.ao.quantization.get_default_qconfig(qengine) + self.fc1 = torch.nn.Linear(5, 5).to(dtype=torch.float) + + def forward(self, x): + x = self.fc1(x) + return x + + def get_example_inputs(self) -> Tuple[Any, ...]: + return (torch.rand(1, 5),) + +class LinearAddModel(nn.Module): + def __init__(self): + super().__init__() + self.fc1 = torch.nn.Linear(5, 8).to(dtype=torch.float) + self.fc2 = torch.nn.Linear(8, 5).to(dtype=torch.float) + + def forward(self, x): + x = self.fc1(x) + x = torch.add(x, 5) + x = self.fc2(x) + return x + + def get_example_inputs(self) -> Tuple[Any, ...]: + return (torch.rand(1, 5),) + +class RNNDynamicModel(torch.nn.Module): + def __init__(self, mod_type): + super().__init__() + self.qconfig = default_dynamic_qconfig + if mod_type == 'GRU': + self.mod = torch.nn.GRU(2, 2).to(dtype=torch.float) + if mod_type == 'LSTM': + self.mod = torch.nn.LSTM(2, 2).to(dtype=torch.float) + + def forward(self, x): + x = self.mod(x) + return x + +class RNNCellDynamicModel(torch.nn.Module): + def __init__(self, mod_type): + super().__init__() + self.qconfig = default_dynamic_qconfig + if mod_type == 'GRUCell': + self.mod = torch.nn.GRUCell(2, 2).to(dtype=torch.float) + if mod_type == 'LSTMCell': + self.mod = torch.nn.LSTMCell(2, 2).to(dtype=torch.float) + if mod_type == 'RNNReLU': + self.mod = torch.nn.RNNCell(2, 2, nonlinearity='relu').to(dtype=torch.float) + if mod_type == 'RNNTanh': + self.mod = torch.nn.RNNCell(2, 2, nonlinearity='tanh').to(dtype=torch.float) + + def forward(self, x): + x = self.mod(x) + return x + +class LSTMwithHiddenDynamicModel(torch.nn.Module): + def __init__(self, qengine='fbgemm'): + super().__init__() + self.qconfig = torch.ao.quantization.get_default_qconfig(qengine) + self.lstm = torch.nn.LSTM(2, 2).to(dtype=torch.float) + + def forward(self, x, hid): + x, hid = self.lstm(x, hid) + return x, hid + +class ConvModel(torch.nn.Module): + def __init__(self): + super().__init__() + self.conv = torch.nn.Conv2d(3, 5, 3, bias=False).to(dtype=torch.float) + + def forward(self, x): + x = self.conv(x) + return x + + def get_example_inputs(self) -> Tuple[Any, ...]: + return (torch.rand(1, 3, 5, 5),) + +class ConvTransposeModel(torch.nn.Module): + def __init__(self): + super().__init__() + self.conv = torch.nn.ConvTranspose2d(3, 5, 3, bias=False).to(dtype=torch.float) + + def forward(self, x): + x = self.conv(x) + return x + + def get_example_inputs(self) -> Tuple[Any, ...]: + return (torch.rand(1, 3, 5, 5),) + +class AnnotatedConvModel(torch.nn.Module): + def 
__init__(self, qengine): + super().__init__() + self.qconfig = torch.ao.quantization.get_default_qconfig(qengine) + self.conv = torch.nn.Conv2d(3, 5, 3, bias=False).to(dtype=torch.float) + self.quant = QuantStub() + self.dequant = DeQuantStub() + + def forward(self, x): + x = self.quant(x) + x = self.conv(x) + x = self.dequant(x) + return x + + def get_example_inputs(self) -> Tuple[Any, ...]: + return (torch.rand(1, 3, 5, 5),) + +class AnnotatedConvTransposeModel(torch.nn.Module): + def __init__(self, qengine): + super().__init__() + self.qconfig = torch.ao.quantization.get_default_qconfig(qengine) + self.conv = torch.nn.ConvTranspose2d(3, 5, 3, bias=False).to(dtype=torch.float) + self.quant = QuantStub() + self.dequant = DeQuantStub() + + def forward(self, x): + x = self.quant(x) + x = self.conv(x) + x = self.dequant(x) + return x + + def get_example_inputs(self) -> Tuple[Any, ...]: + return (torch.rand(1, 3, 5, 5),) + +class ConvBnModel(torch.nn.Module): + def __init__(self): + super().__init__() + self.conv = torch.nn.Conv2d(3, 5, 3, bias=False).to(dtype=torch.float) + self.bn = torch.nn.BatchNorm2d(5).to(dtype=torch.float) + + def forward(self, x): + x = self.conv(x) + x = self.bn(x) + return x + + def get_example_inputs(self) -> Tuple[Any, ...]: + return (torch.rand(1, 3, 5, 5),) + +class AnnotatedConvBnModel(torch.nn.Module): + def __init__(self): + super().__init__() + self.qconfig = default_qconfig + self.conv = torch.nn.Conv2d(3, 5, 3, bias=False).to(dtype=torch.float) + self.bn = torch.nn.BatchNorm2d(5).to(dtype=torch.float) + self.quant = QuantStub() + self.dequant = DeQuantStub() + + def forward(self, x): + x = self.quant(x) + x = self.conv(x) + x = self.bn(x) + x = self.dequant(x) + return x + + def get_example_inputs(self) -> Tuple[Any, ...]: + return (torch.rand(1, 3, 5, 5),) + +class ConvBnReLUModel(torch.nn.Module): + def __init__(self): + super().__init__() + self.conv = torch.nn.Conv2d(3, 5, 3, bias=False).to(dtype=torch.float) + self.bn = torch.nn.BatchNorm2d(5).to(dtype=torch.float) + self.relu = nn.ReLU(inplace=True) + + def forward(self, x): + x = self.conv(x) + x = self.bn(x) + x = self.relu(x) + return x + + def get_example_inputs(self) -> Tuple[Any, ...]: + return (torch.rand(1, 3, 5, 5),) + +class AnnotatedConvBnReLUModel(torch.nn.Module): + def __init__(self, qengine='fbgemm'): + super().__init__() + self.qconfig = torch.ao.quantization.get_default_qconfig(qengine) + self.conv = torch.nn.Conv2d(3, 5, 3, bias=False).to(dtype=torch.float) + self.bn = torch.nn.BatchNorm2d(5).to(dtype=torch.float) + self.relu = nn.ReLU(inplace=True) + self.quant = QuantStub() + self.dequant = DeQuantStub() + + def forward(self, x): + x = self.quant(x) + x = self.conv(x) + x = self.bn(x) + x = self.relu(x) + x = self.dequant(x) + return x + + def fuse_model(self): + # TODO: remove this check and define two fuse_modules function on this module + if self.training: + torch.ao.quantization.fuse_modules_qat(self, [['conv', 'bn', 'relu']], inplace=True) + else: + torch.ao.quantization.fuse_modules(self, [['conv', 'bn', 'relu']], inplace=True) + + def get_example_inputs(self) -> Tuple[Any, ...]: + return (torch.rand(1, 3, 5, 5),) + +class TwoLayerConvModel(torch.nn.Module): + def __init__(self): + super().__init__() + self.conv1 = torch.nn.Conv2d(3, 5, 3, bias=False).to(dtype=torch.float) + self.conv2 = torch.nn.Conv2d(5, 5, 1, bias=False).to(dtype=torch.float) + + def forward(self, x): + x = self.conv1(x) + x = self.conv2(x) + return x + + def get_example_inputs(self) -> Tuple[Any, ...]: + 
return (torch.rand(1, 3, 5, 5),) + +class TwoLayerLinearModel(torch.nn.Module): + def __init__(self): + super().__init__() + self.fc1 = torch.nn.Linear(5, 8).to(dtype=torch.float) + self.fc2 = torch.nn.Linear(8, 5).to(dtype=torch.float) + + def forward(self, x): + x = self.fc1(x) + x = self.fc2(x) + return x + + def get_example_inputs(self) -> Tuple[Any, ...]: + return (torch.rand(1, 5),) + +class LinearModelWithSubmodule(nn.Module): + def __init__(self): + super().__init__() + self.subm = TwoLayerLinearModel() + self.fc = nn.Linear(5, 5) + + def forward(self, x): + x = self.subm(x) + x = self.fc(x) + return x + + def get_example_inputs(self) -> Tuple[Any, ...]: + return self.subm.get_example_inputs() + +class AnnotatedTwoLayerLinearModel(torch.nn.Module): + def __init__(self): + super().__init__() + self.fc1 = torch.nn.Linear(5, 8).to(dtype=torch.float) + self.fc2 = QuantWrapper(torch.nn.Linear(8, 5).to(dtype=torch.float)) + self.fc2.qconfig = torch.ao.quantization.get_default_qconfig("fbgemm") + + def forward(self, x): + x = self.fc1(x) + x = self.fc2(x) + return x + + def get_example_inputs(self) -> Tuple[Any, ...]: + return (torch.rand(1, 5),) + +class ActivationsTestModel(torch.nn.Module): + def __init__(self): + super().__init__() + self.qconfig = torch.ao.quantization.get_default_qconfig("fbgemm") + self.quant = torch.ao.quantization.QuantStub() + self.hardswish = torch.nn.Hardswish().to(dtype=torch.float) + self.elu = torch.nn.ELU().to(dtype=torch.float) + self.dequant = torch.ao.quantization.DeQuantStub() + + def forward(self, x): + x = self.quant(x) + x = self.hardswish(x) + x = self.elu(x) + x = self.dequant(x) + return x + +class LinearReluModel(torch.nn.Module): + def __init__(self): + super().__init__() + self.fc = torch.nn.Linear(5, 5).to(dtype=torch.float) + self.relu = torch.nn.ReLU() + + def forward(self, x): + x = self.relu(self.fc(x)) + return x + + def get_example_inputs(self) -> Tuple[Any, ...]: + return (torch.rand(1, 5),) + + +class LinearReluLinearModel(torch.nn.Module): + def __init__(self): + super().__init__() + self.fc1 = torch.nn.Linear(5, 8).to(dtype=torch.float) + self.relu = torch.nn.ReLU() + self.fc2 = torch.nn.Linear(8, 5).to(dtype=torch.float) + + def forward(self, x): + x = self.fc1(x) + x = self.relu(x) + x = self.fc2(x) + return x + + def get_example_inputs(self) -> Tuple[Any, ...]: + return (torch.rand(1, 5),) + +class LinearReluAddModel(torch.nn.Module): + def __init__(self): + super().__init__() + self.fc1 = torch.nn.Linear(5, 5).to(dtype=torch.float) + self.relu = torch.nn.ReLU() + self.fc2 = torch.nn.Linear(5, 5).to(dtype=torch.float) + + def forward(self, x): + x = self.fc1(x) + x = self.relu(x) + x = torch.add(x, 5) + x = self.fc2(x) + self.relu = torch.nn.ReLU() + return x + + def get_example_inputs(self) -> Tuple[Any, ...]: + return (torch.rand(1, 5),) + +class LinearBnLeakyReluModel(torch.nn.Module): + def __init__(self, with_bn=True): + super().__init__() + self.linear = nn.Linear(5, 5) + self.bn1d = nn.BatchNorm1d(5) + self.leaky_relu = nn.LeakyReLU(0.01) + self.with_bn = with_bn + + def forward(self, x): + x = self.linear(x) + if self.with_bn: + x = self.bn1d(x) + x = self.leaky_relu(x) + return x + + def get_example_inputs(self) -> Tuple[Any, ...]: + return (torch.rand(1, 5),) + +class LinearTanhModel(torch.nn.Module): + def __init__(self): + super().__init__() + self.linear = nn.Linear(5, 5) + self.tanh = nn.Tanh() + + def forward(self, x): + x = self.linear(x) + x = self.tanh(x) + return x + + def get_example_inputs(self) -> Tuple[Any, 
...]: + return (torch.rand(1, 5),) + +class ConvBnAddReluModel(torch.nn.Module): + def __init__(self, + with_bn=True, + with_relu=True, + left_conv=True, + two_conv=True, + use_torch_add=True): + super().__init__() + self.conv = nn.Conv2d(5, 5, (2, 2)) + self.conv2 = nn.Conv2d(5, 5, (2, 2)) + self.bn = nn.BatchNorm2d(5) + self.relu = nn.ReLU() + self.with_bn = with_bn + self.with_relu = with_relu + self.two_conv = two_conv + self.left_conv = left_conv + self.use_torch_add = use_torch_add + + def forward(self, x1, x2): + if self.two_conv: + if self.use_torch_add: + if self.with_bn: + x = torch.add(self.bn(self.conv(x1)), self.conv2(x1)) + else: + x = torch.add(self.conv(x1), self.conv2(x1)) + else: + if self.with_bn: + x = self.bn(self.conv(x1)) + self.conv2(x1) + else: + x = self.conv(x1) + self.conv2(x1) + else: + if self.use_torch_add: + if self.left_conv: + if self.with_bn: + x = torch.add(self.bn(self.conv(x1)), x2) + else: + x = torch.add(self.conv(x1), x2) + else: + if self.with_bn: + x = torch.add(x2, self.bn(self.conv(x1))) + else: + x = torch.add(x2, self.conv(x1)) + else: + if self.left_conv: + if self.with_bn: + x = self.bn(self.conv(x1)) + x2 + else: + x = self.conv(x1) + x2 + else: + if self.with_bn: + x = x2 + self.bn(self.conv(x1)) + else: + x = x2 + self.conv(x1) + if self.with_relu: + x = self.relu(x) + return x + + def get_example_inputs(self) -> Tuple[Any, ...]: + return (torch.rand(1, 5, 3, 3), torch.rand(1, 5, 2, 2)) + +# TODO: self.fc should be self.conv +class ConvReluModel(torch.nn.Module): + def __init__(self): + super().__init__() + self.fc = torch.nn.Conv2d(3, 5, 3).to(dtype=torch.float) + self.relu = torch.nn.ReLU() + + def forward(self, x): + x = self.relu(self.fc(x)) + return x + + def get_example_inputs(self) -> Tuple[Any, ...]: + return (torch.rand(1, 3, 5, 5),) + +# TODO: self.fc should be self.conv +class ConvReluConvModel(torch.nn.Module): + def __init__(self): + super().__init__() + self.fc1 = torch.nn.Conv2d(3, 5, 3).to(dtype=torch.float) + self.relu = torch.nn.ReLU() + self.fc2 = torch.nn.Conv2d(5, 5, 1).to(dtype=torch.float) + + def forward(self, x): + x = self.fc1(x) + x = self.relu(x) + x = self.fc2(x) + return x + + def get_example_inputs(self) -> Tuple[Any, ...]: + return (torch.rand(1, 3, 5, 5),) + +# TODO: self.fc should be self.conv +class ConvReluAddModel(torch.nn.Module): + def __init__(self): + super().__init__() + self.fc1 = torch.nn.Conv2d(3, 5, 3).to(dtype=torch.float) + self.relu = torch.nn.ReLU() + self.fc2 = torch.nn.Conv2d(5, 5, 1).to(dtype=torch.float) + + def forward(self, x): + x = self.fc1(x) + x = self.relu(x) + x = torch.add(x, 5) + x = self.fc2(x) + self.relu = torch.nn.ReLU() + return x + + def get_example_inputs(self) -> Tuple[Any, ...]: + return (torch.rand(1, 3, 5, 5),) + +class NormalizationTestModel(torch.nn.Module): + def __init__(self): + super().__init__() + self.quant = torch.ao.quantization.QuantStub() + self.fc1 = torch.nn.Linear(5, 8).to(dtype=torch.float) + self.layer_norm = torch.nn.LayerNorm(8) + self.group_norm = torch.nn.GroupNorm(2, 8) + self.instance_norm1d = torch.nn.InstanceNorm1d(8) + self.instance_norm2d = torch.nn.InstanceNorm2d(8) + self.instance_norm3d = torch.nn.InstanceNorm3d(8) + + def forward(self, x): + x = self.quant(x) + x = self.fc1(x) + x = self.layer_norm(x) + x = self.group_norm(x.unsqueeze(-1).repeat(1, 1, 3)) + x = self.instance_norm1d(x) + x = self.instance_norm2d(x.unsqueeze(-1)) + x = self.instance_norm3d(x.unsqueeze(-1)) + return x + +class NestedModel(torch.nn.Module): + def 
__init__(self): + super().__init__() + self.sub1 = LinearReluModel() + self.sub2 = TwoLayerLinearModel() + self.fc3 = torch.nn.Linear(5, 5).to(dtype=torch.float) + + def forward(self, x): + x = self.sub1(x) + x = self.sub2(x) + x = self.fc3(x) + return x + +class AnnotatedNestedModel(torch.nn.Module): + def __init__(self, qengine): + super().__init__() + self.sub1 = LinearReluModel() + self.sub2 = TwoLayerLinearModel() + self.fc3 = QuantWrapper(torch.nn.Linear(5, 5).to(dtype=torch.float)) + self.fc3.qconfig = default_qconfig + self.sub2.fc1 = QuantWrapper(self.sub2.fc1) + if qengine == 'fbgemm': + self.sub2.fc1.qconfig = default_per_channel_qconfig + else: + self.sub2.fc1.qconfig = default_qconfig + + def forward(self, x): + x = self.sub1(x) + x = self.sub2(x) + x = self.fc3(x) + return x + +class AnnotatedSubNestedModel(torch.nn.Module): + def __init__(self): + super().__init__() + self.sub1 = LinearReluModel() + self.sub2 = QuantWrapper(TwoLayerLinearModel()) + self.fc3 = QuantWrapper(torch.nn.Linear(5, 5).to(dtype=torch.float)) + self.fc3.qconfig = default_qconfig + self.sub2.qconfig = default_qconfig + + def forward(self, x): + x = self.sub1(x) + x = self.sub2(x) + x = self.fc3(x) + return x + +class AnnotatedCustomConfigNestedModel(torch.nn.Module): + def __init__(self): + super().__init__() + self.sub1 = LinearReluModel() + self.sub2 = TwoLayerLinearModel() + self.fc3 = QuantWrapper(torch.nn.Linear(5, 5).to(dtype=torch.float)) + self.fc3.qconfig = default_qconfig + self.sub2.qconfig = default_qconfig + + custom_options = { + 'dtype': torch.quint8, + 'qscheme': torch.per_tensor_affine + } + custom_qconfig = QConfig(activation=default_observer.with_args(**custom_options), + weight=default_weight_observer) + self.sub2.fc1.qconfig = custom_qconfig + + self.sub2.fc1 = QuantWrapper(self.sub2.fc1) + self.sub2.fc2 = QuantWrapper(self.sub2.fc2) + + def forward(self, x): + x = self.sub1(x) + x = self.sub2(x) + x = self.fc3(x) + return x + +class QuantSubModel(torch.nn.Module): + def __init__(self): + super().__init__() + self.sub1 = LinearReluModel() + self.sub2 = QuantWrapper(TwoLayerLinearModel()) + self.sub2.qconfig = default_qconfig + self.fc3 = torch.nn.Linear(5, 5).to(dtype=torch.float) + self.fc3.qconfig = default_qconfig + + def forward(self, x): + x = self.sub1(x) + x = self.sub2(x) + x = self.fc3(x) + return x + +class InnerModule(torch.nn.Module): + def __init__(self): + super().__init__() + self.fc1 = torch.nn.Linear(5, 8).to(dtype=torch.float) + self.relu1 = torch.nn.ReLU() + self.fc2 = torch.nn.Linear(8, 5).to(dtype=torch.float) + self.relu2 = torch.nn.ReLU() + + def forward(self, x): + return self.relu2(self.fc2(self.relu1(self.fc1(x)))) + + def fuse_modules(self): + fusable_layers = [] + named_children = list(self.named_children()) + for idx, (current_name, layer) in enumerate(named_children): + if isinstance(layer, torch.nn.Linear): + if idx >= len(named_children) - 1: + break + if isinstance(named_children[idx + 1][1], torch.nn.ReLU): + fusable_layers.append([current_name, + named_children[idx + 1][0]]) + # TODO: remove this check and define two fuse_modules function on this module + if self.training: + torch.ao.quantization.fuse_modules_qat(self, fusable_layers, inplace=True) + else: + torch.ao.quantization.fuse_modules(self, fusable_layers, inplace=True) + +class FunctionalLinear(torch.nn.Module): + def __init__(self): + super().__init__() + self.weight = torch.rand((5, 5)) + self.bias = torch.zeros(5) + + def forward(self, x): + return F.linear(x, self.weight, self.bias) + 
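+ # Note: weight and bias are plain tensors rather than nn.Parameter, so this
+ # module exercises the functional F.linear pattern (as opposed to an
+ # nn.Linear child module that graph mode quantization could swap out).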
+ def get_example_inputs(self) -> Tuple[Any, ...]: + return (torch.rand(1, 5),) + +class SingleLayerFunctionalLinearModel(torch.nn.Module): + def __init__(self): + super().__init__() + self.linear1 = FunctionalLinear() + + def forward(self, x): + x = self.linear1(x) + return x + + def get_example_inputs(self) -> Tuple[Any, ...]: + return self.linear1.get_example_inputs() + +class TwoLayerFunctionalLinearModel(torch.nn.Module): + def __init__(self): + super().__init__() + self.linear1 = FunctionalLinear() + self.linear2 = FunctionalLinear() + + def forward(self, x): + x = self.linear1(x) + x = self.linear2(x) + return x + + def get_example_inputs(self) -> Tuple[Any, ...]: + return self.linear1.get_example_inputs() + +class FunctionalLinearAddModel(torch.nn.Module): + def __init__(self): + super().__init__() + self.linear1 = FunctionalLinear() + self.linear2 = FunctionalLinear() + + def forward(self, x): + x = self.linear1(x) + x = torch.add(x, 5) + x = self.linear2(x) + return x + + def get_example_inputs(self) -> Tuple[Any, ...]: + return self.linear1.get_example_inputs() + +class FunctionalLinearReluModel(nn.Module): + def __init__(self): + super().__init__() + self.linear = FunctionalLinear() + + def forward(self, x): + x = self.linear(x) + x = F.relu(x) + return x + + def get_example_inputs(self) -> Tuple[Any, ...]: + return self.linear.get_example_inputs() + +class FunctionalLinearReluLinearModel(nn.Module): + def __init__(self): + super().__init__() + self.linear1 = FunctionalLinear() + self.relu = nn.ReLU() + self.linear2 = FunctionalLinear() + + def forward(self, x): + x = self.linear1(x) + x = self.relu(x) + x = self.linear2(x) + return x + + def get_example_inputs(self) -> Tuple[Any, ...]: + return self.linear1.get_example_inputs() + +class FunctionalConv2d(torch.nn.Module): + def __init__(self): + super().__init__() + self.weight = torch.rand(3, 3, 3, 3) + self.bias = torch.rand(3) + self.stride = (1, 1) + self.padding = (0, 0) + self.dilation = (1, 1) + self.groups = 1 + + def forward(self, x): + return F.conv2d(x, self.weight, self.bias, self.stride, self.padding, self.dilation, self.groups) + + def get_example_inputs(self) -> Tuple[Any, ...]: + return (torch.rand(1, 3, 5, 5),) + +class SingleLayerFunctionalConvModel(torch.nn.Module): + def __init__(self): + super().__init__() + self.conv1 = FunctionalConv2d() + + def forward(self, x): + x = self.conv1(x) + return x + + def get_example_inputs(self) -> Tuple[Any, ...]: + return self.conv1.get_example_inputs() + +class TwoLayerFunctionalConvModel(torch.nn.Module): + def __init__(self): + super().__init__() + self.conv1 = FunctionalConv2d() + self.conv2 = FunctionalConv2d() + + def forward(self, x): + x = self.conv1(x) + x = self.conv2(x) + return x + + def get_example_inputs(self) -> Tuple[Any, ...]: + return self.conv1.get_example_inputs() + +class FunctionalConvReluModel(nn.Module): + def __init__(self): + super().__init__() + self.conv = FunctionalConv2d() + + def forward(self, x): + x = self.conv(x) + x = F.relu(x) + return x + + def get_example_inputs(self) -> Tuple[Any, ...]: + return self.conv.get_example_inputs() + +class FunctionalConvReluConvModel(nn.Module): + def __init__(self): + super().__init__() + self.conv1 = FunctionalConv2d() + self.relu = nn.ReLU() + self.conv2 = FunctionalConv2d() + + def forward(self, x): + x = self.conv1(x) + x = self.relu(x) + x = self.conv2(x) + return x + + def get_example_inputs(self) -> Tuple[Any, ...]: + return self.conv1.get_example_inputs() + +class SkipQuantModel(torch.nn.Module): 
+ r"""We can skip quantization by explicitly + setting qconfig of a submodule to None + """ + def __init__(self): + super().__init__() + self.sub = InnerModule() + self.fc = torch.nn.Linear(5, 5).to(dtype=torch.float) + + def forward(self, x): + return self.fc(self.sub(x)) + + def fuse_modules(self): + self.sub.fuse_modules() + +class AnnotatedSkipQuantModel(torch.nn.Module): + r"""We can skip quantization by explicitly + setting qconfig of a submodule to None + """ + def __init__(self, qengine): + super().__init__() + self.qconfig = torch.ao.quantization.get_default_qconfig(qengine) + self.sub = QuantWrapper(InnerModule()) + self.fc = torch.nn.Linear(5, 5).to(dtype=torch.float) + # don't quantize this fc + self.fc.qconfig = None + + def forward(self, x): + return self.fc(self.sub(x)) + + def fuse_modules(self): + self.sub.module.fuse_modules() + +class QuantStubModel(torch.nn.Module): + r"""A Module with manually inserted `QuantStub` and `DeQuantStub` + """ + def __init__(self): + super().__init__() + self.qconfig = torch.ao.quantization.get_default_qconfig("qnnpack") + self.quant = QuantStub() + self.dequant = DeQuantStub() + self.fc = torch.nn.Linear(5, 5).to(dtype=torch.float) + + def forward(self, x): + x = self.quant(x) + x = self.fc(x) + return self.dequant(x) + +class ManualLinearQATModel(torch.nn.Module): + r"""A Module with manually inserted `QuantStub` and `DeQuantStub` + """ + def __init__(self, qengine): + super().__init__() + self.qconfig = torch.ao.quantization.get_default_qat_qconfig(qengine) + self.quant = QuantStub() + self.dequant = DeQuantStub() + self.fc1 = torch.nn.Linear(5, 1).to(dtype=torch.float) + self.fc2 = torch.nn.Linear(1, 10).to(dtype=torch.float) + + def forward(self, x): + x = self.quant(x) + x = self.fc1(x) + x = self.fc2(x) + return self.dequant(x) + +class ManualDropoutQATModel(torch.nn.Module): + r"""A Module with manually inserted `QuantStub` and `DeQuantStub` + """ + def __init__(self, qengine): + super().__init__() + self.qconfig = torch.ao.quantization.get_default_qat_qconfig(qengine) + self.quant = QuantStub() + self.dequant = DeQuantStub() + self.fc1 = torch.nn.Linear(5, 1).to(dtype=torch.float) + self.dropout = torch.nn.Dropout(0.5) + + def forward(self, x): + x = self.quant(x) + x = self.fc1(x) + x = self.dropout(x) + return self.dequant(x) + +class ManualLinearDynamicQATModel(torch.nn.Module): + r"""A Module that uses a dynamic QAT by default. 
+ """ + def __init__(self, qconfig=None): + super().__init__() + self.qconfig = qconfig or default_dynamic_qat_qconfig + self.fc1 = torch.nn.Linear(5, 1).to(dtype=torch.float) + self.fc2 = torch.nn.Linear(1, 10).to(dtype=torch.float) + + def forward(self, x): + x = self.fc1(x) + x = self.fc2(x) + return x + +class ManualConvLinearQATModel(torch.nn.Module): + r"""A module with manually inserted `QuantStub` and `DeQuantStub` + and contains both linear and conv modules + """ + def __init__(self, qconfig=None): + super().__init__() + self.qconfig = qconfig if qconfig else torch.ao.quantization.get_default_qat_qconfig("qnnpack") + self.quant = QuantStub() + self.dequant = DeQuantStub() + self.conv = torch.nn.Conv2d(3, 1, kernel_size=3).to(dtype=torch.float) + self.fc1 = torch.nn.Linear(64, 10).to(dtype=torch.float) + self.fc2 = torch.nn.Linear(10, 10).to(dtype=torch.float) + + def forward(self, x): + x = self.quant(x) + x = self.conv(x) + x = x.view(-1, 64).contiguous() + x = self.fc1(x) + x = self.fc2(x) + return self.dequant(x) + +class ManualConvLinearSymmQATModel(ManualConvLinearQATModel): + r"""Same as ManualConvLinearQATModule but with Symmetric Quantization. + Supported only with qnnpack. + """ + def __init__(self): + super().__init__(default_symmetric_qnnpack_qat_qconfig) + +class ManualEmbeddingBagLinear(nn.Module): + def __init__(self): + super().__init__() + self.emb = nn.EmbeddingBag(num_embeddings=10, embedding_dim=12, mode='sum') + self.emb.qconfig = default_embedding_qat_qconfig + self.quant = QuantStub() + self.dequant = DeQuantStub() + self.linear = nn.Linear(12, 1).to(dtype=torch.float) + self.qconfig = get_default_qat_qconfig("qnnpack") + + def forward(self, input: torch.Tensor, offsets: Optional[torch.Tensor] = None, + per_sample_weights: Optional[torch.Tensor] = None): + x = self.emb(input, offsets, per_sample_weights) + x = self.quant(x) + x = self.linear(x) + return self.dequant(x) + +class DeFusedEmbeddingBagLinear(nn.Module): + r"""A module to simulate QAT embedding bag with a linear layer, + this module uses a separate embedding and bagging op, similar + to that which is described in the EmbeddingBag documentation. 
+ + https://pytorch.org/docs/stable/generated/torch.nn.EmbeddingBag.html + """ + def __init__(self) -> None: + super().__init__() + self.emb = nn.Embedding(num_embeddings=10, embedding_dim=12) + self.emb.qconfig = default_embedding_qat_qconfig + self.bagging_op = torch.sum + self.quant = QuantStub() + self.dequant = DeQuantStub() + self.linear = nn.Linear(12, 1).to(dtype=torch.float) + self.qconfig = get_default_qat_qconfig("qnnpack") + + def forward(self, input: torch.Tensor) -> torch.Tensor: + x = self.bagging_op(self.emb(input), dim=1) + x = self.quant(x) + x = self.linear(x) + return self.dequant(x) + +class SubModelForFusion(nn.Module): + def __init__(self): + super().__init__() + self.conv = nn.Conv2d(2, 2, 1, bias=None).to(dtype=torch.float) + self.bn = nn.BatchNorm2d(2).to(dtype=torch.float) + + def forward(self, x): + x = self.conv(x) + x = self.bn(x) + return x + + +class SubModelWithoutFusion(nn.Module): + def __init__(self): + super().__init__() + self.conv = nn.Conv2d(2, 2, 1, bias=None).to(dtype=torch.float) + self.relu = nn.ReLU(inplace=False).to(dtype=torch.float) + + def forward(self, x): + return self.relu(self.conv(x)) + +class ModelForFusion(nn.Module): + def __init__(self, qconfig): + super().__init__() + self.conv1 = nn.Conv2d(3, 2, 1, bias=None).to(dtype=torch.float) + self.bn1 = nn.BatchNorm2d(2).to(dtype=torch.float) + self.relu1 = nn.ReLU(inplace=True).to(dtype=torch.float) + self.sub1 = SubModelForFusion() + self.sub2 = SubModelWithoutFusion() + self.fc = nn.Linear(36, 10).to(dtype=torch.float) + self.quant = QuantStub() + self.dequant = DeQuantStub() + self.qconfig = qconfig + self.conv2 = nn.Conv3d(3, 2, (1, 1, 1), bias=None).to(dtype=torch.float) + self.relu2 = nn.ReLU(inplace=False).to(dtype=torch.float) + self.bn2 = nn.BatchNorm3d(2).to(dtype=torch.float) + self.relu3 = nn.ReLU(inplace=True).to(dtype=torch.float) + self.conv3 = nn.Conv1d(3, 3, 2).to(dtype=torch.float) + self.bn3 = nn.BatchNorm1d(3).to(dtype=torch.float) + self.relu4 = nn.ReLU(inplace=True).to(dtype=torch.float) + # don't quantize sub2 + self.sub2.qconfig = None + self.fc.qconfig = None + + def forward(self, x): + x = x.squeeze(2) + x = self.quant(x) + x = self.conv3(x) + x = self.bn3(x) + x = self.relu4(x) + x = x.unsqueeze(2) + y = x.unsqueeze(2) + x = self.conv1(x) + x = self.bn1(x) + x = self.relu1(x) + x = self.sub1(x) + x = self.dequant(x) + x = self.sub2(x) + x = x.reshape(-1, 36).contiguous() + x = self.fc(x) + y = self.conv2(y) + y = self.relu2(y) + y = self.bn2(y) + y = self.relu3(y) + y = self.dequant(y) + return x + +class ConvBNReLU(nn.Sequential): + def __init__(self): + super().__init__( + nn.Conv2d(3, 3, 1, 1, bias=False), + nn.BatchNorm2d(3), + nn.ReLU(inplace=False) + ) + +class ModelWithSequentialFusion(nn.Module): + def __init__(self): + super().__init__() + self.conv1 = nn.Conv2d(3, 3, 1) + self.relu1 = nn.ReLU(inplace=False) + layers = [] + for i in range(3): + layers.append(ConvBNReLU()) + self.features = nn.Sequential(*layers) + head = [nn.Linear(300, 10), nn.ReLU(inplace=False)] + self.classifier = nn.Sequential(*head) + self.seq = nn.Sequential() + self.quant = QuantStub() + self.dequant = DeQuantStub() + + def forward(self, x): + x = self.quant(x) + x = self.conv1(x) + x = self.relu1(x) + x = self.features(x) + x = torch.reshape(x, (-1, 3 * 10 * 10)) + x = self.classifier(x) + x = self.seq(x) + x = self.dequant(x) + return x + +class ModelForFusionWithBias(nn.Module): + def __init__(self): + super().__init__() + self.conv1 = nn.Conv2d(3, 2, 5, 
bias=True).to(dtype=torch.float) + self.bn1 = nn.BatchNorm2d(2).to(dtype=torch.float) + self.relu1 = nn.ReLU(inplace=True).to(dtype=torch.float) + self.conv2 = nn.Conv2d(2, 2, 1, bias=True).to(dtype=torch.float) + self.bn2 = nn.BatchNorm2d(2).to(dtype=torch.float) + self.quant = QuantStub() + self.dequant = DeQuantStub() + + def forward(self, x): + x = self.quant(x) + x = self.conv1(x) + x = self.bn1(x) + x = self.relu1(x) + x = self.conv2(x) + x = self.bn2(x) + x = self.dequant(x) + return x + +class ModelForLinearBNFusion(nn.Module): + def __init__(self): + super().__init__() + self.fc = nn.Linear(20, 10) + self.bn = nn.BatchNorm1d(10) + nn.init.uniform_(self.bn.weight) + nn.init.uniform_(self.bn.bias) + + def forward(self, x): + return self.bn(self.fc(x)) + +class DummyObserver(torch.nn.Module): + def calculate_qparams(self): + return 1.0, 0 + + def forward(self, x): + return x + + +class ModelForConvTransposeBNFusion(nn.Module): + def __init__(self): + super().__init__() + self.conv1 = nn.ConvTranspose1d(3, 3, 1) + self.bn1 = nn.BatchNorm1d(3) + self.conv2 = nn.ConvTranspose2d(3, 3, 1) + self.bn2 = nn.BatchNorm2d(3) + self.conv3 = nn.ConvTranspose3d(3, 3, 1) + self.bn3 = nn.BatchNorm3d(3) + + def forward(self, x): + x = self.conv1(x) + x = self.bn1(x) + x = x.unsqueeze(2) + x = self.conv2(x) + x = self.bn2(x) + x = x.unsqueeze(2) + x = self.conv3(x) + x = self.bn3(x) + return x + + +class ModelWithFunctionals(torch.nn.Module): + def __init__(self): + super().__init__() + self.mycat = nnq.FloatFunctional() + self.myadd = nnq.FloatFunctional() + self.myadd_relu = nnq.FloatFunctional() + self.mymatmul = nnq.FloatFunctional() + # Tracing doesnt work yet for c10 ops with scalar inputs + # https://github.com/pytorch/pytorch/issues/27097 + # self.my_scalar_add = nnq.FloatFunctional() + # self.my_scalar_mul = nnq.FloatFunctional() + + def forward(self, x): + y = self.mycat.cat([x, x, x]) + z = self.myadd.add(y, y) + w = self.myadd_relu.add_relu(z, z) + u = self.mymatmul.matmul(w, w.T) + # Tracing doesnt work yet for c10 ops with scalar inputs + # https://github.com/pytorch/pytorch/issues/27097 + # w = self.my_scalar_add.add_scalar(w, -0.5) + # w = self.my_scalar_mul.mul_scalar(w, 0.5) + return u + + +class ResNetBase(torch.nn.Module): + def __init__(self): + super().__init__() + norm_layer = nn.BatchNorm2d + inplanes = 3 + self.conv1 = nn.Conv2d(inplanes, inplanes, (1, 1), bias=False) + self.bn1 = norm_layer(inplanes) + self.relu1 = nn.ReLU() + self.relu2 = nn.ReLU() + self.downsample = torch.nn.Identity() + self.myop = nn.quantized.FloatFunctional() + self.avgpool = nn.AdaptiveAvgPool2d((1, 1)) + self.fc = torch.nn.Linear(inplanes, 1) + + def forward(self, x): + out = self.conv1(x) + out = self.bn1(out) + out = self.relu1(out) + identity = self.downsample(x) + out = self.myop.add(out, identity) + out = self.relu2(out) + out = self.avgpool(out) + out = torch.flatten(out, 1) + out = self.fc(out) + return out + + def fuse_model(self): + # TODO: remove this check and define two fuse_model function on this module + if self.training: + torch.ao.quantization.fuse_modules_qat(self, [['conv1', 'bn1', 'relu1']], inplace=True) + else: + torch.ao.quantization.fuse_modules(self, [['conv1', 'bn1', 'relu1']], inplace=True) + +class ModelMultipleOps(torch.nn.Module): + def __init__(self): + super().__init__() + norm_layer = nn.BatchNorm2d + inplanes = 3 + self.conv1 = nn.Conv2d(inplanes, inplanes, (1, 1), bias=False) + self.conv2 = nn.Conv2d(inplanes, inplanes, (1, 1), bias=False) + self.bn1 = 
norm_layer(inplanes) + self.relu1 = nn.ReLU() + self.relu2 = nn.ReLU() + self.downsample = torch.nn.Identity() + self.skip_add = nn.quantized.FloatFunctional() + self.cat = nn.quantized.FloatFunctional() + self.avgpool = nn.AdaptiveAvgPool2d((4, 4)) + self.fc = nn.Linear(12, 6) + + def forward(self, x): + out = self.conv1(x) + out = self.bn1(out) + out = self.relu1(out) + identity = self.downsample(x) + out = self.skip_add.add(out, identity) + out = self.relu2(out) + out = self.avgpool(out) + out = self.conv2(out) + out = torch.nn.functional.max_pool2d(out, 2, 2) + out = self.cat.cat([out, out]) + out = out.reshape(-1, 3 * 2 * 2) + out = self.fc(out) + return out + +# Model to ensure consistency of fake quant with true quant +# Average pooling and mean operations are not modelled +# accurately with fake-quant so this model does not +# contain those operations +class ModelMultipleOpsNoAvgPool(torch.nn.Module): + def __init__(self): + super().__init__() + norm_layer = nn.BatchNorm2d + inplanes = 3 + self.conv1 = nn.Conv2d(inplanes, inplanes, (1, 1), bias=False) + self.conv2 = nn.Conv2d(inplanes, inplanes, (1, 1), bias=False) + self.bn1 = norm_layer(inplanes) + self.relu1 = nn.ReLU() + self.relu2 = nn.ReLU() + self.skip_add = nn.quantized.FloatFunctional() + self.cat = nn.quantized.FloatFunctional() + self.maxpool = nn.MaxPool2d((4, 4)) + self.fc = nn.Linear(12, 6) + + def forward(self, x): + out = self.conv1(x) + out = self.bn1(out) + out = self.relu1(out) + skip = self.conv2(x) + out = self.skip_add.add(out, skip) + out = self.relu2(out) + out = self.maxpool(out) + out = self.conv2(out) + out = torch.nn.functional.max_pool2d(out, 2, 2) + out = self.cat.cat([out, out]) + out = out.reshape(-1, 3 * 2 * 2) + out = self.fc(out) + return out + +class EmbeddingBagModule(torch.nn.Module): + def __init__(self): + super().__init__() + self.emb = torch.nn.EmbeddingBag(num_embeddings=10, embedding_dim=12, + include_last_offset=True, scale_grad_by_freq=False, mode='sum') + + def forward(self, indices, offsets, per_sample_weights): + return self.emb(indices, offsets, per_sample_weights) + +class EmbeddingModule(torch.nn.Module): + def __init__(self): + super().__init__() + self.emb = torch.nn.Embedding(num_embeddings=10, embedding_dim=12) + + def forward(self, indices): + return self.emb(indices) + +class EmbeddingWithStaticLinear(torch.nn.Module): + def __init__(self): + super().__init__() + self.emb = torch.nn.EmbeddingBag(num_embeddings=10, embedding_dim=12) + self.fc = torch.nn.Linear(4, 2) + self.emb.qconfig = float_qparams_weight_only_qconfig + self.qconfig = default_qconfig + self.quant = QuantStub() + self.dequant = DeQuantStub() + + def forward(self, indices, offsets, linear_in): + emb = self.emb(indices, offsets) + q_x = self.quant(linear_in) + fc = self.fc(q_x) + fc = self.dequant(fc) + features = torch.cat([fc] + [emb], dim=1) + return features + +class DenseTopMLP(nn.Module): + + def __init__(self, dense_dim, dense_out, embedding_dim, top_out_in, top_out_out) -> None: + super().__init__() + + self.dense_mlp = nn.Sequential( + nn.Linear(dense_dim, dense_out), + ) + self.top_mlp = nn.Sequential( + nn.Linear(dense_out + embedding_dim, top_out_in), + nn.Linear(top_out_in, top_out_out), + ) + + def forward( + self, + sparse_feature: torch.Tensor, + dense: torch.Tensor, + ) -> torch.Tensor: + dense_feature = self.dense_mlp(dense) + features = torch.cat([dense_feature] + [sparse_feature], dim=1) + + out = self.top_mlp(features) + return out + +# thin wrapper around embedding bag, because tracing 
inside nn.Embedding +# bag is not supported at the moment and this is top level +class EmbBagWrapper(nn.Module): + def __init__(self, num_embeddings, embedding_dim): + super().__init__() + self.emb_bag = nn.EmbeddingBag(num_embeddings, embedding_dim, mode='sum') + + def forward(self, indices, offsets): + return self.emb_bag(indices, offsets) + +class SparseNNModel(nn.Module): + _NUM_EMBEDDINGS = 10 + _EMBEDDING_DIM = 5 + _DENSE_DIM = 4 + _DENSE_OUTPUT = 2 + _TOP_OUT_IN = 2 + _TOP_OUT_OUT = 2 + _TOP_MLP_DIM = 1 + + def __init__(self) -> None: + super().__init__() + + self.model_sparse = EmbBagWrapper(self._NUM_EMBEDDINGS, self._EMBEDDING_DIM) + self.dense_top = DenseTopMLP( + self._DENSE_DIM, self._DENSE_OUTPUT, self._EMBEDDING_DIM, self._TOP_OUT_IN, + self._TOP_OUT_OUT) + + def forward( + self, + sparse_indices: torch.Tensor, + sparse_offsets: torch.Tensor, + dense: torch.Tensor, + ) -> torch.Tensor: + + sparse_feature = self.model_sparse(sparse_indices, sparse_offsets) + out = self.dense_top(sparse_feature, dense) + + return out + +class TestHelperModules: + class Conv2dPropAnnotaton(torch.nn.Module): + def __init__(self): + super().__init__() + self.conv = torch.nn.Conv2d(3, 3, 3) + self.linear = torch.nn.Linear(3, 3) + + def forward(self, x): + x = self.conv(x) + x = x.view(-1, 3) + x = torch.nn.functional.hardtanh(x, -0.5, 0.5) + x = self.linear(x) + return x + + class Conv2dWithObsSharingOps(torch.nn.Module): + def __init__(self): + super().__init__() + self.conv = torch.nn.Conv2d(3, 3, 3) + self.hardtanh = torch.nn.Hardtanh() + self.adaptive_avg_pool2d = torch.nn.AdaptiveAvgPool2d((1, 1)) + + def forward(self, x): + x = self.conv(x) + x = self.adaptive_avg_pool2d(x) + x = self.hardtanh(x) + x = torch.mean(x) + return x + + class Conv2dWithTwoLinearPermute(torch.nn.Module): + def __init__(self): + super().__init__() + self.conv = torch.nn.Conv2d(3, 16, 3) + self.linear1 = torch.nn.Linear(16, 8, bias=False) + self.linear2 = torch.nn.Linear(8, 8) + + def forward(self, x): + conv_out = self.conv(x) + permute_out = torch.permute(conv_out, (0, 2, 3, 1)) + return self.linear2(self.linear1(permute_out)) + + class Conv2dWithTwoLinear(torch.nn.Module): + def __init__(self): + super().__init__() + self.conv = torch.nn.Conv2d(3, 16, 3) + self.linear1 = torch.nn.Linear(64, 8, bias=False) + self.linear2 = torch.nn.Linear(8, 8) + + def forward(self, x): + conv_out = self.conv(x) + reshape_out = torch.reshape(conv_out, (2, 64)) + return self.linear2(self.linear1(reshape_out)) + + class ConvLinearWPermute(torch.nn.Module): + def __init__(self): + super().__init__() + self.conv = torch.nn.Conv2d(3, 8, 3) + self.linear1 = torch.nn.Linear(8, 8) + + def forward(self, x): + conv_out = self.conv(x) + permute_out = torch.permute(conv_out, (0, 2, 3, 1)) + return self.linear1(permute_out) + + class TwoLinearModule(torch.nn.Module): + def __init__(self): + super().__init__() + self.linear1 = torch.nn.Linear(8, 16, bias=False) + self.linear2 = torch.nn.Linear(16, 8) + + def forward(self, x): + return self.linear2(self.linear1(x)) + + class ConvMaxPool2d(torch.nn.Module): + def __init__(self): + super().__init__() + self.conv = torch.nn.Conv2d(2, 2, 1) + self.pool = torch.nn.MaxPool2d(1, 1) + + def forward(self, x): + x = self.conv(x) + x = self.pool(x) + return x + + class ConvWithAdaptiveAvgPool2d(torch.nn.Module): + def __init__(self): + super().__init__() + self.conv = torch.nn.Conv2d(3, 3, 3) + self.adaptive_avg_pool2d = torch.nn.AdaptiveAvgPool2d((1, 1)) + + def forward(self, x): + x = self.conv(x) + x = 
self.adaptive_avg_pool2d(x) + return x + + class ConvWithBNRelu(torch.nn.Module): + def __init__(self, relu, dim=2, bn=True, bias=True): + super().__init__() + convs = {1: torch.nn.Conv1d, 2: torch.nn.Conv2d} + bns = {1: torch.nn.BatchNorm1d, 2: torch.nn.BatchNorm2d} + self.conv = convs[dim](3, 3, 3, bias=bias) + + if bn: + self.bn = bns[dim](3) + else: + self.bn = torch.nn.Identity() + if relu: + self.relu = torch.nn.ReLU() + else: + self.relu = torch.nn.Identity() + + def forward(self, x): + x = self.conv(x) + x = self.bn(x) + return self.relu(x) + + class Conv2dThenConv1d(torch.nn.Module): + def __init__(self): + super().__init__() + self.conv1d = torch.nn.Conv1d(3, 3, 3) + self.conv2d = torch.nn.Conv2d(3, 3, 3) + + def forward(self, x): + x = self.conv2d(x) + x = x.squeeze(0) + x = self.conv1d(x) + return x + + def example_inputs(self): + return (torch.randn(1, 3, 5, 5),) + + class Conv2dWithCat(torch.nn.Module): + def __init__(self): + super().__init__() + self.conv1 = torch.nn.Conv2d(3, 3, 3) + self.conv2 = torch.nn.Conv2d(3, 3, 3) + + def forward(self, x, y): + x = self.conv1(x) + y = self.conv2(y) + z = torch.cat([x, y], dim=1) + return z + + class Conv2dWithTwoCat(torch.nn.Module): + def __init__(self): + super().__init__() + self.conv1 = torch.nn.Conv2d(3, 3, 3) + self.conv2 = torch.nn.Conv2d(3, 3, 3) + + def forward(self, x1, x2, x3, x4): + x1 = self.conv1(x1) + x2 = self.conv2(x2) + y = torch.cat([x1, x2], dim=1) + z = x3 + x4 + w = torch.cat([z, y]) + return w + + class ThreeAdd(torch.nn.Module): + def forward(self, x1, x2, x3, x4): + y = x1 + x2 + z = x3 + x4 + w = y + z + return w + + class EmbeddingModule(torch.nn.Module): + def __init__(self): + super().__init__() + self.emb = torch.nn.Embedding(num_embeddings=10, embedding_dim=12) + + def forward(self, indices): + return self.emb(indices) + + class EmbeddingConvLinearModule(torch.nn.Module): + def __init__(self): + super().__init__() + self.emb = torch.nn.Embedding(num_embeddings=10, embedding_dim=8) + self.conv = torch.nn.Conv2d(8, 16, (1, 3)) + self.linear = torch.nn.Linear(16, 8) + + def forward(self, indices): + embeddings = self.emb(indices) + embeddings = torch.unsqueeze(embeddings, dim=0) + embeddings = torch.permute(embeddings, (0, 3, 1, 2)) + conv_out = self.conv(embeddings) + conv_out = torch.permute(conv_out, (0, 2, 3, 1)) + conv_out = torch.squeeze(conv_out, dim=0) + return self.linear(conv_out) + + class AddInplaceAdd(torch.nn.Module): + def forward(self, x, y): + x = x + y + x += y + return x + + class MulInplaceMul(torch.nn.Module): + def forward(self, x, y): + x = x * y + x *= y + return x + + class AddMulScalar(torch.nn.Module): + def forward(self, x): + x = x + 3 + x = x * 3 + x += 3 + x *= 3 + return x + + class ConvBnReLU2dAndLinearReLU(torch.nn.Module): + def __init__(self): + super().__init__() + self.conv_bn_relu = TestHelperModules.ConvWithBNRelu(relu=True) + self.linear = torch.nn.Linear(3, 8, bias=False) + self.relu = torch.nn.ReLU() + + def forward(self, x): + x = self.conv_bn_relu(x) + permute_out = torch.permute(x, (0, 2, 3, 1)) + linear_out = self.linear(permute_out) + return linear_out diff --git a/env-llmeval/lib/python3.10/site-packages/torch/testing/_internal/common_quantized.py b/env-llmeval/lib/python3.10/site-packages/torch/testing/_internal/common_quantized.py new file mode 100644 index 0000000000000000000000000000000000000000..0db312f2c2093d50f579e08b6e34f0938cc1e169 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torch/testing/_internal/common_quantized.py @@ -0,0 
+1,225 @@ +r"""Importing this file includes common utility methods for checking quantized +tensors and modules. +""" +import numpy as np +import torch +from contextlib import contextmanager +from torch.testing._internal.common_utils import TEST_WITH_ASAN, TEST_WITH_TSAN, TEST_WITH_UBSAN, IS_PPC, IS_MACOS, IS_WINDOWS + +supported_qengines = torch.backends.quantized.supported_engines +supported_qengines.remove('none') +# Note: We currently do not run QNNPACK tests on WINDOWS and MACOS as it is flaky. Issue #29326 +# QNNPACK is not supported on PPC +# QNNPACK throws ASAN heap-buffer-overflow error. +if 'qnnpack' in supported_qengines and any([IS_PPC, TEST_WITH_ASAN, TEST_WITH_TSAN, TEST_WITH_UBSAN, IS_MACOS, IS_WINDOWS]): + supported_qengines.remove('qnnpack') + +def _conv_output_shape(input_size, kernel_size, padding, stride, dilation, + output_padding=0): + """Computes the output shape given convolution parameters.""" + return np.floor((input_size + 2 * padding - kernel_size - (kernel_size - 1) + * (dilation - 1)) / stride) + 2 * output_padding + 1 + +# Quantization references +def _quantize(x, scale, zero_point, qmin=None, qmax=None, dtype=np.uint8): + """Quantizes a numpy array.""" + if qmin is None: + qmin = np.iinfo(dtype).min + if qmax is None: + qmax = np.iinfo(dtype).max + qx = np.round(x / scale + zero_point).astype(np.int64) + qx = np.clip(qx, qmin, qmax) + qx = qx.astype(dtype) + return qx + + +def _dequantize(qx, scale, zero_point): + """Dequantizes a numpy array.""" + x = (qx.astype(float) - zero_point) * scale + return x + + +def _requantize(x, multiplier, zero_point, qmin=0, qmax=255, qtype=np.uint8): + """Requantizes a numpy array, i.e., intermediate int32 or int16 values are + converted back to given type""" + qx = (x * multiplier).round() + zero_point + qx = np.clip(qx, qmin, qmax).astype(qtype) + return qx + +def _calculate_dynamic_qparams(X, dtype, reduce_range=False, qscheme=torch.per_tensor_affine): + """Calculate the dynamic quantization parameters (scale, zero_point) + according to the min and max element of the tensor""" + assert qscheme in (torch.per_tensor_affine, torch.per_tensor_symmetric) + if qscheme == torch.per_tensor_symmetric: + assert dtype == torch.qint8 + if isinstance(X, torch.Tensor): + X = X.numpy() + if dtype == torch.qint8: + if reduce_range: + qmin, qmax = -64, 63 + else: + qmin, qmax = -128, 127 + else: # dtype == torch.quint8 + if reduce_range: + qmin, qmax = 0, 127 + else: + qmin, qmax = 0, 255 + min_val = X.min() + max_val = X.max() + is_symmetric = (qscheme == torch.per_tensor_symmetric) + if min_val == max_val: + scale = 1.0 + zero_point = 0 + else: + if is_symmetric: + max_val = max(max_val, -min_val) + min_val = -max_val + scale = (max_val - min_val) / (qmax - qmin) + scale = max(scale, np.finfo(np.float32).eps) + zero_point = 0 + else: + max_val = max(max_val, 0.0) + min_val = min(min_val, 0.0) + scale = (max_val - min_val) / (qmax - qmin) + scale = max(scale, np.finfo(np.float32).eps) + zero_point = qmin - round(min_val / scale) + zero_point = max(qmin, zero_point) + zero_point = min(qmax, zero_point) + return [float(scale), int(zero_point)] + +def _calculate_dynamic_per_channel_qparams(X, dtype): + """Calculate the dynamic quantization parameters (scale, zero_point) + according to the min and max element of the tensor""" + if isinstance(X, torch.Tensor): + X = X.numpy() + qmin, qmax = torch.iinfo(dtype).min, torch.iinfo(dtype).max + n_levels = qmax - qmin + scale = np.zeros(X.shape[0], dtype=np.float64) + zero_point = 
np.zeros(X.shape[0], dtype=np.int64) + for i in range(zero_point.shape[0]): + min_val = X.min() + max_val = X.max() + if min_val == max_val: + scale[i] = 1.0 + zero_point[i] = 0 + else: + max_val = max(max_val, 0.0) + min_val = min(min_val, 0.0) + scale[i] = (max_val - min_val) / n_levels + scale[i] = max(scale[i], np.finfo(np.float32).eps) + zero_point[i] = qmin - round(min_val / scale[i]) + zero_point[i] = max(qmin, zero_point[i]) + zero_point[i] = min(qmax, zero_point[i]) + + return scale, zero_point + +def _snr(x, x_hat): + """Calculates the signal to noise ratio and returns the signal and noise + power, as well as the SNR in dB. + If the input is a list/tuple this function is called recursively on each + element. The result will have the same nested structure as the inputs. + + Args: + x, x_hat: Either a tensor or a nested list/tuple of tensors. + Returns: + signal, noise, SNR(in dB): Either floats or a nested list of floats + """ + if isinstance(x, (list, tuple)): + assert(len(x) == len(x_hat)) + res = [] + for idx in range(len(x)): + res.append(_snr(x[idx], x_hat[idx])) + return res + if x_hat.is_quantized: + x_hat = x_hat.dequantize() + if x.is_quantized: + x = x.dequantize() + noise = (x - x_hat).norm() + if noise == 0: + return 0.0, float('inf'), float('inf') + signal = x.norm() + snr = signal / noise + snr_db = 20 * snr.log10() + return signal, noise, snr_db + +@contextmanager +def override_quantized_engine(qengine): + previous = torch.backends.quantized.engine + torch.backends.quantized.engine = qengine + try: + yield + finally: + torch.backends.quantized.engine = previous + +@contextmanager +def override_cpu_allocator_for_qnnpack(qengine_is_qnnpack): + try: + if qengine_is_qnnpack: + torch._C._set_default_mobile_cpu_allocator() + yield + finally: + if qengine_is_qnnpack: + torch._C._unset_default_mobile_cpu_allocator() + +# TODO: Update all quantization tests to use this decorator. +# Currently for some of the tests it seems to have inconsistent params +# for fbgemm vs qnnpack. +def override_qengines(qfunction): + def test_fn(*args, **kwargs): + for qengine in supported_qengines: + with override_quantized_engine(qengine): + # qfunction should not return anything. 
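+                # (For context: qfunction is invoked once per engine in
+                # supported_qengines, with torch.backends.quantized.engine
+                # temporarily switched by the override_quantized_engine
+                # context manager above for each iteration.)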
+ qfunction(*args, **kwargs) + return test_fn + +def qengine_is_fbgemm(): + return torch.backends.quantized.engine == 'fbgemm' +def qengine_is_qnnpack(): + return torch.backends.quantized.engine == 'qnnpack' +def qengine_is_onednn(): + return torch.backends.quantized.engine == 'onednn' +def qengine_is_x86(): + return torch.backends.quantized.engine == 'x86' + +# Helper function used to simulate per-channel fake-quant against any axis +def _permute_to_axis_zero(X, axis): + new_axis_list = list(range(X.dim())) + new_axis_list[axis] = 0 + new_axis_list[0] = axis + y = X.permute(tuple(new_axis_list)) + return y, new_axis_list + +# Reference method for fake quantize +# Note: because scale/zero_point are left as float in the actual kernel, this mimics how fake_quant works for float16/64 +def _fake_quantize_per_channel_affine_reference(X, per_channel_scale, per_channel_zero_point, axis, quant_min, quant_max): + dtype = X.dtype + X, permute_axis_list = _permute_to_axis_zero(X.to(torch.float32), axis) + res = torch.zeros_like(X) + + for i in range(X.size()[0]): + res[i] = (torch.clamp(torch.round(X[i] * (1.0 / per_channel_scale[i]) + + per_channel_zero_point[i]), quant_min, quant_max) - per_channel_zero_point[i]) * per_channel_scale[i] + + out = res.permute(tuple(permute_axis_list)) + return out.to(dtype) + +# Reference method for the gradient of the fake quantize operator +# Note: because scale/zero_point are left as float in the actual kernel, this mimics how fake_quant works for float16/64 +def _fake_quantize_per_channel_affine_grad_reference(dY, X, per_channel_scale, per_channel_zero_point, axis, quant_min, quant_max): + dtype = X.dtype + X, permute_axis_list = _permute_to_axis_zero(X.to(torch.float32), axis) + Xq = torch.zeros_like(X) + for i in range(X.size()[0]): + Xq[i] = torch.round(X[i] * (1.0 / per_channel_scale[i]) + per_channel_zero_point[i]) + Xq = Xq.permute(tuple(permute_axis_list)) + mask = (Xq >= quant_min) * (Xq <= quant_max) + res = torch.zeros_like(dY) + res[mask] = dY[mask] + return res.to(dtype) + +def to_tensor(X, device): + if not isinstance(X, torch.Tensor): + X = torch.tensor(X) + else: + X = X.clone().detach() + return X.to(device=torch.device(device), dtype=torch.float32) diff --git a/env-llmeval/lib/python3.10/site-packages/torch/testing/_internal/common_subclass.py b/env-llmeval/lib/python3.10/site-packages/torch/testing/_internal/common_subclass.py new file mode 100644 index 0000000000000000000000000000000000000000..429fcaf86ab367b53e6150521b97af2590ce9c5a --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torch/testing/_internal/common_subclass.py @@ -0,0 +1,219 @@ +import torch +from copy import deepcopy +from torch.utils._pytree import tree_map + +# TODO: Move LoggingTensor here. +from torch.testing._internal.logging_tensor import LoggingTensor + + +# Base class for wrapper-style tensors. +class WrapperTensor(torch.Tensor): + @staticmethod + def __new__(cls, *args, **kwargs): + t, kwargs = cls.get_wrapper_properties(*args, **kwargs) + if "size" not in kwargs: + size = t.size() + else: + size = kwargs["size"] + del kwargs["size"] + if "dtype" not in kwargs: + kwargs["dtype"] = t.dtype + if "layout" not in kwargs: + kwargs["layout"] = t.layout + if "device" not in kwargs: + kwargs["device"] = t.device + if "requires_grad" not in kwargs: + kwargs["requires_grad"] = False + # Ignore memory_format and pin memory for now as I don't know how to + # safely access them on a Tensor (if possible??) 
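+        # Note on the call below: _make_wrapper_subclass builds a Tensor "shell"
+        # that carries the requested size/dtype/device metadata but no real
+        # storage of its own; the concrete subclasses keep their actual data
+        # (e.g. a diagonal, or a values/indices pair) as ordinary Python
+        # attributes set in __init__.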
+ + wrapper = torch.Tensor._make_wrapper_subclass(cls, size, **kwargs) + wrapper._validate_methods() + return wrapper + + @classmethod + def get_wrapper_properties(cls, *args, **kwargs): + # Should return both an example Tensor and a dictionary of kwargs + # to override any of that example Tensor's properly. + # This is very similar to the `t.new_*(args)` API + raise NotImplementedError("You need to implement get_wrapper_properties") + + def _validate_methods(self): + # Skip this if not in debug mode? + # Changing these on the python side is wrong as it would not be properly reflected + # on the c++ side + # This doesn't catch attributes set in the __init__ + forbidden_overrides = ["size", "stride", "dtype", "layout", "device", "requires_grad"] + for el in forbidden_overrides: + if getattr(self.__class__, el) is not getattr(torch.Tensor, el): + raise RuntimeError(f"Subclass {self.__class__.__name__} is overwriting the " + f"property {el} but this is not allowed as such change would " + "not be reflected to c++ callers.") + + +class DiagTensorBelow(WrapperTensor): + @classmethod + def get_wrapper_properties(cls, diag, requires_grad=False): + assert diag.ndim == 1 + return diag, {"size": diag.size() + diag.size(), "requires_grad": requires_grad} + + def __init__(self, diag, requires_grad=False): + self.diag = diag + + handled_ops = {} + + # We disable torch function here to avoid any unwanted wrapping of the output + __torch_function__ = torch._C._disabled_torch_function_impl + + @classmethod + def __torch_dispatch__(cls, func, types, args=(), kwargs=None): + if not all(issubclass(cls, t) for t in types): + return NotImplemented + + # For everything else, call the handler: + fn = cls.handled_ops.get(func.__name__, None) + if fn: + return fn(*args, **(kwargs or {})) + else: + # Note that here, because we don't need to provide the autograd formulas + # we can have a default "fallback" that creates a plain Tensor based + # on the diag elements and calls the func again. 
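+            # Concretely, unwrap() below expands each DiagTensorBelow into the
+            # dense matrix given by diag.diag(), the op is re-run on plain
+            # Tensors, and wrap() re-wraps any 1-d result (or a 2-d result that
+            # is still diagonal) back into DiagTensorBelow.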
+ + def unwrap(e): + return e.diag.diag() if isinstance(e, DiagTensorBelow) else e + + def wrap(e): + if isinstance(e, torch.Tensor) and e.ndim == 1: + return DiagTensorBelow(e) + if isinstance(e, torch.Tensor) and e.ndim == 2 and e.count_nonzero() == e.diag().count_nonzero(): + return DiagTensorBelow(e.diag()) + return e + + rs = tree_map(wrap, func(*tree_map(unwrap, args), **tree_map(unwrap, kwargs or {}))) + return rs + + def __repr__(self): + return super().__repr__(tensor_contents=f"diag={self.diag}") + + +class SparseTensor(WrapperTensor): + @classmethod + def get_wrapper_properties(cls, size, values, indices, requires_grad=False): + assert values.device == indices.device + return values, {"size": size, "requires_grad": requires_grad} + + def __init__(self, size, values, indices, requires_grad=False): + self.values = values + self.indices = indices + + def __repr__(self): + return super().__repr__(tensor_contents=f"values={self.values}, indices={self.indices}") + + def sparse_to_dense(self): + res = torch.zeros(self.size(), dtype=self.values.dtype) + res[self.indices.unbind(1)] = self.values + return res + + @staticmethod + def from_dense(t): + indices = t.nonzero() + values = t[indices.unbind(1)] + return SparseTensor(t.size(), values, indices) + + @classmethod + def __torch_dispatch__(cls, func, types, args=(), kwargs=None): + func_name = f"{func.__module__}.{func.__name__}" + + res = cls._try_call_special_impl(func_name, args, kwargs) + if res is not NotImplemented: + return res + + # Otherwise, use a default implementation that construct dense + # tensors and use that to compute values + def unwrap(e): + return e.sparse_to_dense() if isinstance(e, SparseTensor) else e + + # Wrap back all Tensors into our custom class + def wrap(e): + # Check for zeros and use that to get indices + return SparseTensor.from_dense(e) if isinstance(e, torch.Tensor) else e + + rs = tree_map(wrap, func(*tree_map(unwrap, args), **tree_map(unwrap, kwargs or {}))) + return rs + + # To show how things happen later + def __rmul__(self, other): + return super().__rmul__(other) + + _SPECIAL_IMPLS = {} + + @classmethod + def _try_call_special_impl(cls, func, args, kwargs): + if func not in cls._SPECIAL_IMPLS: + return NotImplemented + return cls._SPECIAL_IMPLS[func](args, kwargs) + + +# Example non-wrapper subclass that stores extra state. +class NonWrapperTensor(torch.Tensor): + def __new__(cls, data): + t = torch.Tensor._make_subclass(cls, data) + t.extra_state = { + 'last_func_called': None + } + return t + + @classmethod + def __torch_function__(cls, func, types, args=(), kwargs=None): + result = super().__torch_function__(func, types, args, kwargs) + + if isinstance(result, cls): + # Do something with the extra state. For the example here, just store the name of the + # last function called (skip for deepcopy so the copy has the same extra state). + if func is torch.Tensor.__deepcopy__: + result.extra_state = deepcopy(args[0].extra_state) + else: + result.extra_state = { + 'last_func_called': func.__name__, + } + + return result + + # new_empty() must be defined for deepcopy to work + def new_empty(self, shape): + return type(self)(torch.empty(shape)) + + +# Class used to store info about subclass tensors used in testing. 
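+# (A rough usage sketch, not part of this module: tests typically iterate over
+# the `subclass_db` mapping defined at the bottom of this file, e.g.
+#     for subclass_cls, info in subclass_db.items():
+#         t = info.create_fn((3, 3))   # one sample tensor per registered subclass
+# and branch on `info.closed_under_ops` when an op may return a plain Tensor.)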
+class SubclassInfo: + + __slots__ = ['name', 'create_fn', 'closed_under_ops'] + + def __init__(self, name, create_fn, closed_under_ops=True): + self.name = name + self.create_fn = create_fn # create_fn(shape) -> tensor instance + self.closed_under_ops = closed_under_ops + + +subclass_db = { + torch.Tensor: SubclassInfo( + 'base_tensor', create_fn=torch.randn + ), + NonWrapperTensor: SubclassInfo( + 'non_wrapper_tensor', + create_fn=lambda shape: NonWrapperTensor(torch.randn(shape)) + ), + LoggingTensor: SubclassInfo( + 'logging_tensor', + create_fn=lambda shape: LoggingTensor(torch.randn(shape)) + ), + SparseTensor: SubclassInfo( + 'sparse_tensor', + create_fn=lambda shape: SparseTensor.from_dense(torch.randn(shape).relu()) + ), + DiagTensorBelow: SubclassInfo( + 'diag_tensor_below', + create_fn=lambda shape: DiagTensorBelow(torch.randn(shape)), + closed_under_ops=False # sparse semantics + ), +} diff --git a/env-llmeval/lib/python3.10/site-packages/torch/testing/_internal/common_utils.py b/env-llmeval/lib/python3.10/site-packages/torch/testing/_internal/common_utils.py new file mode 100644 index 0000000000000000000000000000000000000000..67e9e5992a3bb9b36283c1602c8b7bbddbc91b09 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torch/testing/_internal/common_utils.py @@ -0,0 +1,4973 @@ +r"""Importing this file must **not** initialize CUDA context. test_distributed +relies on this assumption to properly run. This means that when this is imported +no CUDA calls shall be made, including torch.cuda.device_count(), etc. + +torch.testing._internal.common_cuda.py can freely initialize CUDA context when imported. +""" + +import argparse +import contextlib +import copy +import ctypes +import errno +import functools +import gc +import inspect +import io +import json +import logging +import math +import operator +import os +import platform +import random +import re +import shutil +import signal +import socket +import subprocess +import sys +import tempfile +import threading +import time +import types +import unittest +import warnings +from collections.abc import Mapping, Sequence +from contextlib import closing, contextmanager +from copy import deepcopy +from dataclasses import dataclass +from enum import Enum +from functools import partial, wraps +from itertools import product, chain +from pathlib import Path +from statistics import mean +from typing import ( + Any, + Callable, + Dict, + Iterable, + Iterator, + List, + Optional, + Tuple, + Type, + TypeVar, + Union, +) +from unittest.mock import MagicMock + +import expecttest +import numpy as np + +import __main__ # type: ignore[import] +import torch +import torch.backends.cudnn +import torch.backends.mkl +import torch.backends.mps +import torch.backends.xnnpack +import torch.cuda +from torch import Tensor +from torch._C import ScriptDict, ScriptList # type: ignore[attr-defined] +from torch._utils_internal import get_writable_path +from torch.nn import ( + ModuleDict, + ModuleList, + ParameterDict, + ParameterList, + Sequential, +) +from torch.onnx import ( + register_custom_op_symbolic, + unregister_custom_op_symbolic, +) +from torch.testing import make_tensor +from torch.testing._comparison import ( + BooleanPair, + NonePair, + NumberPair, + Pair, + TensorLikePair, +) +from torch.testing._comparison import not_close_error_metas +from torch.testing._internal.common_dtype import get_all_dtypes +import torch.utils._pytree as pytree + +from .composite_compliance import no_dispatch + + +# Class to keep track of test flags configurable by 
environment variables. +# Flags set here are intended to be read-only and should not be modified after +# definition. +# TODO: Expand this class to handle abritrary settings in addition to boolean flags? +class TestEnvironment: + # Set of env vars to set for the repro command that is output on test failure. + # Specifically, this includes env vars that are set to non-default values and + # are not implied. Maps from env var name -> value (int) + repro_env_vars: dict = {} + + # Defines a flag usable throughout the test suite, determining its value by querying + # the specified environment variable. + # + # Args: + # name (str): The name of the flag. A global variable with this name will be set + # for convenient access throughout the test suite. + # env_var (str): The name of the primary environment variable from which to + # determine the value of this flag. If this is None or the environment variable + # is unset, the default value will be used unless otherwise implied (see + # implied_by_fn). Default: None + # default (bool): The default value to use for the flag if unset by the environment + # variable and unimplied. Default: False + # include_in_repro (bool): Indicates whether this flag should be included in the + # repro command that is output on test failure (i.e. whether it is possibly + # relevant to reproducing the test failure). Default: True + # enabled_fn (Callable): Callable returning whether the flag should be enabled + # given the environment variable value and the default value. Default: Lambda + # requiring "0" to disable if on by default OR "1" to enable if off by default. + # implied_by_fn (Callable): Thunk returning a bool to imply this flag as enabled + # by something outside of its primary environment variable setting. For example, + # this can be useful if the value of another environment variable implies the flag + # as enabled. Default: Lambda returning False to indicate no implications. + @staticmethod + def def_flag( + name, + env_var=None, + default=False, + include_in_repro=True, + enabled_fn=lambda env_var_val, default: ( + (env_var_val != "0") if default else (env_var_val == "1")), + implied_by_fn=lambda: False, + ): + enabled = default + if env_var is not None: + env_var_val = os.getenv(env_var) + enabled = enabled_fn(env_var_val, default) + implied = implied_by_fn() + enabled = enabled or implied + if include_in_repro and (env_var is not None) and (enabled != default) and not implied: + TestEnvironment.repro_env_vars[env_var] = env_var_val + + # export flag globally for convenience + assert name not in globals(), f"duplicate definition of flag '{name}'" + globals()[name] = enabled + + # Returns a string prefix usable to set environment variables for any test + # settings that should be explicitly set to match this instantiation of the + # test suite. + # Example: "PYTORCH_TEST_WITH_ASAN=1 PYTORCH_TEST_WITH_ROCM=1" + @staticmethod + def repro_env_var_prefix() -> str: + return " ".join([f"{env_var}={value}" + for env_var, value in TestEnvironment.repro_env_vars.items()]) + + +log = logging.getLogger(__name__) +torch.backends.disable_global_flags() + +FILE_SCHEMA = "file://" +if sys.platform == 'win32': + FILE_SCHEMA = "file:///" + +# NB: This flag differs semantically from others in that setting the env var to any +# non-empty value will cause it to be true: +# CI=1, CI="true", CI=0, etc. all set the flag to be true. +# CI= and an unset CI set the flag to be false. +# GitHub sets the value to CI="true" to enable it. 
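+# (Each def_flag call below exports a module-level boolean under the given name,
+# so a test file can simply do, e.g.:
+#     from torch.testing._internal.common_utils import IS_CI, IS_SANDCASTLE
+# after this module has evaluated the corresponding environment variables.)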
+TestEnvironment.def_flag("IS_CI", env_var="CI", include_in_repro=False, + enabled_fn=lambda env_var_value, _: bool(env_var_value)) +TestEnvironment.def_flag( + "IS_SANDCASTLE", + env_var="SANDCASTLE", + implied_by_fn=lambda: os.getenv("TW_JOB_USER") == "sandcastle", + include_in_repro=False) + +_is_fbcode_default = ( + hasattr(torch._utils_internal, "IS_FBSOURCE") and + torch._utils_internal.IS_FBSOURCE +) + +TestEnvironment.def_flag("IS_FBCODE", env_var="PYTORCH_TEST_FBCODE", + default=_is_fbcode_default, + include_in_repro=False) +TestEnvironment.def_flag("IS_REMOTE_GPU", env_var="PYTORCH_TEST_REMOTE_GPU", + include_in_repro=False) + +TestEnvironment.def_flag("RETRY_TEST_CASES", env_var="PYTORCH_RETRY_TEST_CASES", + include_in_repro=False) +TestEnvironment.def_flag("OVERRIDE_FLAKY_SIGNAL", env_var="PYTORCH_OVERRIDE_FLAKY_SIGNAL", + include_in_repro=False) +TestEnvironment.def_flag( + "DISABLE_RUNNING_SCRIPT_CHK", + env_var="PYTORCH_DISABLE_RUNNING_SCRIPT_CHK", + include_in_repro=False) +# NB: enabled by default unless in an fbcode context. +TestEnvironment.def_flag("PRINT_REPRO_ON_FAILURE", env_var="PYTORCH_PRINT_REPRO_ON_FAILURE", + default=(not IS_FBCODE), include_in_repro=False) + +DEFAULT_DISABLED_TESTS_FILE = '.pytorch-disabled-tests.json' +DEFAULT_SLOW_TESTS_FILE = '.pytorch-slow-tests.json' + +disabled_tests_dict = {} +slow_tests_dict = {} + +def maybe_load_json(filename): + if os.path.isfile(filename): + with open(filename) as fp: + return json.load(fp) + log.warning("Attempted to load json file '%s' but it does not exist.", filename) + return {} + +# set them here in case the tests are running in a subprocess that doesn't call run_tests +if os.getenv("SLOW_TESTS_FILE", ""): + slow_tests_dict = maybe_load_json(os.getenv("SLOW_TESTS_FILE", "")) +if os.getenv("DISABLED_TESTS_FILE", ""): + disabled_tests_dict = maybe_load_json(os.getenv("DISABLED_TESTS_FILE", "")) + +NATIVE_DEVICES = ('cpu', 'cuda', 'meta', torch._C._get_privateuse1_backend_name()) + +check_names = ['orin', 'concord', 'galen', 'xavier', 'nano', 'jetson', 'tegra'] +IS_JETSON = any(name in platform.platform() for name in check_names) + +def gcIfJetson(fn): + # Irregular Jetson host/device memory setup requires cleanup to avoid tests being killed + @functools.wraps(fn) + def wrapper(*args, **kwargs): + if IS_JETSON: + gc.collect() + torch.cuda.empty_cache() + fn(*args, **kwargs) + return wrapper + +# Tries to extract the current test function by crawling the stack. +# If unsuccessful, return None. +def extract_test_fn() -> Optional[Callable]: + try: + stack = inspect.stack() + for frame_info in stack: + frame = frame_info.frame + if "self" not in frame.f_locals: + continue + self_val = frame.f_locals["self"] + if isinstance(self_val, unittest.TestCase): + test_id = self_val.id() + test_name = test_id.split('.')[2] + test_fn = getattr(self_val, test_name).__func__ + return test_fn + except Exception: + pass + return None + +# Contains tracked input data useful for debugging purposes +@dataclass +class TrackedInput: + index: int + val: Any + type_desc: str + +# Attempt to pull out tracked input information from the test function. +# A TrackedInputIter is used to insert this information. 
+def get_tracked_input() -> Optional[TrackedInput]: + test_fn = extract_test_fn() + if test_fn is None: + return None + if not hasattr(test_fn, "tracked_input"): + return None + return test_fn.tracked_input + +def clear_tracked_input(): + test_fn = extract_test_fn() + if test_fn is None: + return + if not hasattr(test_fn, "tracked_input"): + return None + test_fn.tracked_input = None + +# Wraps an iterator and tracks the most recent value the iterator produces +# for debugging purposes. Tracked values are stored on the test function. +class TrackedInputIter: + def __init__(self, child_iter, input_type_desc, callback=lambda x: x): + self.child_iter = enumerate(child_iter) + # Input type describes the things we're tracking (e.g. "sample input", "error input"). + self.input_type_desc = input_type_desc + # Callback is run on each iterated thing to get the thing to track. + self.callback = callback + self.test_fn = extract_test_fn() + + def __iter__(self): + return self + + def __next__(self): + # allow StopIteration to bubble up + input_idx, input_val = next(self.child_iter) + self._set_tracked_input( + TrackedInput( + index=input_idx, val=self.callback(input_val), type_desc=self.input_type_desc + ) + ) + return input_val + + def _set_tracked_input(self, tracked_input: TrackedInput): + if self.test_fn is None: + return + if not hasattr(self.test_fn, "tracked_input"): + return + self.test_fn.tracked_input = tracked_input + +class _TestParametrizer: + """ + Decorator class for parametrizing a test function, yielding a set of new tests spawned + from the original generic test, each specialized for a specific set of test inputs. For + example, parametrizing a test across the set of ops will result in a test function per op. + + The decision of how to parametrize / what to parametrize over is intended to be implemented + by each derived class. + + In the details, the decorator adds a 'parametrize_fn' property to the test function. This function + is intended to be called later by one of: + * Device-specific test instantiation via instantiate_device_type_tests(). Note that for this + case there is no need to explicitly parametrize over device type, as that is handled separately. + * Device-agnostic parametrized test instantiation via instantiate_parametrized_tests(). + + If the decorator is applied to a test function that already has a 'parametrize_fn' property, a new + composite 'parametrize_fn' will be created that generates tests with the product of the parameters + generated by the old and new parametrize_fns. This allows for convenient composability of decorators. + """ + def _parametrize_test(self, test, generic_cls, device_cls): + """ + Parametrizes the given test function across whatever dimension is specified by the derived class. + Tests can be parametrized over any arbitrary dimension or combination of dimensions, such as all + ops, all modules, or all ops + their associated dtypes. + + Args: + test (fn): Test function to parametrize over + generic_cls (class): Generic test class object containing tests (e.g. TestFoo) + device_cls (class): Device-specialized test class object (e.g. TestFooCPU); set to None + if the tests are not part of a device-specific set + + Returns: + Generator object returning 4-tuples of: + test (fn): Parametrized test function; must support a device arg and args for any params + test_name (str): Parametrized suffix for the test (e.g. opname_int64); will be appended to + the base name of the test + param_kwargs (dict): Param kwargs to pass to the test (e.g. 
{'op': 'add', 'dtype': torch.int64}) + decorator_fn (callable): Callable[[Dict], List] for list of decorators to apply given param_kwargs + """ + raise NotImplementedError + + def __call__(self, fn): + if hasattr(fn, 'parametrize_fn'): + # Do composition with the product of args. + old_parametrize_fn = fn.parametrize_fn + new_parametrize_fn = self._parametrize_test + fn.parametrize_fn = compose_parametrize_fns(old_parametrize_fn, new_parametrize_fn) + else: + fn.parametrize_fn = self._parametrize_test + return fn + + +def compose_parametrize_fns(old_parametrize_fn, new_parametrize_fn): + """ + Returns a parametrize_fn that parametrizes over the product of the parameters handled + by the given parametrize_fns. Each given parametrize_fn should each have the signature + f(test, generic_cls, device_cls). + + The test names will be a combination of the names produced by the parametrize_fns in + "_" order. This order is done to match intuition for constructed names + when composing multiple decorators; the names will be built in top to bottom order when stacking + parametrization decorators. + + Args: + old_parametrize_fn (callable) - First parametrize_fn to compose. + new_parametrize_fn (callable) - Second parametrize_fn to compose. + """ + + def composite_fn(test, generic_cls, device_cls, + old_parametrize_fn=old_parametrize_fn, + new_parametrize_fn=new_parametrize_fn): + old_tests = list(old_parametrize_fn(test, generic_cls, device_cls)) + for (old_test, old_test_name, old_param_kwargs, old_dec_fn) in old_tests: + for (new_test, new_test_name, new_param_kwargs, new_dec_fn) in \ + new_parametrize_fn(old_test, generic_cls, device_cls): + redundant_params = set(old_param_kwargs.keys()).intersection(new_param_kwargs.keys()) + if redundant_params: + raise RuntimeError('Parametrization over the same parameter by multiple parametrization ' + 'decorators is not supported. For test "{}", the following parameters ' + 'are handled multiple times: {}'.format( + test.__name__, redundant_params)) + full_param_kwargs = {**old_param_kwargs, **new_param_kwargs} + merged_test_name = '{}{}{}'.format(new_test_name, + '_' if old_test_name != '' and new_test_name != '' else '', + old_test_name) + + def merged_decorator_fn(param_kwargs, old_dec_fn=old_dec_fn, new_dec_fn=new_dec_fn): + return list(old_dec_fn(param_kwargs)) + list(new_dec_fn(param_kwargs)) + + yield (new_test, merged_test_name, full_param_kwargs, merged_decorator_fn) + + return composite_fn + + +def instantiate_parametrized_tests(generic_cls): + """ + Instantiates tests that have been decorated with a parametrize_fn. This is generally performed by a + decorator subclass of _TestParametrizer. The generic test will be replaced on the test class by + parametrized tests with specialized names. This should be used instead of + instantiate_device_type_tests() if the test class contains device-agnostic tests. + + You can also use it as a class decorator. E.g. + + ``` + @instantiate_parametrized_tests + class TestFoo(TestCase): + ... + ``` + + Args: + generic_cls (class): Generic test class object containing tests (e.g. TestFoo) + """ + for attr_name in tuple(dir(generic_cls)): + class_attr = getattr(generic_cls, attr_name) + if not hasattr(class_attr, 'parametrize_fn'): + continue + + # Remove the generic test from the test class. + delattr(generic_cls, attr_name) + + # Add parametrized tests to the test class. 
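+        # Each tuple yielded by parametrize_fn becomes a concrete test method
+        # named "<base_name>_<suffix>", with its param kwargs bound via a default
+        # argument and any per-param decorators applied before it is attached to
+        # the class.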
+ def instantiate_test_helper(cls, name, test, param_kwargs): + @wraps(test) + def instantiated_test(self, param_kwargs=param_kwargs): + test(self, **param_kwargs) + + assert not hasattr(generic_cls, name), f"Redefinition of test {name}" + setattr(generic_cls, name, instantiated_test) + + for (test, test_suffix, param_kwargs, decorator_fn) in class_attr.parametrize_fn( + class_attr, generic_cls=generic_cls, device_cls=None): + full_name = f'{test.__name__}_{test_suffix}' + + # Apply decorators based on full param kwargs. + for decorator in decorator_fn(param_kwargs): + test = decorator(test) + + instantiate_test_helper(cls=generic_cls, name=full_name, test=test, param_kwargs=param_kwargs) + return generic_cls + + +class subtest: + """ + Explicit subtest case for use with test parametrization. + Allows for explicit naming of individual subtest cases as well as applying + decorators to the parametrized test. + + Args: + arg_values (iterable): Iterable of arg values (e.g. range(10)) or + tuples of arg values (e.g. [(1, 2), (3, 4)]). + name (str): Optional name to use for the test. + decorators (iterable): Iterable of decorators to apply to the generated test. + """ + __slots__ = ['arg_values', 'name', 'decorators'] + + def __init__(self, arg_values, name=None, decorators=None): + self.arg_values = arg_values + self.name = name + self.decorators = decorators if decorators else [] + + +class parametrize(_TestParametrizer): + """ + Decorator for applying generic test parametrizations. + + The interface for this decorator is modeled after `@pytest.mark.parametrize`. + Basic usage between this decorator and pytest's is identical. The first argument + should be a string containing comma-separated names of parameters for the test, and + the second argument should be an iterable returning values or tuples of values for + the case of multiple parameters. + + Beyond this basic usage, the decorator provides some additional functionality that + pytest does not. + + 1. Parametrized tests end up as generated test functions on unittest test classes. + Since this differs from how pytest works, this decorator takes on the additional + responsibility of naming these test functions. The default test names consists of + the test's base name followed by each parameter name + value (e.g. "test_bar_x_1_y_foo"), + but custom names can be defined using `name_fn` or the `subtest` structure (see below). + + 2. The decorator specially handles parameter values of type `subtest`, which allows for + more fine-grained control over both test naming and test execution. In particular, it can + be used to tag subtests with explicit test names or apply arbitrary decorators (see examples + below). + + Examples:: + + @parametrize("x", range(5)) + def test_foo(self, x): + ... + + @parametrize("x,y", [(1, 'foo'), (2, 'bar'), (3, 'baz')]) + def test_bar(self, x, y): + ... + + @parametrize("x,y", [(1, 'foo'), (2, 'bar'), (3, 'baz')], + name_fn=lambda x, y: '{}_{}'.format(x, y)) + def test_bar_custom_names(self, x, y): + ... + + @parametrize("x, y", [subtest((1, 2), name='double'), + subtest((1, 3), name='triple', decorators=[unittest.expectedFailure]), + subtest((1, 4), name='quadruple')]) + def test_baz(self, x, y): + ... + + To actually instantiate the parametrized tests, one of instantiate_parametrized_tests() or + instantiate_device_type_tests() should be called. The former is intended for test classes + that contain device-agnostic tests, while the latter should be used for test classes that + contain device-specific tests. 
Both support arbitrary parametrizations using the decorator. + + Args: + arg_str (str): String of arg names separate by commas (e.g. "x,y"). + arg_values (iterable): Iterable of arg values (e.g. range(10)) or + tuples of arg values (e.g. [(1, 2), (3, 4)]). + name_fn (Callable): Optional function that takes in parameters and returns subtest name. + """ + def __init__(self, arg_str, arg_values, name_fn=None): + self.arg_names: List[str] = [s.strip() for s in arg_str.split(',') if s != ''] + self.arg_values = arg_values + self.name_fn = name_fn + + def _formatted_str_repr(self, idx, name, value): + """ Returns a string representation for the given arg that is suitable for use in test function names. """ + if isinstance(value, torch.dtype): + return dtype_name(value) + elif isinstance(value, torch.device): + return str(value) + # Can't use isinstance as it would cause a circular import + elif type(value).__name__ in {'OpInfo', 'ModuleInfo'}: + return value.formatted_name + elif isinstance(value, (int, float, str)): + return f"{name}_{str(value).replace('.', '_')}" + else: + return f"{name}{idx}" + + def _default_subtest_name(self, idx, values): + return '_'.join([self._formatted_str_repr(idx, a, v) for a, v in zip(self.arg_names, values)]) + + def _get_subtest_name(self, idx, values, explicit_name=None): + if explicit_name: + subtest_name = explicit_name + elif self.name_fn: + subtest_name = self.name_fn(*values) + else: + subtest_name = self._default_subtest_name(idx, values) + return subtest_name + + def _parametrize_test(self, test, generic_cls, device_cls): + if len(self.arg_names) == 0: + # No additional parameters needed for the test. + test_name = '' + yield (test, test_name, {}, lambda _: []) + else: + # Each "values" item is expected to be either: + # * A tuple of values with one for each arg. For a single arg, a single item is expected. + # * A subtest instance with arg_values matching the previous. + values = check_exhausted_iterator = object() + for idx, values in enumerate(self.arg_values): + maybe_name = None + + decorators = [] + if isinstance(values, subtest): + sub = values + values = sub.arg_values + maybe_name = sub.name + + @wraps(test) + def test_wrapper(*args, **kwargs): + return test(*args, **kwargs) + + decorators = sub.decorators + gen_test = test_wrapper + else: + gen_test = test + + values = list(values) if len(self.arg_names) > 1 else [values] + if len(values) != len(self.arg_names): + raise RuntimeError(f'Expected # values == # arg names, but got: {len(values)} ' + f'values and {len(self.arg_names)} names for test "{test.__name__}"') + + param_kwargs = dict(zip(self.arg_names, values)) + + test_name = self._get_subtest_name(idx, values, explicit_name=maybe_name) + + def decorator_fn(_, decorators=decorators): + return decorators + + yield (gen_test, test_name, param_kwargs, decorator_fn) + + if values is check_exhausted_iterator: + raise ValueError(f'{test}: An empty arg_values was passed to @parametrize. ' + 'Note that this may result from reuse of a generator.') + + +class decorateIf(_TestParametrizer): + """ + Decorator for applying parameter-specific conditional decoration. + Composes with other test parametrizers (e.g. @modules, @ops, @parametrize, etc.). + + Examples:: + + @decorateIf(unittest.skip, lambda params: params["x"] == 2) + @parametrize("x", range(5)) + def test_foo(self, x): + ... 
+ + @parametrize("x,y", [(1, 'foo'), (2, 'bar'), (3, 'baz')]) + @decorateIf( + unittest.expectedFailure, + lambda params: params["x"] == 3 and params["y"] == "baz" + ) + def test_bar(self, x, y): + ... + + @decorateIf( + unittest.expectedFailure, + lambda params: params["op"].name == "add" and params["dtype"] == torch.float16 + ) + @ops(op_db) + def test_op_foo(self, device, dtype, op): + ... + + @decorateIf( + unittest.skip, + lambda params: params["module_info"].module_cls is torch.nn.Linear and \ + params["device"] == "cpu" + ) + @modules(module_db) + def test_module_foo(self, device, dtype, module_info): + ... + + Args: + decorator: Test decorator to apply if the predicate is satisfied. + predicate_fn (Callable): Function taking in a dict of params and returning a boolean + indicating whether the decorator should be applied or not. + """ + def __init__(self, decorator, predicate_fn): + self.decorator = decorator + self.predicate_fn = predicate_fn + + def _parametrize_test(self, test, generic_cls, device_cls): + + # Leave test as-is and return the appropriate decorator_fn. + def decorator_fn(params, decorator=self.decorator, predicate_fn=self.predicate_fn): + if predicate_fn(params): + return [decorator] + else: + return [] + + @wraps(test) + def test_wrapper(*args, **kwargs): + return test(*args, **kwargs) + + test_name = '' + yield (test_wrapper, test_name, {}, decorator_fn) + + +class ProfilingMode(Enum): + LEGACY = 1 + SIMPLE = 2 + PROFILING = 3 + +def cppProfilingFlagsToProfilingMode(): + old_prof_exec_state = torch._C._jit_set_profiling_executor(True) + old_prof_mode_state = torch._C._get_graph_executor_optimize(True) + torch._C._jit_set_profiling_executor(old_prof_exec_state) + torch._C._get_graph_executor_optimize(old_prof_mode_state) + + if old_prof_exec_state: + if old_prof_mode_state: + return ProfilingMode.PROFILING + else: + return ProfilingMode.SIMPLE + else: + return ProfilingMode.LEGACY + +@contextmanager +def enable_profiling_mode_for_profiling_tests(): + if GRAPH_EXECUTOR == ProfilingMode.PROFILING: + old_prof_exec_state = torch._C._jit_set_profiling_executor(True) + old_prof_mode_state = torch._C._get_graph_executor_optimize(True) + try: + yield + finally: + if GRAPH_EXECUTOR == ProfilingMode.PROFILING: + torch._C._jit_set_profiling_executor(old_prof_exec_state) + torch._C._get_graph_executor_optimize(old_prof_mode_state) + +@contextmanager +def enable_profiling_mode(): + old_prof_exec_state = torch._C._jit_set_profiling_executor(True) + old_prof_mode_state = torch._C._get_graph_executor_optimize(True) + try: + yield + finally: + torch._C._jit_set_profiling_executor(old_prof_exec_state) + torch._C._get_graph_executor_optimize(old_prof_mode_state) + +@contextmanager +def num_profiled_runs(num_runs): + old_num_runs = torch._C._jit_set_num_profiled_runs(num_runs) + try: + yield + finally: + torch._C._jit_set_num_profiled_runs(old_num_runs) + +func_call = torch._C.ScriptFunction.__call__ +meth_call = torch._C.ScriptMethod.__call__ + +def prof_callable(callable, *args, **kwargs): + if 'profile_and_replay' in kwargs: + del kwargs['profile_and_replay'] + if GRAPH_EXECUTOR == ProfilingMode.PROFILING: + with enable_profiling_mode_for_profiling_tests(): + callable(*args, **kwargs) + return callable(*args, **kwargs) + + return callable(*args, **kwargs) + +def prof_func_call(*args, **kwargs): + return prof_callable(func_call, *args, **kwargs) + +def prof_meth_call(*args, **kwargs): + return prof_callable(meth_call, *args, **kwargs) + +# TODO fix when 
https://github.com/python/mypy/issues/2427 is address +torch._C.ScriptFunction.__call__ = prof_func_call # type: ignore[assignment] +torch._C.ScriptMethod.__call__ = prof_meth_call # type: ignore[assignment] + +def _get_test_report_path(): + # allow users to override the test file location. We need this + # because the distributed tests run the same test file multiple + # times with different configurations. + override = os.environ.get('TEST_REPORT_SOURCE_OVERRIDE') + test_source = override if override is not None else 'python-unittest' + return os.path.join('test-reports', test_source) + +is_running_via_run_test = "run_test.py" in getattr(__main__, "__file__", "") +parser = argparse.ArgumentParser(add_help=not is_running_via_run_test, allow_abbrev=False) +parser.add_argument('--subprocess', action='store_true', + help='whether to run each test in a subprocess') +parser.add_argument('--seed', type=int, default=1234) +parser.add_argument('--accept', action='store_true') +parser.add_argument('--jit-executor', '--jit_executor', type=str) +parser.add_argument('--repeat', type=int, default=1) +parser.add_argument('--test-bailouts', '--test_bailouts', action='store_true') +parser.add_argument('--use-pytest', action='store_true') +parser.add_argument('--save-xml', nargs='?', type=str, + const=_get_test_report_path(), + default=_get_test_report_path() if IS_CI else None) +parser.add_argument('--discover-tests', action='store_true') +parser.add_argument('--log-suffix', type=str, default="") +parser.add_argument('--run-parallel', type=int, default=1) +parser.add_argument('--import-slow-tests', type=str, nargs='?', const=DEFAULT_SLOW_TESTS_FILE) +parser.add_argument('--import-disabled-tests', type=str, nargs='?', const=DEFAULT_DISABLED_TESTS_FILE) +parser.add_argument('--rerun-disabled-tests', action='store_true') +parser.add_argument('--pytest-single-test', type=str, nargs=1) + +# Only run when -h or --help flag is active to display both unittest and parser help messages. 
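+# (unittest.main() raises SystemExit after printing its usage; running it on a
+# helper thread lets that exit terminate only the thread, so the argparse help
+# from parse_known_args() below, when enabled, can still be printed afterwards.)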
+def run_unittest_help(argv): + unittest.main(argv=argv) + +if '-h' in sys.argv or '--help' in sys.argv: + help_thread = threading.Thread(target=run_unittest_help, args=(sys.argv,)) + help_thread.start() + help_thread.join() + +args, remaining = parser.parse_known_args() +if args.jit_executor == 'legacy': + GRAPH_EXECUTOR = ProfilingMode.LEGACY +elif args.jit_executor == 'profiling': + GRAPH_EXECUTOR = ProfilingMode.PROFILING +elif args.jit_executor == 'simple': + GRAPH_EXECUTOR = ProfilingMode.SIMPLE +else: + # infer flags based on the default settings + GRAPH_EXECUTOR = cppProfilingFlagsToProfilingMode() + +RERUN_DISABLED_TESTS = args.rerun_disabled_tests +# Rerun disabled tests many more times to make sure that they are not flaky anymore +MAX_NUM_RETRIES = 3 if not RERUN_DISABLED_TESTS else 50 + +SLOW_TESTS_FILE = args.import_slow_tests +DISABLED_TESTS_FILE = args.import_disabled_tests +LOG_SUFFIX = args.log_suffix +RUN_PARALLEL = args.run_parallel +TEST_BAILOUTS = args.test_bailouts +USE_PYTEST = args.use_pytest +PYTEST_SINGLE_TEST = args.pytest_single_test +TEST_DISCOVER = args.discover_tests +TEST_IN_SUBPROCESS = args.subprocess +TEST_SAVE_XML = args.save_xml +REPEAT_COUNT = args.repeat +SEED = args.seed +if not expecttest.ACCEPT: + expecttest.ACCEPT = args.accept +UNITTEST_ARGS = [sys.argv[0]] + remaining +torch.manual_seed(SEED) + +# CI Prefix path used only on CI environment +CI_TEST_PREFIX = str(Path(os.getcwd())) +CI_PT_ROOT = str(Path(os.getcwd()).parent) +CI_FUNCTORCH_ROOT = str(os.path.join(Path(os.getcwd()).parent, "functorch")) + +def wait_for_process(p, timeout=None): + try: + return p.wait(timeout=timeout) + except KeyboardInterrupt: + # Give `p` a chance to handle KeyboardInterrupt. Without this, + # `pytest` can't print errors it collected so far upon KeyboardInterrupt. + exit_status = p.wait(timeout=5) + if exit_status is not None: + return exit_status + else: + p.kill() + raise + except subprocess.TimeoutExpired: + # send SIGINT to give pytest a chance to make xml + p.send_signal(signal.SIGINT) + exit_status = None + try: + exit_status = p.wait(timeout=5) + # try to handle the case where p.wait(timeout=5) times out as well as + # otherwise the wait() call in the finally block can potentially hang + except subprocess.TimeoutExpired: + pass + if exit_status is not None: + return exit_status + else: + p.kill() + raise + except: # noqa: B001,E722, copied from python core library + p.kill() + raise + finally: + # Always call p.wait() to ensure exit + p.wait() + +def shell(command, cwd=None, env=None, stdout=None, stderr=None, timeout=None): + sys.stdout.flush() + sys.stderr.flush() + # The following cool snippet is copied from Py3 core library subprocess.call + # only the with + # 1. `except KeyboardInterrupt` block added for SIGINT handling. + # 2. In Py2, subprocess.Popen doesn't return a context manager, so we do + # `p.wait()` in a `final` block for the code to be portable. 
+ # + # https://github.com/python/cpython/blob/71b6c1af727fbe13525fb734568057d78cea33f3/Lib/subprocess.py#L309-L323 + assert not isinstance(command, str), "Command to shell should be a list or tuple of tokens" + p = subprocess.Popen(command, universal_newlines=True, cwd=cwd, env=env, stdout=stdout, stderr=stderr) + return wait_for_process(p, timeout=timeout) + + +def retry_shell( + command, + cwd=None, + env=None, + stdout=None, + stderr=None, + timeout=None, + retries=1, + was_rerun=False, +) -> Tuple[int, bool]: + # Returns exicode + whether it was rerun + assert ( + retries >= 0 + ), f"Expecting non negative number for number of retries, got {retries}" + try: + exit_code = shell( + command, cwd=cwd, env=env, stdout=stdout, stderr=stderr, timeout=timeout + ) + if exit_code == 0 or retries == 0: + return exit_code, was_rerun + print( + f"Got exit code {exit_code}, retrying (retries left={retries})", + file=stdout, + flush=True, + ) + except subprocess.TimeoutExpired: + if retries == 0: + print( + f"Command took >{timeout // 60}min, returning 124", + file=stdout, + flush=True, + ) + return 124, was_rerun + print( + f"Command took >{timeout // 60}min, retrying (retries left={retries})", + file=stdout, + flush=True, + ) + return retry_shell( + command, + cwd=cwd, + env=env, + stdout=stdout, + stderr=stderr, + timeout=timeout, + retries=retries - 1, + was_rerun=True, + ) + + +def discover_test_cases_recursively(suite_or_case): + if isinstance(suite_or_case, unittest.TestCase): + return [suite_or_case] + rc = [] + for element in suite_or_case: + print(element) + rc.extend(discover_test_cases_recursively(element)) + return rc + +def get_test_names(test_cases): + return ['.'.join(case.id().split('.')[-2:]) for case in test_cases] + +def _print_test_names(): + suite = unittest.TestLoader().loadTestsFromModule(__main__) + test_cases = discover_test_cases_recursively(suite) + for name in get_test_names(test_cases): + print(name) + +def chunk_list(lst, nchunks): + return [lst[i::nchunks] for i in range(nchunks)] + +# sanitize filename e.g., distributed/pipeline/sync/skip/test_api.py -> distributed.pipeline.sync.skip.test_api +def sanitize_test_filename(filename): + # inspect.getfile returns absolute path in some CI jobs, converting it to relative path if needed + if filename.startswith(CI_TEST_PREFIX): + filename = filename[len(CI_TEST_PREFIX) + 1:] + strip_py = re.sub(r'.py$', '', filename) + return re.sub('/', r'.', strip_py) + +def lint_test_case_extension(suite): + succeed = True + for test_case_or_suite in suite: + test_case = test_case_or_suite + if isinstance(test_case_or_suite, unittest.TestSuite): + first_test = test_case_or_suite._tests[0] if len(test_case_or_suite._tests) > 0 else None + if first_test is not None and isinstance(first_test, unittest.TestSuite): + return succeed and lint_test_case_extension(test_case_or_suite) + test_case = first_test + + if test_case is not None: + test_class = test_case.id().split('.', 1)[1].split('.')[0] + if not isinstance(test_case, TestCase): + err = "This test class should extend from torch.testing._internal.common_utils.TestCase but it doesn't." + print(f"{test_class} - failed. 
{err}") + succeed = False + return succeed + + +def get_report_path(argv=UNITTEST_ARGS, pytest=False): + test_filename = sanitize_test_filename(argv[0]) + test_report_path = TEST_SAVE_XML + LOG_SUFFIX + test_report_path = os.path.join(test_report_path, test_filename) + if pytest: + test_report_path = test_report_path.replace('python-unittest', 'python-pytest') + os.makedirs(test_report_path, exist_ok=True) + test_report_path = os.path.join(test_report_path, f"{test_filename}-{os.urandom(8).hex()}.xml") + return test_report_path + os.makedirs(test_report_path, exist_ok=True) + return test_report_path + + +def sanitize_pytest_xml(xml_file: str): + # pytext xml is different from unittext xml, this function makes pytest xml more similar to unittest xml + # consider somehow modifying the XML logger in conftest to do this instead + import xml.etree.ElementTree as ET + tree = ET.parse(xml_file) + for testcase in tree.iter('testcase'): + full_classname = testcase.attrib['classname'] + # The test prefix is optional + regex_result = re.search(r"^(test\.)?(?P.*)\.(?P[^\.]*)$", full_classname) + if regex_result is None: + continue + classname = regex_result.group("classname") + file = regex_result.group("file").replace(".", "/") + testcase.set("classname", classname) + testcase.set("file", f"{file}.py") + tree.write(xml_file) + + +def get_pytest_test_cases(argv: List[str]) -> List[str]: + class TestCollectorPlugin: + def __init__(self): + self.tests = [] + + def pytest_collection_finish(self, session): + for item in session.items: + self.tests.append(session.config.cwd_relative_nodeid(item.nodeid)) + + test_collector_plugin = TestCollectorPlugin() + import pytest + pytest.main( + [arg for arg in argv if arg != '-vv'] + ['--collect-only', '-qq', '--use-main-module'], + plugins=[test_collector_plugin] + ) + return test_collector_plugin.tests + + +def run_tests(argv=UNITTEST_ARGS): + # import test files. 
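+    # Illustrative sketch (not from the original file) of the JSON shapes the two
+    # loaders below expect, inferred from how check_if_enable() consumes the dicts;
+    # "test_foo" / "TestSuite" are hypothetical names used only for illustration:
+    #   slow tests file:     {"test_foo (__main__.TestSuite)": ...}   # only the keys are matched
+    #   disabled tests file: {"test_foo (__main__.TestSuite)": ["<issue url>", ["win", "rocm", ...]]}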
+ if SLOW_TESTS_FILE: + if os.path.exists(SLOW_TESTS_FILE): + with open(SLOW_TESTS_FILE) as fp: + global slow_tests_dict + slow_tests_dict = json.load(fp) + # use env vars so pytest-xdist subprocesses can still access them + os.environ['SLOW_TESTS_FILE'] = SLOW_TESTS_FILE + else: + warnings.warn(f'slow test file provided but not found: {SLOW_TESTS_FILE}') + if DISABLED_TESTS_FILE: + if os.path.exists(DISABLED_TESTS_FILE): + with open(DISABLED_TESTS_FILE) as fp: + global disabled_tests_dict + disabled_tests_dict = json.load(fp) + os.environ['DISABLED_TESTS_FILE'] = DISABLED_TESTS_FILE + else: + warnings.warn(f'disabled test file provided but not found: {DISABLED_TESTS_FILE}') + # Determine the test launch mechanism + if TEST_DISCOVER: + _print_test_names() + return + + # Before running the tests, lint to check that every test class extends from TestCase + suite = unittest.TestLoader().loadTestsFromModule(__main__) + if not lint_test_case_extension(suite): + sys.exit(1) + + if TEST_IN_SUBPROCESS: + other_args = [] + if DISABLED_TESTS_FILE: + other_args.append("--import-disabled-tests") + if SLOW_TESTS_FILE: + other_args.append("--import-slow-tests") + if USE_PYTEST: + other_args.append("--use-pytest") + if RERUN_DISABLED_TESTS: + other_args.append("--rerun-disabled-tests") + + test_cases = ( + get_pytest_test_cases(argv) if USE_PYTEST else + [case.id().split('.', 1)[1] for case in discover_test_cases_recursively(suite)] + ) + + failed_tests = [] + + for test_case_full_name in test_cases: + + cmd = ( + [sys.executable] + [argv[0]] + other_args + argv[1:] + + (["--pytest-single-test"] if USE_PYTEST else []) + + [test_case_full_name] + ) + string_cmd = " ".join(cmd) + + timeout = None if RERUN_DISABLED_TESTS else 15 * 60 + + exitcode, _ = retry_shell(cmd, timeout=timeout, retries=0 if RERUN_DISABLED_TESTS else 1) + + if exitcode != 0: + # This is sort of hacky, but add on relevant env variables for distributed tests. + if 'TestDistBackendWithSpawn' in test_case_full_name: + backend = os.environ.get("BACKEND", "") + world_size = os.environ.get("WORLD_SIZE", "") + env_prefix = f"BACKEND={backend} WORLD_SIZE={world_size}" + string_cmd = env_prefix + " " + string_cmd + # Log the command to reproduce the failure. + print(f"Test exited with non-zero exitcode {exitcode}. 
Command to reproduce: {string_cmd}") + failed_tests.append(test_case_full_name) + + assert len(failed_tests) == 0, "{} unit test(s) failed:\n\t{}".format( + len(failed_tests), '\n\t'.join(failed_tests)) + + elif RUN_PARALLEL > 1: + test_cases = discover_test_cases_recursively(suite) + test_batches = chunk_list(get_test_names(test_cases), RUN_PARALLEL) + processes = [] + for i in range(RUN_PARALLEL): + command = [sys.executable] + argv + [f'--log-suffix=-shard-{i + 1}'] + test_batches[i] + processes.append(subprocess.Popen(command, universal_newlines=True)) + failed = False + for p in processes: + failed |= wait_for_process(p) != 0 + assert not failed, "Some test shards have failed" + elif USE_PYTEST: + pytest_args = argv + ["--use-main-module"] + if TEST_SAVE_XML: + test_report_path = get_report_path(pytest=True) + print(f'Test results will be stored in {test_report_path}') + pytest_args.append(f'--junit-xml-reruns={test_report_path}') + if PYTEST_SINGLE_TEST: + pytest_args = PYTEST_SINGLE_TEST + pytest_args[1:] + + import pytest + os.environ["NO_COLOR"] = "1" + exit_code = pytest.main(args=pytest_args) + if TEST_SAVE_XML: + sanitize_pytest_xml(test_report_path) + + if not RERUN_DISABLED_TESTS: + # exitcode of 5 means no tests were found, which happens since some test configs don't + # run tests from certain files + sys.exit(0 if exit_code == 5 else exit_code) + else: + # Only record the test report and always return a success code when running under rerun + # disabled tests mode + sys.exit(0) + elif TEST_SAVE_XML is not None: + # import here so that non-CI doesn't need xmlrunner installed + import xmlrunner # type: ignore[import] + from xmlrunner.result import _XMLTestResult # type: ignore[import] + + class XMLTestResultVerbose(_XMLTestResult): + """ + Adding verbosity to test outputs: + by default test summary prints 'skip', + but we want to also print the skip reason. 
+ GH issue: https://github.com/pytorch/pytorch/issues/69014 + + This works with unittest_xml_reporting<=3.2.0,>=2.0.0 + (3.2.0 is latest at the moment) + """ + def __init__(self, *args, **kwargs): + super().__init__(*args, **kwargs) + + def addSkip(self, test, reason): + super().addSkip(test, reason) + for c in self.callback.__closure__: + if isinstance(c.cell_contents, str) and c.cell_contents == 'skip': + # this message is printed in test summary; + # it stands for `verbose_str` captured in the closure + c.cell_contents = f"skip: {reason}" + + def printErrors(self) -> None: + super().printErrors() + self.printErrorList("XPASS", self.unexpectedSuccesses) + test_report_path = get_report_path() + verbose = '--verbose' in argv or '-v' in argv + if verbose: + print(f'Test results will be stored in {test_report_path}') + unittest.main(argv=argv, testRunner=xmlrunner.XMLTestRunner( + output=test_report_path, + verbosity=2 if verbose else 1, + resultclass=XMLTestResultVerbose)) + elif REPEAT_COUNT > 1: + for _ in range(REPEAT_COUNT): + if not unittest.main(exit=False, argv=argv).result.wasSuccessful(): + sys.exit(-1) + else: + unittest.main(argv=argv) + +IS_LINUX = sys.platform == "linux" +IS_WINDOWS = sys.platform == "win32" +IS_MACOS = sys.platform == "darwin" +IS_PPC = platform.machine() == "ppc64le" +IS_X86 = platform.machine() in ('x86_64', 'i386') +IS_ARM64 = platform.machine() in ('arm64', 'aarch64') + +def is_avx512_vnni_supported(): + if sys.platform != 'linux': + return False + with open("/proc/cpuinfo", encoding="ascii") as f: + lines = f.read() + return "vnni" in lines + +IS_AVX512_VNNI_SUPPORTED = is_avx512_vnni_supported() + +if IS_WINDOWS: + @contextmanager + def TemporaryFileName(*args, **kwargs): + # Ideally we would like to not have to manually delete the file, but NamedTemporaryFile + # opens the file, and it cannot be opened multiple times in Windows. To support Windows, + # close the file after creation and try to remove it manually + if 'delete' in kwargs: + if kwargs['delete'] is not False: + raise UserWarning("only TemporaryFileName with delete=False is supported on Windows.") + else: + kwargs['delete'] = False + f = tempfile.NamedTemporaryFile(*args, **kwargs) + try: + f.close() + yield f.name + finally: + os.unlink(f.name) +else: + @contextmanager # noqa: T484 + def TemporaryFileName(*args, **kwargs): + with tempfile.NamedTemporaryFile(*args, **kwargs) as f: + yield f.name + +if IS_WINDOWS: + @contextmanager + def TemporaryDirectoryName(suffix=None): + # On Windows the directory created by TemporaryDirectory is likely to be removed prematurely, + # so we first create the directory using mkdtemp and then remove it manually + try: + dir_name = tempfile.mkdtemp(suffix=suffix) + yield dir_name + finally: + shutil.rmtree(dir_name) +else: + @contextmanager # noqa: T484 + def TemporaryDirectoryName(suffix=None): + with tempfile.TemporaryDirectory(suffix=suffix) as d: + yield d + +IS_FILESYSTEM_UTF8_ENCODING = sys.getfilesystemencoding() == 'utf-8' + +def _check_module_exists(name: str) -> bool: + r"""Returns if a top-level module with :attr:`name` exists *without** + importing it. This is generally safer than try-catch block around a + `import X`. It avoids third party libraries breaking assumptions of some of + our tests, e.g., setting multiprocessing start method when imported + (see librosa/#747, torchvision/#544). 
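+
+    A minimal illustration (whether this returns ``True`` naturally depends on the
+    environment; the ``TEST_NUMPY`` flag just below is computed exactly this way):
+
+        _check_module_exists('numpy')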
+ """ + try: + import importlib.util + spec = importlib.util.find_spec(name) + return spec is not None + except ImportError: + return False + +TEST_NUMPY = _check_module_exists('numpy') +TEST_FAIRSEQ = _check_module_exists('fairseq') +TEST_SCIPY = _check_module_exists('scipy') +TEST_MKL = torch.backends.mkl.is_available() +TEST_MPS = torch.backends.mps.is_available() +TEST_CUDA = torch.cuda.is_available() +custom_device_mod = getattr(torch, torch._C._get_privateuse1_backend_name(), None) +TEST_PRIVATEUSE1 = True if (hasattr(custom_device_mod, "is_available") and custom_device_mod.is_available()) else False +TEST_NUMBA = _check_module_exists('numba') + +TEST_DILL = _check_module_exists('dill') + +TEST_LIBROSA = _check_module_exists('librosa') and not IS_ARM64 + +TEST_OPT_EINSUM = _check_module_exists('opt_einsum') + +TEST_Z3 = _check_module_exists('z3') + +BUILD_WITH_CAFFE2 = torch.onnx._CAFFE2_ATEN_FALLBACK + +def split_if_not_empty(x: str): + return x.split(",") if len(x) != 0 else [] + +NOTEST_CPU = "cpu" in split_if_not_empty(os.getenv('PYTORCH_TESTING_DEVICE_EXCEPT_FOR', '')) + +# Python 2.7 doesn't have spawn +TestEnvironment.def_flag("NO_MULTIPROCESSING_SPAWN", env_var="NO_MULTIPROCESSING_SPAWN") +TestEnvironment.def_flag("TEST_WITH_ASAN", env_var="PYTORCH_TEST_WITH_ASAN") +TestEnvironment.def_flag("TEST_WITH_DEV_DBG_ASAN", env_var="PYTORCH_TEST_WITH_DEV_DBG_ASAN") +TestEnvironment.def_flag("TEST_WITH_TSAN", env_var="PYTORCH_TEST_WITH_TSAN") +TestEnvironment.def_flag("TEST_WITH_UBSAN", env_var="PYTORCH_TEST_WITH_UBSAN") +TestEnvironment.def_flag("TEST_WITH_ROCM", env_var="PYTORCH_TEST_WITH_ROCM") + +# TODO: Remove PYTORCH_MIOPEN_SUGGEST_NHWC once ROCm officially supports NHWC in MIOpen +# See #64427 +TEST_WITH_MIOPEN_SUGGEST_NHWC = os.getenv('PYTORCH_MIOPEN_SUGGEST_NHWC', '0') == '1' +# Enables tests that are slow to run (disabled by default) +TestEnvironment.def_flag("TEST_WITH_SLOW", env_var="PYTORCH_TEST_WITH_SLOW") + +# Disables non-slow tests (these tests enabled by default) +# This is usually used in conjunction with TEST_WITH_SLOW to +# run *only* slow tests. (I could have done an enum, but +# it felt a little awkward. +TestEnvironment.def_flag("TEST_SKIP_FAST", env_var="PYTORCH_TEST_SKIP_FAST") + +# Enables crossref tests, in addition to standard tests which +# are being run. crossref tests work by installing a torch +# function mode that runs extra compute alongside the regular +# computation that happens with the test. After both computations +# are done, we cross-reference them (thus the name) to check for +# correction, before throwing out the extra compute and proceeding +# as we had before. By default, we don't run these tests. 
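+# Usage sketch (illustrative; the env var name comes from the registration just
+# below, the test file path is only a placeholder):
+#   PYTORCH_TEST_WITH_CROSSREF=1 python <path/to/test_file.py> -k <test_name>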
+TestEnvironment.def_flag("TEST_WITH_CROSSREF", env_var="PYTORCH_TEST_WITH_CROSSREF") + +TestEnvironment.def_flag("TEST_SKIP_CUDAGRAPH", env_var="PYTORCH_TEST_SKIP_CUDAGRAPH") +TEST_CUDA_GRAPH = TEST_CUDA and (not TEST_SKIP_CUDAGRAPH) and ( + (torch.version.cuda and int(torch.version.cuda.split(".")[0]) >= 11) or + (torch.version.hip and float(".".join(torch.version.hip.split(".")[0:2])) >= 5.3) +) + +if TEST_CUDA and 'NUM_PARALLEL_PROCS' in os.environ: + num_procs = int(os.getenv("NUM_PARALLEL_PROCS", "2")) + # other libraries take up about 11% of space per process + torch.cuda.set_per_process_memory_fraction(round(1 / num_procs - .11, 2)) + + +def skipIfCrossRef(fn): + @wraps(fn) + def wrapper(*args, **kwargs): + if TEST_WITH_CROSSREF: + raise unittest.SkipTest("test doesn't currently with crossref") + else: + fn(*args, **kwargs) + return wrapper + +class CrossRefMode(torch.overrides.TorchFunctionMode): + def __torch_function__(self, func, types, args=(), kwargs=None): + kwargs = kwargs or {} + r = func(*args, **kwargs) + return r + +# Run PyTorch tests with TorchDynamo +TestEnvironment.def_flag("TEST_WITH_TORCHINDUCTOR", env_var="PYTORCH_TEST_WITH_INDUCTOR") +# AOT_EAGER not tested in ci, useful for debugging +TestEnvironment.def_flag("TEST_WITH_AOT_EAGER", env_var="PYTORCH_TEST_WITH_AOT_EAGER") +TestEnvironment.def_flag("TEST_WITH_TORCHDYNAMO", env_var="PYTORCH_TEST_WITH_DYNAMO", + implied_by_fn=lambda: TEST_WITH_TORCHINDUCTOR or TEST_WITH_AOT_EAGER) + +if TEST_WITH_TORCHDYNAMO: + import torch._dynamo + # Do not spend time on helper functions that are called with different inputs + torch._dynamo.config.accumulated_cache_size_limit = 8 + if TEST_WITH_TORCHINDUCTOR: + import torch._inductor.config + torch._inductor.config.fallback_random = True + + +def xpassIfTorchDynamo(func): + return func if TEST_WITH_TORCHDYNAMO else unittest.expectedFailure(func) + + +def xfailIfTorchDynamo(func): + return unittest.expectedFailure(func) if TEST_WITH_TORCHDYNAMO else func + + +def skipIfTorchDynamo(msg="test doesn't currently work with dynamo"): + def decorator(fn): + if not isinstance(fn, type): + @wraps(fn) + def wrapper(*args, **kwargs): + if TEST_WITH_TORCHDYNAMO: + raise unittest.SkipTest(msg) + else: + fn(*args, **kwargs) + return wrapper + + assert(isinstance(fn, type)) + if TEST_WITH_TORCHDYNAMO: + fn.__unittest_skip__ = True + fn.__unittest_skip_why__ = msg + + return fn + + + return decorator + +def skipIfTorchInductor(msg="test doesn't currently work with torchinductor", + condition=TEST_WITH_TORCHINDUCTOR): + def decorator(fn): + if not isinstance(fn, type): + @wraps(fn) + def wrapper(*args, **kwargs): + if condition: + raise unittest.SkipTest(msg) + else: + fn(*args, **kwargs) + return wrapper + + assert(isinstance(fn, type)) + if condition: + fn.__unittest_skip__ = True + fn.__unittest_skip_why__ = msg + + return fn + + return decorator + +def markDynamoStrictTest(cls_or_func): + """ + Marks the test as 'strict'. In strict mode, we reset before and after the + test, and run without suppress errors. 
+ """ + if inspect.isclass(cls_or_func): + cls_or_func.dynamo_strict = True + return cls_or_func + + fn = cls_or_func + + @wraps(fn) + def wrapper(*args, **kwargs): + torch._dynamo.reset() + with unittest.mock.patch("torch._dynamo.config.suppress_errors", False): + fn(*args, **kwargs) + torch._dynamo.reset() + return wrapper + + +def skipRocmIfTorchInductor(msg="test doesn't currently work with torchinductor on the ROCm stack"): + return skipIfTorchInductor(msg=msg, condition=TEST_WITH_ROCM and TEST_WITH_TORCHINDUCTOR) + +def skipIfLegacyJitExecutor(msg="test doesn't currently work with legacy JIT executor"): + def decorator(fn): + if not isinstance(fn, type): + @wraps(fn) + def wrapper(*args, **kwargs): + if GRAPH_EXECUTOR == ProfilingMode.LEGACY: + raise unittest.SkipTest(msg) + else: + fn(*args, **kwargs) + return wrapper + + assert(isinstance(fn, type)) + if GRAPH_EXECUTOR == ProfilingMode.LEGACY: + fn.__unittest_skip__ = True + fn.__unittest_skip_why__ = msg + + return fn + + + return decorator + + +# Run PyTorch tests with translation validation on. +TEST_WITH_TV = os.getenv('PYTORCH_TEST_WITH_TV') == '1' + +if TEST_WITH_TV: + torch.fx.experimental._config.translation_validation = True + +# Some tests take too long when dynamic_shapes is combined with +# translation_validation. Whenever that happens, we solve that by +# disabling translation_validation. +def disable_translation_validation_if_dynamic_shapes(fn): + @functools.wraps(fn) + def wrapper(*args, **kwargs): + if torch._dynamo.config.dynamic_shapes: + # Turning TV off due to high latency on dynamic shapes. + torch.fx.experimental._config.translation_validation = False + return fn(*args, **kwargs) + return wrapper + + +# Determine whether to enable cuda memory leak check. +# CUDA mem leak check is expensive and thus we don't want to execute it on every +# test case / configuration. +# If this is True then CUDA memory leak checks are skipped. If this is false +# then CUDA memory leak checks are performed. +# See: https://github.com/pytorch/pytorch/pull/59402#issuecomment-858811135 +TestEnvironment.def_flag("TEST_CUDA_MEM_LEAK_CHECK", env_var="PYTORCH_TEST_CUDA_MEM_LEAK_CHECK") + +# True if CI is running TBB-enabled Pytorch +IS_TBB = "tbb" in os.getenv("BUILD_ENVIRONMENT", "") + +# Dict of NumPy dtype -> torch dtype (when the correspondence exists) +numpy_to_torch_dtype_dict = { + np.bool_ : torch.bool, + np.uint8 : torch.uint8, + np.int8 : torch.int8, + np.int16 : torch.int16, + np.int32 : torch.int32, + np.int64 : torch.int64, + np.float16 : torch.float16, + np.float32 : torch.float32, + np.float64 : torch.float64, + np.complex64 : torch.complex64, + np.complex128 : torch.complex128 +} + + +# numpy dtypes like np.float64 are not instances, but rather classes. This leads to rather absurd cases like +# np.float64 != np.dtype("float64") but np.float64 == np.dtype("float64").type. +# Especially when checking against a reference we can't be sure which variant we get, so we simply try both. +def numpy_to_torch_dtype(np_dtype): + try: + return numpy_to_torch_dtype_dict[np_dtype] + except KeyError: + return numpy_to_torch_dtype_dict[np_dtype.type] + + +def has_corresponding_torch_dtype(np_dtype): + try: + numpy_to_torch_dtype(np_dtype) + return True + except KeyError: + return False + + +if IS_WINDOWS: + # Size of `np.intc` is platform defined. + # It is returned by functions like `bitwise_not`. 
+ # On Windows `int` is 32-bit + # https://docs.microsoft.com/en-us/cpp/cpp/data-type-ranges?view=msvc-160 + numpy_to_torch_dtype_dict[np.intc] = torch.int + +# Dict of torch dtype -> NumPy dtype +torch_to_numpy_dtype_dict = {value : key for (key, value) in numpy_to_torch_dtype_dict.items()} +torch_to_numpy_dtype_dict.update({ + torch.bfloat16: np.float32, + torch.complex32: np.complex64 +}) + +def skipIfRocm(func=None, *, msg="test doesn't currently work on the ROCm stack"): + def dec_fn(fn): + reason = f"skipIfRocm: {msg}" + + @wraps(fn) + def wrapper(*args, **kwargs): + if TEST_WITH_ROCM: + raise unittest.SkipTest(reason) + else: + return fn(*args, **kwargs) + return wrapper + if func: + return dec_fn(func) + return dec_fn + +def runOnRocm(fn): + @wraps(fn) + def wrapper(*args, **kwargs): + if TEST_WITH_ROCM: + fn(*args, **kwargs) + else: + raise unittest.SkipTest("test currently only works on the ROCm stack") + return wrapper + +def skipIfMps(fn): + @wraps(fn) + def wrapper(*args, **kwargs): + if TEST_MPS: + raise unittest.SkipTest("test doesn't currently work with MPS") + else: + fn(*args, **kwargs) + return wrapper + +# Skips a test on CUDA if ROCm is available and its version is lower than requested. +def skipIfRocmVersionLessThan(version=None): + def dec_fn(fn): + @wraps(fn) + def wrap_fn(self, *args, **kwargs): + if TEST_WITH_ROCM: + rocm_version = str(torch.version.hip) + rocm_version = rocm_version.split("-")[0] # ignore git sha + rocm_version_tuple = tuple(int(x) for x in rocm_version.split(".")) + if rocm_version_tuple is None or version is None or rocm_version_tuple < tuple(version): + reason = f"ROCm {rocm_version_tuple} is available but {version} required" + raise unittest.SkipTest(reason) + return fn(self, *args, **kwargs) + return wrap_fn + return dec_fn + +def skipIfNotMiopenSuggestNHWC(fn): + @wraps(fn) + def wrapper(*args, **kwargs): + if not TEST_WITH_MIOPEN_SUGGEST_NHWC: + raise unittest.SkipTest("test doesn't currently work without MIOpen NHWC activation") + else: + fn(*args, **kwargs) + return wrapper + + +# Reverts the linalg backend back to default to make sure potential failures in one +# test do not affect other tests +def setLinalgBackendsToDefaultFinally(fn): + @wraps(fn) + def _fn(*args, **kwargs): + _preferred_backend = torch.backends.cuda.preferred_linalg_library() + try: + fn(*args, **kwargs) + finally: + torch.backends.cuda.preferred_linalg_library(_preferred_backend) + return _fn + + +# Context manager for setting deterministic flag and automatically +# resetting it to its original value +class DeterministicGuard: + def __init__(self, deterministic, *, warn_only=False, fill_uninitialized_memory=True): + self.deterministic = deterministic + self.warn_only = warn_only + self.fill_uninitialized_memory = fill_uninitialized_memory + + def __enter__(self): + self.deterministic_restore = torch.are_deterministic_algorithms_enabled() + self.warn_only_restore = torch.is_deterministic_algorithms_warn_only_enabled() + self.fill_uninitialized_memory_restore = torch.utils.deterministic.fill_uninitialized_memory + torch.use_deterministic_algorithms( + self.deterministic, + warn_only=self.warn_only) + torch.utils.deterministic.fill_uninitialized_memory = self.fill_uninitialized_memory + + def __exit__(self, exception_type, exception_value, traceback): + torch.use_deterministic_algorithms( + self.deterministic_restore, + warn_only=self.warn_only_restore) + torch.utils.deterministic.fill_uninitialized_memory = self.fill_uninitialized_memory_restore + +class 
AlwaysWarnTypedStorageRemoval: + def __init__(self, always_warn): + assert isinstance(always_warn, bool) + self.always_warn = always_warn + + def __enter__(self): + self.always_warn_restore = torch.storage._get_always_warn_typed_storage_removal() + torch.storage._set_always_warn_typed_storage_removal(self.always_warn) + + def __exit__(self, exception_type, exception_value, traceback): + torch.storage._set_always_warn_typed_storage_removal(self.always_warn_restore) + +# Context manager for setting cuda sync debug mode and reset it +# to original value +# we are not exposing it to the core because sync debug mode is +# global and thus not thread safe +class CudaSyncGuard: + def __init__(self, sync_debug_mode): + self.mode = sync_debug_mode + + def __enter__(self): + self.debug_mode_restore = torch.cuda.get_sync_debug_mode() + torch.cuda.set_sync_debug_mode(self.mode) + + def __exit__(self, exception_type, exception_value, traceback): + torch.cuda.set_sync_debug_mode(self.debug_mode_restore) + +# This decorator can be used for API tests that call +# torch.use_deterministic_algorithms(). When the test is finished, it will +# restore the previous deterministic flag setting. +# +# If CUDA >= 10.2, this will set the environment variable +# CUBLAS_WORKSPACE_CONFIG=:4096:8 so that the error associated with that +# setting is not thrown during the test unless the test changes that variable +# on purpose. The previous CUBLAS_WORKSPACE_CONFIG setting will also be +# restored once the test is finished. +# +# Note that if a test requires CUDA to actually register the changed +# CUBLAS_WORKSPACE_CONFIG variable, a new subprocess must be created, because +# CUDA only checks the variable when the runtime initializes. Tests can be +# run inside a subprocess like so: +# +# import subprocess, sys, os +# script = ''' +# # Test code should go here +# ''' +# try: +# subprocess.check_output( +# [sys.executable, '-c', script], +# stderr=subprocess.STDOUT, +# cwd=os.path.dirname(os.path.realpath(__file__)), +# env=os.environ.copy()) +# except subprocess.CalledProcessError as e: +# error_message = e.output.decode('utf-8') +# # Handle exceptions raised by the subprocess here +# +def wrapDeterministicFlagAPITest(fn): + @wraps(fn) + def wrapper(*args, **kwargs): + with DeterministicGuard( + torch.are_deterministic_algorithms_enabled(), + warn_only=torch.is_deterministic_algorithms_warn_only_enabled()): + class CuBLASConfigGuard: + cublas_var_name = 'CUBLAS_WORKSPACE_CONFIG' + + def __enter__(self): + self.is_cuda10_2_or_higher = ( + (torch.version.cuda is not None) + and ([int(x) for x in torch.version.cuda.split(".")] >= [10, 2])) + if self.is_cuda10_2_or_higher: + self.cublas_config_restore = os.environ.get(self.cublas_var_name) + os.environ[self.cublas_var_name] = ':4096:8' + + def __exit__(self, exception_type, exception_value, traceback): + if self.is_cuda10_2_or_higher: + cur_cublas_config = os.environ.get(self.cublas_var_name) + if self.cublas_config_restore is None: + if cur_cublas_config is not None: + del os.environ[self.cublas_var_name] + else: + os.environ[self.cublas_var_name] = self.cublas_config_restore + with CuBLASConfigGuard(): + fn(*args, **kwargs) + return wrapper + +def skipIfCompiledWithoutNumpy(fn): + # Even if the numpy module is present, if `USE_NUMPY=0` is used during the + # build, numpy tests will fail + numpy_support = TEST_NUMPY + if numpy_support: + try: + # The numpy module is present, verify that PyTorch is compiled with + # numpy support + torch.from_numpy(np.array([2, 2])) + except 
RuntimeError: + numpy_support = False + + @wraps(fn) + def wrapper(*args, **kwargs): + if not numpy_support: + raise unittest.SkipTest("PyTorch was compiled without numpy support") + else: + fn(*args, **kwargs) + return wrapper + +def _test_function(fn, device): + def run_test_function(self): + return fn(self, device) + return run_test_function + +def skipIfNoXNNPACK(fn): + @wraps(fn) + def wrapper(*args, **kwargs): + if not torch.backends.xnnpack.enabled: + raise unittest.SkipTest('XNNPACK must be enabled for these tests. Please build with USE_XNNPACK=1.') + else: + fn(*args, **kwargs) + return wrapper + +def skipIfNoLapack(fn): + @wraps(fn) + def wrapper(*args, **kwargs): + if not torch._C.has_lapack: + raise unittest.SkipTest('PyTorch compiled without Lapack') + else: + fn(*args, **kwargs) + return wrapper + +def skipIfNotRegistered(op_name, message): + """Wraps the decorator to hide the import of the `core`. + + Args: + op_name: Check if this op is registered in `core._REGISTERED_OPERATORS`. + message: message to fail with. + + Usage: + @skipIfNotRegistered('MyOp', 'MyOp is not linked!') + This will check if 'MyOp' is in the caffe2.python.core + """ + if not BUILD_WITH_CAFFE2: + return unittest.skip("Pytorch is compiled without Caffe2") + try: + from caffe2.python import core + skipper = unittest.skipIf(op_name not in core._REGISTERED_OPERATORS, + message) + except ImportError: + skipper = unittest.skip("Cannot import `caffe2.python.core`") + return skipper + +def _decide_skip_caffe2(expect_caffe2, reason): + def skip_dec(func): + @wraps(func) + def wrapper(self): + if torch.onnx._CAFFE2_ATEN_FALLBACK != expect_caffe2: + raise unittest.SkipTest(reason) + return func(self) + return wrapper + return skip_dec + +skipIfCaffe2 = _decide_skip_caffe2(False, "Not compatible with Caffe2") +skipIfNoCaffe2 = _decide_skip_caffe2(True, "Caffe2 is not available") + +def skipIfNoSciPy(fn): + @wraps(fn) + def wrapper(*args, **kwargs): + if not TEST_SCIPY: + raise unittest.SkipTest("test require SciPy, but SciPy not found") + else: + fn(*args, **kwargs) + return wrapper + + +def skipIfTBB(message="This test makes TBB sad"): + def dec_fn(fn): + @wraps(fn) + def wrapper(*args, **kwargs): + if IS_TBB: + raise unittest.SkipTest(message) + else: + fn(*args, **kwargs) + return wrapper + return dec_fn + + +def slowTest(fn): + @wraps(fn) + def wrapper(*args, **kwargs): + if not TEST_WITH_SLOW: + raise unittest.SkipTest("test is slow; run with PYTORCH_TEST_WITH_SLOW to enable test") + else: + fn(*args, **kwargs) + wrapper.__dict__['slow_test'] = True + return wrapper + + +def slowTestIf(condition): + return slowTest if condition else lambda fn: fn + + +def skipCUDAMemoryLeakCheckIf(condition): + def dec(fn): + if getattr(fn, '_do_cuda_memory_leak_check', True): # if current True + fn._do_cuda_memory_leak_check = not condition + return fn + return dec + +def skipCUDANonDefaultStreamIf(condition): + def dec(fn): + if getattr(fn, '_do_cuda_non_default_stream', True): # if current True + fn._do_cuda_non_default_stream = not condition + return fn + return dec + +def suppress_warnings(fn): + @wraps(fn) + def wrapper(*args, **kwargs): + with warnings.catch_warnings(): + warnings.simplefilter("ignore") + fn(*args, **kwargs) + return wrapper + + +def to_gpu(obj, type_map=None): + if type_map is None: + type_map = {} + if isinstance(obj, torch.Tensor): + assert obj.is_leaf + t = type_map.get(obj.dtype, obj.dtype) + with torch.no_grad(): + res = obj.clone().to(dtype=t, device="cuda") + res.requires_grad = 
obj.requires_grad + return res + elif torch.is_storage(obj): + return obj.new().resize_(obj.size()).copy_(obj) + elif isinstance(obj, list): + return [to_gpu(o, type_map) for o in obj] + elif isinstance(obj, tuple): + return tuple(to_gpu(o, type_map) for o in obj) + else: + return deepcopy(obj) + + +def get_function_arglist(func): + return inspect.getfullargspec(func).args + + +def set_rng_seed(seed): + torch.manual_seed(seed) + random.seed(seed) + if TEST_NUMPY: + np.random.seed(seed) + + +disable_functorch = torch._C._DisableFuncTorch + + +@contextlib.contextmanager +def freeze_rng_state(): + # no_dispatch needed for test_composite_compliance + # Some OpInfos use freeze_rng_state for rng determinism, but + # test_composite_compliance overrides dispatch for all torch functions + # which we need to disable to get and set rng state + with no_dispatch(), disable_functorch(): + rng_state = torch.get_rng_state() + if torch.cuda.is_available(): + cuda_rng_state = torch.cuda.get_rng_state() + try: + yield + finally: + # Modes are not happy with torch.cuda.set_rng_state + # because it clones the state (which could produce a Tensor Subclass) + # and then grabs the new tensor's data pointer in generator.set_state. + # + # In the long run torch.cuda.set_rng_state should probably be + # an operator. + # + # NB: Mode disable is to avoid running cross-ref tests on thes seeding + with no_dispatch(), disable_functorch(): + if torch.cuda.is_available(): + torch.cuda.set_rng_state(cuda_rng_state) + torch.set_rng_state(rng_state) + +@contextlib.contextmanager +def set_default_dtype(dtype): + saved_dtype = torch.get_default_dtype() + torch.set_default_dtype(dtype) + try: + yield + finally: + torch.set_default_dtype(saved_dtype) + +@contextlib.contextmanager +def set_default_tensor_type(tensor_type): + saved_tensor_type = torch.tensor([]).type() + torch.set_default_tensor_type(tensor_type) + try: + yield + finally: + torch.set_default_tensor_type(saved_tensor_type) + +def iter_indices(tensor): + if tensor.dim() == 0: + return range(0) + if tensor.dim() == 1: + return range(tensor.size(0)) + return product(*(range(s) for s in tensor.size())) + + +def is_iterable(obj): + try: + iter(obj) + return True + except TypeError: + return False + + +def is_iterable_of_tensors(iterable, include_empty=False): + """ Returns True if iterable is an iterable of tensors and False o.w. + + If the iterable is empty, the return value is :attr:`include_empty` + """ + # Tensor itself is iterable so we check this first + if isinstance(iterable, torch.Tensor): + return False + + try: + if len(iterable) == 0: + return include_empty + + for t in iter(iterable): + if not isinstance(t, torch.Tensor): + return False + + except TypeError as te: + return False + + return True + + +class CudaNonDefaultStream: + def __enter__(self): + # Before starting CUDA test save currently active streams on all + # CUDA devices and set new non default streams to all CUDA devices + # to ensure CUDA tests do not use default stream by mistake. 
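+        # (This guard is normally engaged via TestCase.enforceNonDefaultStream(),
+        # which wrap_with_cuda_policy() installs around individual test methods;
+        # see the TestCase class further below.)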
+ beforeDevice = torch.cuda.current_device() + self.beforeStreams = [] + for d in range(torch.cuda.device_count()): + self.beforeStreams.append(torch.cuda.current_stream(d)) + deviceStream = torch.cuda.Stream(device=d) + self.beforeStreams[-1].synchronize() + torch._C._cuda_setStream(stream_id=deviceStream.stream_id, + device_index=deviceStream.device_index, + device_type=deviceStream.device_type) + torch._C._cuda_setDevice(beforeDevice) + + def __exit__(self, exec_type, exec_value, traceback): + # After completing CUDA test load previously active streams on all + # CUDA devices. + beforeDevice = torch.cuda.current_device() + for d in range(torch.cuda.device_count()): + torch._C._cuda_setStream(stream_id=self.beforeStreams[d].stream_id, + device_index=self.beforeStreams[d].device_index, + device_type=self.beforeStreams[d].device_type) + torch._C._cuda_setDevice(beforeDevice) + +class CudaMemoryLeakCheck: + def __init__(self, testcase, name=None): + self.name = testcase.id() if name is None else name + self.testcase = testcase + + # initialize context & RNG to prevent false positive detections + # when the test is the first to initialize those + from torch.testing._internal.common_cuda import initialize_cuda_context_rng + initialize_cuda_context_rng() + + # Stores CUDA memory data provided by PyTorch's caching allocator and + # the CUDA driver. + # + # NOTE: The undocumented torch.cuda.mem_get_info() returns + # (#free bytes, #total bytes available) on the GPU + def __enter__(self): + self.caching_allocator_befores = [] + self.driver_befores = [] + + # Performs a gc if required (required if any CUDA memory is held) + num_devices = torch.cuda.device_count() + for i in range(num_devices): + caching_allocator_mem_allocated = torch.cuda.memory_allocated(i) + # NOTE: gc is based exclusively on caching allocator memory + # because the driver will always have some bytes in use (context size?) + if caching_allocator_mem_allocated > 0: + gc.collect() + torch._C._cuda_clearCublasWorkspaces() + torch.cuda.empty_cache() + break + + # Acquires caching allocator and driver statistics before the test is run + for i in range(num_devices): + self.caching_allocator_befores.append(torch.cuda.memory_allocated(i)) + bytes_free, bytes_total = torch.cuda.mem_get_info(i) + driver_mem_allocated = bytes_total - bytes_free + self.driver_befores.append(driver_mem_allocated) + + def __exit__(self, exec_type, exec_value, traceback): + # Don't check for leaks if an exception was thrown + if exec_type is not None: + return + + # Compares caching allocator before/after statistics + # An increase in allocated memory is a discrepancy indicating a possible + # memory leak + discrepancy_detected = False + num_devices = torch.cuda.device_count() + for i in range(num_devices): + # avoid counting cublasWorkspace allocations + torch._C._cuda_clearCublasWorkspaces() + caching_allocator_mem_allocated = torch.cuda.memory_allocated(i) + + if caching_allocator_mem_allocated > self.caching_allocator_befores[i]: + discrepancy_detected = True + break + + # Short-circuits if no discrepancy detected + if not discrepancy_detected: + return + + # Validates the discrepancy persists after garbage collection and + # is confirmed by the driver API + + # NOTE: driver API iscrepancies alone are ignored because with the jiterator + # some tests may permanently increase the CUDA context size and + # that will appear as a driver memory leak but is the expected behavior. 
+ + # GCs and clears the cache + gc.collect() + torch.cuda.empty_cache() + + for i in range(num_devices): + + discrepancy_detected = True + + # Query memory multiple items to ensure leak was not transient + for n in range(3): + caching_allocator_mem_allocated = torch.cuda.memory_allocated(i) + bytes_free, bytes_total = torch.cuda.mem_get_info(i) + driver_mem_allocated = bytes_total - bytes_free + + caching_allocator_discrepancy = False + driver_discrepancy = False + + if caching_allocator_mem_allocated > self.caching_allocator_befores[i]: + caching_allocator_discrepancy = True + + if driver_mem_allocated > self.driver_befores[i]: + driver_discrepancy = True + + if not(caching_allocator_discrepancy or driver_discrepancy): + # Leak was false positive, exit loop + discrepancy_detected = False + break + + if not discrepancy_detected: + continue + + if caching_allocator_discrepancy and not driver_discrepancy: + # Just raises a warning if the leak is not validated by the + # driver API + # NOTE: this may be a problem with how the caching allocator collects its + # statistics or a leak too small to trigger the allocation of an + # additional block of memory by the CUDA driver + msg = ("CUDA caching allocator reports a memory leak not " + "verified by the driver API in {}! " + "Caching allocator allocated memory was {} and is now reported as {} " + "on device {}. " + "CUDA driver allocated memory was {} and is now {}.").format( + self.name, + self.caching_allocator_befores[i], + caching_allocator_mem_allocated, + i, + self.driver_befores[i], + driver_mem_allocated) + warnings.warn(msg) + elif caching_allocator_discrepancy and driver_discrepancy: + # A caching allocator discrepancy validated by the driver API is a + # failure (except on ROCm, see below) + msg = ("CUDA driver API confirmed a leak in {}! " + "Caching allocator allocated memory was {} and is now reported as {} " + "on device {}. " + "CUDA driver allocated memory was {} and is now {}.").format( + self.name, + self.caching_allocator_befores[i], + caching_allocator_mem_allocated, + i, + self.driver_befores[i], + driver_mem_allocated) + + raise RuntimeError(msg) + +@contextmanager +def skip_exception_type(exc_type): + try: + yield + except exc_type as e: + raise unittest.SkipTest(f"not implemented: {e}") from e + +@contextmanager +def print_repro_on_failure(repro_str): + try: + yield + except unittest.SkipTest: + raise + except Exception as e: + # NB: Hacking the exception args is the cleanest way I've found to append + # failure reproduction info without poisoning the stack trace. 
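+        # The repro_str appended here is built in TestCase.__init__ below and looks
+        # roughly like: "To execute this test, run the following from the base repo
+        # dir: <env var prefix> python <test file> -k <test method>".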
+ if len(e.args) >= 1: + e.args = (f"{e.args[0]}\n{repro_str}", *e.args[1:]) + raise + +# "min_satisfying_examples" setting has been deprecated in hypothesis +# 3.56.0 and removed in hypothesis 4.x +try: + import hypothesis + + def settings(*args, **kwargs): + if 'min_satisfying_examples' in kwargs and hypothesis.version.__version_info__ >= (3, 56, 0): + kwargs.pop('min_satisfying_examples') + return hypothesis.settings(*args, **kwargs) + + + hypothesis.settings.register_profile( + "pytorch_ci", + settings( + derandomize=True, + suppress_health_check=[hypothesis.HealthCheck.too_slow], + database=None, + max_examples=50, + verbosity=hypothesis.Verbosity.normal)) + hypothesis.settings.register_profile( + "dev", + settings( + suppress_health_check=[hypothesis.HealthCheck.too_slow], + database=None, + max_examples=10, + verbosity=hypothesis.Verbosity.normal)) + hypothesis.settings.register_profile( + "debug", + settings( + suppress_health_check=[hypothesis.HealthCheck.too_slow], + database=None, + max_examples=1000, + verbosity=hypothesis.Verbosity.verbose)) + + hypothesis.settings.load_profile( + "pytorch_ci" if IS_CI else os.getenv('PYTORCH_HYPOTHESIS_PROFILE', 'dev') + ) +except ImportError: + print('Fail to import hypothesis in common_utils, tests are not derandomized') + +# Used in check_if_enable to see if a test method should be disabled by an issue, +# sanitizes a test method name from appended suffixes by @dtypes parametrization. +# e.g., an issue with title "DISABLED test_bitwise_ops (__main__.TestBinaryUfuncs)" should +# disabled ALL parametrized test_bitwise_ops tests, such test_bitwise_ops_cuda_int32 +def remove_device_and_dtype_suffixes(test_name: str) -> str: + # import statement is localized to avoid circular dependency issues with common_device_type.py + from torch.testing._internal.common_device_type import get_device_type_test_bases + device_suffixes = [x.device_type for x in get_device_type_test_bases()] + dtype_suffixes = [str(dt)[len("torch."):] for dt in get_all_dtypes()] + + test_name_chunks = test_name.split("_") + if len(test_name_chunks) > 0 and test_name_chunks[-1] in dtype_suffixes: + if len(test_name_chunks) > 1 and test_name_chunks[-2] in device_suffixes: + return "_".join(test_name_chunks[0:-2]) + return "_".join(test_name_chunks[0:-1]) + return test_name + + +def check_if_enable(test: unittest.TestCase): + classname = str(test.__class__).split("'")[1].split(".")[-1] + sanitized_testname = remove_device_and_dtype_suffixes(test._testMethodName) + + def matches_test(target: str): + target_test_parts = target.split() + if len(target_test_parts) < 2: + # poorly formed target test name + return False + target_testname = target_test_parts[0] + target_classname = target_test_parts[1][1:-1].split(".")[-1] + # if test method name or its sanitized version exactly matches the disabled + # test method name AND allow non-parametrized suite names to disable + # parametrized ones (TestSuite disables TestSuiteCPU) + return classname.startswith(target_classname) and (target_testname in (test._testMethodName, sanitized_testname)) + + if any(matches_test(x) for x in slow_tests_dict.keys()): + getattr(test, test._testMethodName).__dict__['slow_test'] = True + if not TEST_WITH_SLOW: + raise unittest.SkipTest("test is slow; run with PYTORCH_TEST_WITH_SLOW to enable test") + + if not IS_SANDCASTLE: + should_skip = False + skip_msg = "" + + for disabled_test, (issue_url, platforms) in disabled_tests_dict.items(): + if matches_test(disabled_test): + platform_to_conditional: Dict = { + 
"mac": IS_MACOS, + "macos": IS_MACOS, + "win": IS_WINDOWS, + "windows": IS_WINDOWS, + "linux": IS_LINUX, + "rocm": TEST_WITH_ROCM, + "asan": TEST_WITH_ASAN, + "dynamo": TEST_WITH_TORCHDYNAMO, + "inductor": TEST_WITH_TORCHINDUCTOR, + "slow": TEST_WITH_SLOW, + } + + invalid_platforms = list(filter(lambda p: p not in platform_to_conditional, platforms)) + if len(invalid_platforms) > 0: + invalid_plats_str = ", ".join(invalid_platforms) + valid_plats = ", ".join(platform_to_conditional.keys()) + + print(f"Test {disabled_test} is disabled for some unrecognized ", + f"platforms: [{invalid_plats_str}]. Please edit issue {issue_url} to fix the platforms ", + "assigned to this flaky test, changing \"Platforms: ...\" to a comma separated ", + f"subset of the following (or leave it blank to match all platforms): {valid_plats}") + + # Sanitize the platforms list so that we continue to disable the test for any valid platforms given + platforms = list(filter(lambda p: p in platform_to_conditional, platforms)) + + if platforms == [] or any(platform_to_conditional[platform] for platform in platforms): + should_skip = True + skip_msg = f"Test is disabled because an issue exists disabling it: {issue_url}" \ + f" for {'all' if platforms == [] else ''}platform(s) {', '.join(platforms)}. " \ + "If you're seeing this on your local machine and would like to enable this test, " \ + "please make sure CI is not set and you are not using the flag --import-disabled-tests." + break + + if should_skip and not RERUN_DISABLED_TESTS: + # Skip the disabled test when not running under --rerun-disabled-tests verification mode + raise unittest.SkipTest(skip_msg) + + if not should_skip and RERUN_DISABLED_TESTS: + skip_msg = "Test is enabled but --rerun-disabled-tests verification mode is set, so only" \ + " disabled tests are run" + raise unittest.SkipTest(skip_msg) + + if TEST_SKIP_FAST: + if hasattr(test, test._testMethodName) and not getattr(test, test._testMethodName).__dict__.get('slow_test', False): + raise unittest.SkipTest("test is fast; we disabled it with PYTORCH_TEST_SKIP_FAST") + + +# `TestCase.assertEqual` is very permissive and coerced the inputs into a format that could be compared. This is very +# convenient when writing tests, but not so much while reviewing them. By default, the comparison `Pair` framework of +# `torch.testing._comparison.are_equal`, used for example by the public testing function +# `torch.testing.assert_close`, is more strict. In order to use the same framework and thus reduce the divergence +# between internal and external comparison logic as much as possible, we define some "relaxed" pairs here. They only +# change the supported inputs, but the comparison logic is the same. +# TODO: Revisit the relaxed pairs and check how much work it is to fix the tests that would fail without the relaxation. + +class RelaxedBooleanPair(BooleanPair): + """Pair for boolean-like inputs. + + In contrast to the builtin :class:`BooleanPair`, this class also supports one input being a number or a single + element tensor-like. + """ + _supported_number_types = NumberPair(0, 0)._supported_types + + def _process_inputs(self, actual, expected, *, id): + # We require only one of the inputs of the inputs to be a boolean and the other can also be a boolean, a + # number, or a single element tensor or array, whereas in default BooleanPair both inputs have to be booleans. + tensor_or_array_types: Tuple[Type, ...] 
= (torch.Tensor, np.ndarray) + other_supported_types = (*self._supported_types, *self._supported_number_types, *tensor_or_array_types) + if not ( + (isinstance(actual, self._supported_types) and isinstance(expected, other_supported_types)) + or (isinstance(expected, self._supported_types) and isinstance(actual, other_supported_types)) + ): + self._inputs_not_supported() + + return [self._to_bool(input, id=id) for input in (actual, expected)] + + def _to_bool(self, bool_like, *, id): + if isinstance(bool_like, np.number): + return bool(bool_like.item()) + elif type(bool_like) in self._supported_number_types: + return bool(bool_like) + elif isinstance(bool_like, (torch.Tensor, np.ndarray)): + numel = bool_like.numel() if isinstance(bool_like, torch.Tensor) else bool_like.size + if numel > 1: + self._fail( + ValueError, + f"Only single element tensor-likes can be compared against a boolean. " + f"Got {numel} elements instead.", + id=id + ) + + return bool(bool_like.item()) + else: + return super()._to_bool(bool_like, id=id) + + +class RelaxedNumberPair(NumberPair): + """Pair for number-like inputs. + + In contrast to the builtin :class:`NumberPair`, this class also supports one input being a single element + tensor-like or a :class:`enum.Enum`. (D)Type checks are disabled, meaning comparing 1 to 1.0 succeeds even when + ``check_dtype=True`` is passed. + + In addition, this class uses looser default tolerances for :class:`float` and :class:`complex` inputs. Also + supports overriding the absolute and relative tolerance through the ``@precisionOverride`` and + ``@toleranceOverride`` decorators. + """ + _TYPE_TO_DTYPE = { + int: torch.int64, + float: torch.float32, + complex: torch.complex64, + } + + def __init__( + self, actual, expected, *, rtol_override=0.0, atol_override=0.0, check_dtype=None, **other_parameters + ) -> None: + super().__init__(actual, expected, check_dtype=False, **other_parameters) + self.rtol = max(self.rtol, rtol_override) + self.atol = max(self.atol, atol_override) + + def _process_inputs(self, actual, expected, *, id): + # We require only one of the inputs of the inputs to be a number and the other can also be a number or a single + # element tensor or array, whereas in default NumberPair both inputs have to be numbers. + tensor_or_array_types: Tuple[Type, ...] = (torch.Tensor, np.ndarray) + other_supported_types = (*self._supported_types, *tensor_or_array_types) + if not ( + (isinstance(actual, self._supported_types) and isinstance(expected, other_supported_types)) + or (isinstance(expected, self._supported_types) and isinstance(actual, other_supported_types)) + ): + self._inputs_not_supported() + + return [self._to_number(input, id=id) for input in (actual, expected)] + + def _to_number(self, number_like, *, id): + if isinstance(number_like, (torch.Tensor, np.ndarray)): + numel = number_like.numel() if isinstance(number_like, torch.Tensor) else number_like.size + if numel > 1: + self._fail( + ValueError, + f"Only single element tensor-likes can be compared against a number. " + f"Got {numel} elements instead.", + id=id + ) + number = number_like.item() + if isinstance(number, bool): + number = int(number) + + return number + elif isinstance(number_like, Enum): + return int(number_like) # type: ignore[call-overload] + else: + return super()._to_number(number_like, id=id) + + +class TensorOrArrayPair(TensorLikePair): + """Pair for tensor-like inputs. 
+ + On the one hand this class is stricter than the builtin :class:`TensorLikePair` since it only allows instances of + :class:`torch.Tensor` and :class:`numpy.ndarray` rather than allowing any tensor-like than can be converted into a + tensor. On the other hand this class is looser since it converts all inputs into tensors with no regard of their + relationship, e.g. comparing a :class:`torch.Tensor` to :class:`numpy.ndarray` is fine. + + In addition, this class supports overriding the absolute and relative tolerance through the ``@precisionOverride`` + and ``@toleranceOverride`` decorators. + """ + def __init__(self, actual, expected, *, rtol_override=0.0, atol_override=0.0, **other_parameters): + super().__init__(actual, expected, **other_parameters) + self.rtol = max(self.rtol, rtol_override) + self.atol = max(self.atol, atol_override) + + def _process_inputs(self, actual, expected, *, id, allow_subclasses): + self._check_inputs_isinstance(actual, expected, cls=(torch.Tensor, np.ndarray)) + + actual, expected = (self._to_tensor(input) for input in (actual, expected)) + for tensor in (actual, expected): + self._check_supported(tensor, id=id) + return actual, expected + + +class TypedStoragePair(TensorLikePair): + """Pair for :class:`torch.storage.TypedStorage` inputs.""" + def __init__(self, actual, expected, *, rtol_override=0.0, atol_override=0.0, **other_parameters): + self._check_inputs_isinstance(actual, expected, cls=torch.storage.TypedStorage) + super().__init__(actual, expected, **other_parameters) + self.rtol = max(self.rtol, rtol_override) + self.atol = max(self.atol, atol_override) + + def _to_tensor(self, typed_storage): + return torch.tensor( + typed_storage._untyped_storage, + dtype={ + torch.quint8: torch.uint8, + torch.quint4x2: torch.uint8, + torch.quint2x4: torch.uint8, + torch.qint32: torch.int32, + torch.qint8: torch.int8 + }.get(typed_storage.dtype, typed_storage.dtype), + device=typed_storage.device, + ) + + +class UnittestPair(Pair): + """Fallback ABC pair that handles non-numeric inputs. + + To avoid recreating the mismatch messages of :meth:`unittest.TestCase.assertEqual`, this pair simply wraps it in + order to use it with the :class:`Pair` "framework" from :func:`are_equal`. + + Define the :attr:`UnittestPair.CLS` in a subclass to indicate which class(es) of the inputs the pair should support. + """ + CLS: Union[Type, Tuple[Type, ...]] + TYPE_NAME: Optional[str] = None + + def __init__(self, actual, expected, **other_parameters): + self._check_inputs_isinstance(actual, expected, cls=self.CLS) + super().__init__(actual, expected, **other_parameters) + + def compare(self): + test_case = unittest.TestCase() + + try: + return test_case.assertEqual(self.actual, self.expected) + except test_case.failureException as error: + msg = str(error) + + type_name = self.TYPE_NAME or (self.CLS if isinstance(self.CLS, type) else self.CLS[0]).__name__ + self._fail(AssertionError, f"{type_name.title()} comparison failed: {msg}") + + +class StringPair(UnittestPair): + CLS = (str, bytes) + TYPE_NAME = "string" + + +class SetPair(UnittestPair): + CLS = set + + +class TypePair(UnittestPair): + CLS = type + + +class ObjectPair(UnittestPair): + CLS = object + + +# This implements a variant of assertRaises/assertRaisesRegex where we first test +# if the exception is NotImplementedError, and if so just skip the test instead +# of failing it. 
+# +# This is implemented by inheriting from the (private) implementation of +# assertRaises from unittest.case, and slightly tweaking it for this new +# behavior. The year is 2021: this private class hierarchy hasn't changed since +# 2010, seems low risk to inherit from. +class AssertRaisesContextIgnoreNotImplementedError(unittest.case._AssertRaisesContext): + def __exit__(self, exc_type, exc_value, tb): + if exc_type is not None and issubclass(exc_type, NotImplementedError): + self.test_case.skipTest(f"not_implemented: {exc_value}") # type: ignore[attr-defined] + return super().__exit__(exc_type, exc_value, tb) + + +@contextmanager +def set_warn_always_context(new_val: bool): + old_val = torch.is_warn_always_enabled() + torch.set_warn_always(new_val) + try: + yield + finally: + torch.set_warn_always(old_val) + + +class NoTest: + # causes pytest to not recognize this class as a test + __test__ = False + + +class TestCase(expecttest.TestCase): + # NOTE: "precision" lets classes and generated tests set minimum + # atol values when comparing tensors. Used by @precisionOverride and @toleranceOverride, for + # example. + # NOTE: "rel_tol" lets classes and generated tests set minimum + # rtol values when comparing tensors. Used by @toleranceOverride, for example. + _precision: float = 0 + _rel_tol: float = 0 + + # Toggles whether to assert that `torch.get_default_dtype()` returns + # `torch.float` when `setUp` and `tearDown` are called. + _default_dtype_check_enabled: bool = False + + # Always use difflib to print diffs on multi line equality. + # Undocumented feature in unittest + _diffThreshold = sys.maxsize + maxDiff = None + + # checker to early terminate test suite if unrecoverable failure occurs. + def _should_stop_test_suite(self): + if torch.cuda.is_initialized(): + # CUDA device side error will cause subsequence test cases to fail. + # stop entire test suite if catches RuntimeError during torch.cuda.synchronize(). + try: + torch.cuda.synchronize() + except RuntimeError as rte: + print("TEST SUITE EARLY TERMINATION due to torch.cuda.synchronize() failure", file=sys.stderr) + print(str(rte), file=sys.stderr) + return True + return False + else: + return False + + @property + def precision(self) -> float: + return self._precision + + @precision.setter + def precision(self, prec: float) -> None: + self._precision = prec + + @property + def rel_tol(self) -> float: + return self._rel_tol + + @rel_tol.setter + def rel_tol(self, prec: float) -> None: + self._rel_tol = prec + + _do_cuda_memory_leak_check = False + _do_cuda_non_default_stream = False + + # When True, if a test case raises a NotImplementedError, instead of failing + # the test, skip it instead. + _ignore_not_implemented_error = False + + def __init__(self, method_name='runTest'): + super().__init__(method_name) + + test_method = getattr(self, method_name, None) + if test_method is not None: + # Wraps the tested method if we should do CUDA memory check. + if TEST_CUDA_MEM_LEAK_CHECK: + self._do_cuda_memory_leak_check &= getattr(test_method, '_do_cuda_memory_leak_check', True) + # FIXME: figure out the flaky -1024 anti-leaks on windows. See #8044 + if self._do_cuda_memory_leak_check and not IS_WINDOWS: + self.wrap_with_cuda_policy(method_name, self.assertLeaksNoCudaTensors) + + # Wraps the tested method if we should enforce non default CUDA stream. 
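+            # (Both the class-level default and the per-method attribute, e.g. as set
+            # by skipCUDANonDefaultStreamIf() above, must be True for the wrapping
+            # below to happen.)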
+ self._do_cuda_non_default_stream &= getattr(test_method, '_do_cuda_non_default_stream', True) + if self._do_cuda_non_default_stream and not IS_WINDOWS: + self.wrap_with_cuda_policy(method_name, self.enforceNonDefaultStream) + + if self._ignore_not_implemented_error: + self.wrap_with_policy(method_name, lambda: skip_exception_type(NotImplementedError)) + + if PRINT_REPRO_ON_FAILURE: + env_var_prefix = TestEnvironment.repro_env_var_prefix() + try: + def _get_rel_test_path(abs_test_path): + # Attempt to get relative path based on the "test" dir. + # In CI, the working dir is not guaranteed to be the base repo dir so + # we can't just compute relative path from that. + parts = Path(abs_test_path).parts + for i, part in enumerate(parts): + if part == "test": + base_dir = os.path.join(*parts[:i]) + return os.path.relpath(abs_test_path, start=base_dir) + + # Can't determine containing dir; just return the test filename. + # The path isn't strictly correct but it's arguably better than nothing. + return os.path.split(abs_test_path)[1] + + test_filename = _get_rel_test_path(inspect.getfile(type(self))) + repro_str = f""" +To execute this test, run the following from the base repo dir: + {env_var_prefix} python {test_filename} -k {method_name} + +This message can be suppressed by setting PYTORCH_PRINT_REPRO_ON_FAILURE=0""" + self.wrap_with_policy( + method_name, + lambda repro_str=repro_str: print_repro_on_failure(repro_str=repro_str)) + except Exception as e: + # Don't fail entirely if we can't get the test filename + log.info("could not print repro string", extra=str(e)) + + def assertLeaksNoCudaTensors(self, name=None): + name = self.id() if name is None else name + return CudaMemoryLeakCheck(self, name) + + def enforceNonDefaultStream(self): + return CudaNonDefaultStream() + + def assertExpectedInline(self, actual, expect, skip=0): + return super().assertExpectedInline(actual if isinstance(actual, str) else str(actual), expect, skip + 1) + + # Munges exceptions that internally contain stack traces, using munge_exc + def assertExpectedInlineMunged( + self, exc_type, callable, expect, *, suppress_suffix=True + ): + try: + callable() + except exc_type as e: + self.assertExpectedInline( + munge_exc(e, suppress_suffix=suppress_suffix, skip=1), expect, skip=1 + ) + return + self.fail(msg="Did not raise when expected to") + + def assertLogs(self, logger=None, level=None): + if logger is None: + logger = logging.getLogger("torch") + return super().assertLogs(logger, level) + + def assertNoLogs(self, logger=None, level=None): + if logger is None: + logger = logging.getLogger("torch") + return super().assertNoLogs(logger, level) + + def wrap_with_cuda_policy(self, method_name, policy): + test_method = getattr(self, method_name) + # the import below may initialize CUDA context, so we do it only if + # self._do_cuda_memory_leak_check or self._do_cuda_non_default_stream + # is True. + # TODO: sure looks like we unconditionally initialize the context here + # -- ezyang + from torch.testing._internal.common_cuda import TEST_CUDA + fullname = self.id().lower() # class_name.method_name + if TEST_CUDA and ('gpu' in fullname or 'cuda' in fullname): + setattr(self, method_name, self.wrap_method_with_policy(test_method, policy)) + + def wrap_with_policy(self, method_name, policy): + test_method = getattr(self, method_name) + setattr(self, method_name, self.wrap_method_with_policy(test_method, policy)) + + # A policy is a zero-argument function that returns a context manager. 
+ # We don't take the context manager directly as it may be necessary to + # construct it once per test method + def wrap_method_with_policy(self, method, policy): + # Assumes that `method` is the tested function in `self`. + # NOTE: Python Exceptions (e.g., unittest.Skip) keeps objects in scope + # alive, so this cannot be done in setUp and tearDown because + # tearDown is run unconditionally no matter whether the test + # passes or not. For the same reason, we can't wrap the `method` + # call in try-finally and always do the check. + @wraps(method) + def wrapper(self, *args, **kwargs): + with policy(): + method(*args, **kwargs) + return types.MethodType(wrapper, self) + + def wrap_with_cuda_memory_check(self, method): + return self.wrap_method_with_policy(method, self.assertLeaksNoCudaTensors) + + # Recursive function that incorporates retry logic when PYTORCH_RETRY_TEST_CASES=1 and enables early test + # termination. [DISCLAIMER: ONLY WORKS WITH UNITTEST] + # When report_only is True, flaky tests are only reported, but the signal remains the same (the test will still + # show up red). + # Otherwise, the flaky test will show up green while its stats are captured by test reports. + def _run_with_retry(self, result=None, num_runs_left=0, report_only=True, num_red=0, num_green=0): + using_unittest = isinstance(result, unittest.TestResult) + if num_runs_left == 0: + # The logic when RERUN_DISABLED_TESTS is set to true is as follows: + # |-if the disabled test passes: + # |-- if it's flaky: + # |--- Do nothing because it's still flaky + # |-- elif it isn't flaky anymore: + # |--- Close the disabled ticket (later) + # | + # |- elif the disabled test fails after n retries: + # |-- This is expected, report this but don't fail the job + skipped_msg = { + "num_red": num_red, + "num_green": num_green, + "max_num_retries": MAX_NUM_RETRIES, + "rerun_disabled_test": RERUN_DISABLED_TESTS, + } + + traceback_str = "" + if RERUN_DISABLED_TESTS and using_unittest: + # Hide all failures and errors when RERUN_DISABLED_TESTS is enabled. This is + # a verification check, we don't want more red signals coming from it + if result.failures: + _, traceback_str = result.failures.pop(-1) + if result.errors: + _, traceback_str = result.errors.pop(-1) + + if traceback_str: + skipped_msg["traceback_str"] = traceback_str + + if num_green == 0: + # The disabled test fails, report as skipped but don't fail the job + result.addSkip(self, json.dumps(skipped_msg)) + + if num_red == 0: + # The test passes after re-running multiple times. This acts as a signal + # to confirm that it's not flaky anymore + result.addSuccess(self) + + if num_green > 0 and num_red > 0 and using_unittest: + skipped_msg["flaky"] = True + # Still flaky, do nothing + result.addSkip(self, json.dumps(skipped_msg)) + + return + + if using_unittest: + # Keep track of the number of tests marked as failures, errors, and skipped before starting + failures_before = 0 if result is None else len(result.failures) + errors_before = 0 if result is None else len(result.errors) + skipped_before = 0 if result is None else len(result.skipped) + + super_run = super().run + test_cls = super_run.__self__ + + # Are we compiling? + compiled = TEST_WITH_TORCHDYNAMO or TEST_WITH_AOT_EAGER or TEST_WITH_TORCHINDUCTOR + # Is the class strict and compiling? 
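+ # ("Strict" means the test class sets dynamo_strict = True; in that case
+ # torch._dynamo.reset() is called before and after the test, dynamo errors are
+ # not suppressed, and the eager-backend run below uses nopython=True.)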
+ strict_mode = getattr(test_cls, "dynamo_strict", False) and compiled + + if strict_mode: + torch._dynamo.reset() + + # TODO: Remove this; this is grandfathered in because we suppressed errors + # on test suite previously + # When strict mode is False, supress_errors is True + if compiled: + supress_errors = not strict_mode + else: + supress_errors = torch._dynamo.config.suppress_errors + with unittest.mock.patch("torch._dynamo.config.suppress_errors", supress_errors): + if TEST_WITH_TORCHINDUCTOR: + super_run = torch._dynamo.optimize("inductor", save_config=False)(super_run) + elif TEST_WITH_AOT_EAGER: + super_run = torch._dynamo.optimize("aot_eager_decomp_partition", save_config=False)(super_run) + elif TEST_WITH_TORCHDYNAMO: + # TorchDynamo optimize annotation + super_run = torch._dynamo.optimize("eager", save_config=False, nopython=strict_mode)(super_run) + + super_run(result=result) + + if strict_mode: + torch._dynamo.reset() + + # Early terminate test if necessary. + if self._should_stop_test_suite(): + if result.wasSuccessful(): + case = TestCase() + if TEST_SAVE_XML is not None: + # This is a big hacky, XMLRunner modifies expected type from TestCase to TestInfo + # Create dummy TestInfo to record results correctly + from xmlrunner.result import _TestInfo # type: ignore[import] + case = _TestInfo(result, case) + case.output = _TestInfo.ERROR + case.elapsed_time = 0.0 + case.test_description = "TestSuiteEarlyFailure" + # This shouldn't really happen, but if does add fake failure + # For more details see https://github.com/pytorch/pytorch/issues/71973 + result.failures.append((case, "TestSuite execution was aborted early")) + assert result.wasSuccessful() is False + result.stop() + + if not RETRY_TEST_CASES or not using_unittest: + return + + err = sys.exc_info() + num_retries_left = num_runs_left - 1 + if failures_before < len(result.failures): + print(f" {self._testMethodName} failed - num_retries_left: {num_retries_left}") + if (report_only and num_retries_left < MAX_NUM_RETRIES) or (not report_only and num_retries_left > 0): + _, traceback_str = result.failures.pop(-1) + print(traceback_str) + result.addExpectedFailure(self, err) + self._run_with_retry(result=result, num_runs_left=num_retries_left, report_only=report_only, + num_red=num_red + 1, num_green=num_green) + elif errors_before < len(result.errors): + print(f" {self._testMethodName} errored - num_retries_left: {num_retries_left}") + if (report_only and num_retries_left < MAX_NUM_RETRIES) or (not report_only and num_retries_left > 0): + _, traceback_str = result.errors.pop(-1) + print(traceback_str) + result.addExpectedFailure(self, err) + self._run_with_retry(result=result, num_runs_left=num_retries_left, report_only=report_only, + num_red=num_red + 1, num_green=num_green) + elif RERUN_DISABLED_TESTS and num_retries_left <= MAX_NUM_RETRIES and skipped_before == len(result.skipped): + # Always re-run up to MAX_NUM_RETRIES when running under rerun disabled tests modes if the test successes. + # The parameter num_retries_left can be equal to MAX_NUM_RETRIES here because num_runs_left is initially + # set to MAX_NUM_RETRIES + 1, i.e. 
the first run successes + # + # Also if the result is skipped, this is due to check_if_enable skipping non-disabled tests, thus we + # want to ignore them, not retrying and skipping multiple times + print(f" {self._testMethodName} succeeded - num_retries_left: {num_retries_left}") + result.addSuccess(self) + self._run_with_retry(result=result, num_runs_left=num_retries_left, report_only=report_only, + num_red=num_red, num_green=num_green + 1) + elif report_only and num_retries_left < MAX_NUM_RETRIES: + # The original logic here is that num_retries_left must be smaller than MAX_NUM_RETRIES indicating + # that at least one retry has been spent + print(f" {self._testMethodName} succeeded - num_retries_left: {num_retries_left}") + result.addUnexpectedSuccess(self) + self._run_with_retry(result=result, num_runs_left=num_retries_left, report_only=report_only, + num_red=num_red, num_green=num_green + 1) + elif not report_only and num_retries_left < MAX_NUM_RETRIES: + # in this case, our test was rerun (as a retry has been used) and it just passed. + # we incur one more recursive call with num_runs_left = 0 to allow for accurate flaky reporting + self._run_with_retry(result=result, num_runs_left=0, report_only=report_only, + num_red=num_red, num_green=num_green + 1) + + + def run(self, result=None): + with contextlib.ExitStack() as stack: + if TEST_WITH_CROSSREF: + stack.enter_context(CrossRefMode()) + num_runs = MAX_NUM_RETRIES + 1 if RETRY_TEST_CASES else 1 + self._run_with_retry( + result=result, + num_runs_left=num_runs, + report_only=not OVERRIDE_FLAKY_SIGNAL, + num_red=0, + num_green=0) + + def setUp(self): + check_if_enable(self) + set_rng_seed(SEED) + + # Save global check sparse tensor invariants state that can be + # restored from tearDown: + self._check_invariants = torch.sparse.check_sparse_tensor_invariants.is_enabled() + + # Enable invariant checks for all sparse tensors constructions + # including the unsafe ones. If this is not desired for some + # test case, use check_invariants=False optional argument to + # sparse tensor constructors or + # @torch.sparse.check_sparse_tensor_invariants(False) + # decorator to disable the invariant checks. + torch.sparse.check_sparse_tensor_invariants.enable() + + if self._default_dtype_check_enabled: + assert torch.get_default_dtype() == torch.float + + def tearDown(self): + # There exists test cases that override TestCase.setUp + # definition, so we cannot assume that _check_invariants + # attribute is defined in general. + if hasattr(self, '_check_invariants'): + # Restore the global check sparse tensor invariants state + if self._check_invariants: + torch.sparse.check_sparse_tensor_invariants.enable() + else: + torch.sparse.check_sparse_tensor_invariants.disable() + + if self._default_dtype_check_enabled: + assert torch.get_default_dtype() == torch.float + + @staticmethod + def _make_crow_indices(n_rows, n_cols, nnz, + *, device, dtype, random=True): + """Return crow_indices of a CSR tensor with size (n_rows, n_cols) and + the number of specified elements nnz. + + If random is True, the column counts of rows are in random + order. Otherwise, the column counts of rows are defined by the + used sampling method. + + Sampling method + --------------- + + The used sampling method was introduced in + https://pearu.github.io/csr_sampling.html, and here we give + only an overall description of the method. 
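+
+ For example (with illustrative sizes), a 4 x 3 CSR tensor with nnz=6
+ could use the per-row counts [3, 0, 1, 2]:
+
+ >>> # xdoctest: +SKIP("illustrative example")
+ >>> counts = torch.tensor([0, 3, 0, 1, 2])  # leading zero + per-row counts
+ >>> counts.cumsum(0)  # a valid crow_indices
+ tensor([0, 3, 3, 4, 6])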
+ + Notice that crow_indices can be defined as cumsum(counts) + where counts is a sequence of non-negative integers satisfying + the following conditions: + + len(counts) == n_rows + 1 + counts.max() <= n_cols + + while counts[i + 1] is interpreted as the number of specified + elements in the i-th row. + + The used sampling method aims at increasing the diversity of + CSR samples, that is, a CSR sample should contain (i) rows + that are all filled, (ii) rows with no elements at all, and + (iii) rows that are partially filled. At the same time and for + the given total number of specified elements (nnz), there + should be minimal preference to rows with a given number of + elements. To achieve this, the sampling method is built-up on + using a sawteeth model for counts. In the simplest case, we + would have + + counts = arange(n_rows + 1) % (n_cols + 1) + + that has equal number of all possible column counts per row. + This formula can be used only for specific input values of + n_rows, n_cols, and nnz. To generalize this model to any + combinations of inputs, the counts model above is extended + with an incomplete sawtooth, and the right and lower + rectangular parts that will guarantee that + + counts.sum() == nnz + + for any combination of n_rows, n_cols, and nnz. Basically, + we'll find a maximal window in (n_rows + 1, n_cols + 1)-grid + that is able to hold a sequence of sawteeth and so-called + final correction, while the external part of the window is + filled with counts to meet the nnz constraint exactly. + """ + assert 0 <= nnz <= n_rows * n_cols, (nnz, n_rows, n_cols) + + def sawteeth(n, m): + # return the total number of counts in the sequence of + # sawteeth where n and m define a window in (n_rows+1, + # n_cols+1) rectangle where the sequence of sawteeth + # perfectly fit. + M = (n_cols - m) * (n_cols - m + 1) // 2 + K = (n_rows - n) % (n_cols - m + 1) + return M * ((n_rows - n) // (n_cols - m + 1)) + K * (K - 1) // 2 + + # Different from the original method description, here counts + # has leading 0 required by crow_indices: + counts = torch.zeros(n_rows + 1, dtype=dtype, device=torch.device('cpu')) + + n = m = 0 + N = sawteeth(n, m) + if N and nnz >= max(N, n_cols): + # determine the width of the sawteeth window. We use bisection to solve + # N(n, 0) == 0 or nnz - n * n_cols < max(N(n, 0), n_cols) + # for n + n_left = n + n_right = n_rows - 1 + N_right = sawteeth(n_right, m) + while n_right - n_left > 1: + n_middle = (n_left + n_right) // 2 + N_middle = sawteeth(n_middle, m) + if N_middle == 0 or nnz - n_middle * n_cols < max(N_middle, n_cols): + n_right, N_right = n_middle, N_middle + else: + n_left = n_middle + n, N = n_right, N_right + # fill the right rectangle with counts: + assert n + counts[-n:].fill_(n_cols) + + if N and nnz - n * n_cols >= max(N, n_rows - n): + # determine the height of the sawteeth window. We use bisection to solve + # N(n, m) == 0 or nnz - n * n_cols - m * (n_rows - n) < max(N(n, m), n_rows - n) + # for m. 
+ m_left = m + m_right = n_cols - 1 + N_right = sawteeth(n, m_right) + while m_right - m_left > 1: + m_middle = (m_left + m_right) // 2 + N_middle = sawteeth(n, m_middle) + if N_middle == 0 or nnz - n * n_cols - m_middle * (n_rows - n) < max(N_middle, n_rows - n): + m_right, N_right = m_middle, N_middle + else: + m_left = m_middle + m, N = m_right, N_right + # fill the bottom rectangle with counts: + assert m + counts[1:n_rows - n + 1].fill_(m) + + if N: + # fill the sawteeth window with counts + q, r = divmod(nnz - n * n_cols - m * (n_rows - n), + (n_cols - m) * (n_cols - m + 1) // 2) + p = 1 + q * (n_cols - m + 1) + k = math.isqrt(2 * r) + if k * (k + 1) > 2 * r: + k -= 1 + corr = r - k * (k + 1) // 2 + assert not ((p > 1) and (m > 0)) # full sawteeth are never on top of a bottom rectangle + # sequence of full sawteeth: + counts[1:p] = torch.arange(p - 1, dtype=dtype, device=counts.device) % (n_cols - m + 1) + # incomplete sawtooth: + counts[p:p + k + 1] += torch.arange(k + 1, dtype=dtype, device=counts.device) + else: + # given input does not support sawteeth + p = 1 + corr = nnz - n * n_cols - m * (n_rows - n) + + # correction that will guarantee counts.sum() == nnz: + counts[p] += corr + + if random: + # randomize crow_indices by shuffling the sawteeth + # sequence: + perm = torch.randperm(n_rows, device=counts.device) + counts[1:] = counts[1:][perm] + + # compute crow_indices: + crow_indices = counts + crow_indices.cumsum_(dim=0) + return crow_indices.to(device=device) + + def genSparseCompressedTensor(self, size, nnz, *, layout, device, dtype, index_dtype, blocksize=(), dense_dims=0): + from operator import mul + from functools import reduce + sparse_dim = 2 + assert all(size[d] > 0 for d in range(len(size))) or nnz == 0, 'invalid arguments' + assert len(size) >= sparse_dim + if blocksize: + assert len(blocksize) == 2, (size, blocksize) + assert size[-2 - dense_dims] % blocksize[0] == 0, (size, blocksize) + assert size[-1 - dense_dims] % blocksize[1] == 0, (size, blocksize) + blocksize0, blocksize1 = blocksize + else: + blocksize0 = blocksize1 = 1 + + size = tuple(size) + dense_size = size[(len(size) - dense_dims):] + + def random_sparse_compressed(n_compressed_dims, n_plain_dims, nnz): + compressed_indices = self._make_crow_indices(n_compressed_dims, n_plain_dims, nnz, device=device, dtype=index_dtype) + plain_indices = torch.zeros(nnz, dtype=index_dtype, device=device) + for i in range(n_compressed_dims): + count = compressed_indices[i + 1] - compressed_indices[i] + plain_indices[compressed_indices[i]:compressed_indices[i + 1]], _ = torch.sort( + torch.randperm(n_plain_dims, dtype=index_dtype, device=device)[:count]) + low = -1 if dtype != torch.uint8 else 0 + high = 1 if dtype != torch.uint8 else 2 + values = make_tensor((nnz,) + blocksize + dense_size, device=device, dtype=dtype, low=low, high=high) + return values, compressed_indices, plain_indices + + batch_shape = size[:-2 - dense_dims] + n_batch = reduce(mul, batch_shape, 1) + + if layout in {torch.sparse_csr, torch.sparse_bsr}: + n_compressed_dims, n_plain_dims = size[-2 - dense_dims] // blocksize0, size[-1 - dense_dims] // blocksize1 + else: + n_compressed_dims, n_plain_dims = size[-1 - dense_dims] // blocksize1, size[-2 - dense_dims] // blocksize0 + blocknnz = nnz // (blocksize0 * blocksize1) + sparse_tensors = [random_sparse_compressed(n_compressed_dims, n_plain_dims, blocknnz) for _ in range(n_batch)] + sparse_tensors_it = map(list, zip(*sparse_tensors)) + + values = 
torch.stack(next(sparse_tensors_it)).reshape(*batch_shape, blocknnz, *blocksize, *dense_size) + compressed_indices = torch.stack(next(sparse_tensors_it)).reshape(*batch_shape, -1) + plain_indices = torch.stack(next(sparse_tensors_it)).reshape(*batch_shape, -1) + return torch.sparse_compressed_tensor(compressed_indices, plain_indices, + values, size=size, dtype=dtype, layout=layout, device=device) + + def genSparseCSRTensor(self, size, nnz, *, device, dtype, index_dtype, dense_dims=0): + return self.genSparseCompressedTensor(size, nnz, layout=torch.sparse_csr, device=device, + dtype=dtype, index_dtype=index_dtype, blocksize=(), dense_dims=dense_dims) + + def genSparseCSCTensor(self, size, nnz, *, device, dtype, index_dtype, dense_dims=0): + return self.genSparseCompressedTensor(size, nnz, layout=torch.sparse_csc, device=device, + dtype=dtype, index_dtype=index_dtype, blocksize=(), dense_dims=0) + + def genSparseBSRTensor(self, size, blocksize, nnz, *, device, dtype, index_dtype, dense_dims=0): + assert len(blocksize) == 2 + return self.genSparseCompressedTensor(size, nnz, layout=torch.sparse_bsr, device=device, + dtype=dtype, index_dtype=index_dtype, blocksize=blocksize, dense_dims=dense_dims) + + def genSparseBSCTensor(self, size, blocksize, nnz, *, device, dtype, index_dtype, dense_dims=0): + assert len(blocksize) == 2 + return self.genSparseCompressedTensor(size, nnz, layout=torch.sparse_bsc, device=device, + dtype=dtype, index_dtype=index_dtype, blocksize=blocksize, dense_dims=dense_dims) + + def genSparseTensor(self, size, sparse_dim, nnz, is_uncoalesced, device, dtype): + # Assert not given impossible combination, where the sparse dims have + # empty numel, but nnz > 0 makes the indices containing values. + assert all(size[d] > 0 for d in range(sparse_dim)) or nnz == 0, 'invalid arguments' + + v_size = [nnz] + list(size[sparse_dim:]) + v = make_tensor(v_size, device=device, dtype=dtype, low=-1, high=1) + i = torch.rand(sparse_dim, nnz, device=device) + i.mul_(torch.tensor(size[:sparse_dim]).unsqueeze(1).to(i)) + i = i.to(torch.long) + if is_uncoalesced: + i1 = i[:, :(nnz // 2), ...] + i2 = i[:, :((nnz + 1) // 2), ...] + i = torch.cat([i1, i2], 1) + x = torch.sparse_coo_tensor(i, v, torch.Size(size), dtype=dtype, device=device) + + if not is_uncoalesced: + x = x.coalesce() + else: + # FIXME: `x` is a sparse view of `v`. Currently rebase_history for + # sparse views is not implemented, so this workaround is + # needed for inplace operations done on `x`, e.g., copy_(). + # Remove after implementing something equivalent to CopySlice + # for sparse views. + # NOTE: We do clone() after detach() here because we need to be able to change size/storage of x afterwards + x = x.detach().clone()._coalesced_(False) + return x, x._indices().clone(), x._values().clone() + + def generate_simple_inputs(self, layout, + device=None, + dtype=None, + index_dtype=None, + enable_batch=True, + enable_hybrid=True, + enable_zero_sized=True, + enable_non_contiguous_indices=True, + enable_non_contiguous_values=True, + enable_batch_variable_nse=False, + output_tensor=True, + patterns=None): + """Generator of simple inputs for tensor constructors of the given layout. + + The generated tensor inputs have the following properties: + + - tensor shapes are minimal but not trivial + - tensor values are sorted sequences for COO and CSR formats, e.g. 
[1, 2, 3, 4] + - the generated tensors represent the same mathematical tensor for all layouts + - the generated tensors include regular, zero-sized, and optionally, batched or/and hybrid tensors. + - the generated tensors include contiguous or non-contiguous tensors both in indices and values + + If output_tensor is True, yield tensors with the given + layout. Otherwise, yield inputs to the corresponding tensor + constructors: + + - sparse compressed input is defined as + (compressed_indices, plain_indices, values), dict(size=expected_size_from_shape_inference, device=device, dtype=dtype) + + - sparse COO input is defined as + (indices, values), dict(size=expected_size_from_shape_inference, device=device, dtype=dtype) + + - strided input is defined as + (values,), dict(device=device, dtype=dtype) + """ + if index_dtype is None: + index_dtype = torch.int64 + + is_compressed_sparse_layout = layout in {torch.sparse_csr, torch.sparse_csc, torch.sparse_bsr, torch.sparse_bsc} + + if output_tensor: + for args, kwargs in self.generate_simple_inputs(layout, device=device, dtype=dtype, index_dtype=index_dtype, + enable_batch=enable_batch, enable_hybrid=enable_hybrid, + enable_zero_sized=enable_zero_sized, + enable_non_contiguous_indices=enable_non_contiguous_indices, + enable_non_contiguous_values=enable_non_contiguous_values, + enable_batch_variable_nse=enable_batch_variable_nse, + output_tensor=False): + if layout is torch.strided: + assert len(args) == 1 + size = kwargs.pop('size', None) # to ensure that a zero-sized tensor has the desired shape + assert size is not None + yield args[0].reshape(size) + elif layout is torch.sparse_coo: + yield torch.sparse_coo_tensor(*args, **kwargs) + elif is_compressed_sparse_layout: + kwargs.update(layout=layout) + yield torch.sparse_compressed_tensor(*args, **kwargs) + else: + assert 0 # unreachable + return + + def get_blockpattern(pattern, blocksize): + basesize = pattern.shape + assert basesize[0] % blocksize[0] == 0, (basesize, blocksize) + assert basesize[1] % blocksize[1] == 0, (basesize, blocksize) + blockpattern = pattern.reshape(-1, + blocksize[0], + basesize[1] // blocksize[1], + blocksize[1]).transpose(-3, -2).any(-1).any(-1) + block_ids = torch.arange(1, blockpattern.numel() + 1).reshape(blockpattern.shape) + return (blockpattern != 0) * block_ids + + def get_sparse_data(pattern): + basesize = pattern.shape + assert len(basesize) == 2, basesize # pattern is expected to be a matrix + + # We cannot use `torch.sparse_xyz_tensor(pattern)` to + # compute the sparse layout indices and values because + # generate_simple_inputs is used to generate the inputs to + # test `torch.sparse_xyz_tensor` factory functions, so + # we'll compute the indices and values independently of + # the factory functions. 
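+ # Concretely: COO indices come from torch.where(pattern != 0); CSR
+ # crow_indices are the cumulative sum of per-row nonzero counts (with a
+ # leading zero); CSC indices are derived the same way from the transposed
+ # pattern; and values are 1..nnz so that get_sparse_data_with_block below
+ # can reorder blocks between BSR and BSC.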
+ + indices = torch.where(pattern != 0) + coo_indices = torch.stack(indices) + crow_indices = torch.zeros(basesize[0] + 1, dtype=torch.int64) + crow_indices[1:] = torch.cumsum(coo_indices[0].bincount(minlength=basesize[0]), 0) + col_indices = coo_indices[1] + strided_values = torch.zeros(basesize, dtype=torch.int64) + + # the property of `values == range(1, 1+nnz)` is used in + # get_sparse_data_with_block to relate BSR and BSC values, + # so, don't change the following line: + values = torch.arange(1, 1 + len(indices[0]), dtype=torch.int64) + strided_values[indices] = values + + indices_T = torch.where(pattern.transpose(0, 1) != 0) + coo_indices_T = torch.stack(indices_T) + ccol_indices = torch.zeros(basesize[1] + 1, dtype=torch.int64) + ccol_indices[1:] = torch.cumsum(coo_indices_T[0].bincount(minlength=basesize[1]), 0) + row_indices = coo_indices_T[1] + csc_values = strided_values.transpose(0, 1)[indices_T] + + return {torch.sparse_coo: (coo_indices, values), + torch.sparse_csr: (crow_indices, col_indices, values), + torch.sparse_csc: (ccol_indices, row_indices, csc_values), + torch.strided: (strided_values,)} + + def get_sparse_data_with_block(pattern, blocksize): + nonblock_data = get_sparse_data(pattern) + blockpattern = get_blockpattern(pattern, blocksize) + block_data = get_sparse_data(blockpattern) + + strided_values = nonblock_data[torch.strided][0] + block_indices = block_data[torch.sparse_coo][0] + bsr_values = torch.stack([strided_values[bi * blocksize[0]:(bi + 1) * blocksize[0], + bj * blocksize[1]:(bj + 1) * blocksize[1]] + for bi, bj in block_indices.transpose(0, 1)]) + + # here we use the property `values == range(1, 1+nnz)` and + # `values` relation to `csc_values` (see get_sparse_data) + # to get BSC blocks via reordering the BSR blocks: + bsc_values = bsr_values[block_data[torch.sparse_csc][2] - 1] + + return {torch.sparse_bsr: (*block_data[torch.sparse_csr][:2], bsr_values), + torch.sparse_bsc: (*block_data[torch.sparse_csc][:2], bsc_values), + **nonblock_data} + + def get_batch_sparse_data(pattern, blocksize): + size = pattern.shape + if len(size) <= 2: # non-batch + return get_sparse_data_with_block(pattern, blocksize) + + # batch data is created recursively: + batch_data = {} + for i, item in enumerate(pattern): + for layout, d in get_batch_sparse_data(item, blocksize).items(): + target = batch_data.get(layout) + if layout is torch.sparse_coo: + # a "batch COO" means a COO with the leading + # sparse dimensions interpreted as batch + # dimensions + ext_coo_indices1 = torch.cat((torch.full((1, len(d[1])), i, dtype=torch.int64), d[0])) + if target is None: + target = batch_data[layout] = (ext_coo_indices1, d[1]) + else: + target[0].set_(torch.cat((target[0], ext_coo_indices1), 1)) + target[1].set_(torch.cat((target[1], d[1]))) + else: + if target is None: + target = batch_data[layout] = tuple(d[j].unsqueeze(0) for j in range(len(d))) + else: + for j in range(len(d)): + target[j].set_(torch.cat((target[j], d[j].unsqueeze(0)))) + return batch_data + + def generate_values(base, densesize): + """Generates a tensor of shape densesize with values equal to + + base + i_1 * 10^0 + ... + i_d * 10^{d - 1} + + at indices i_1, ..., i_d (with 0 <= i_j < densesize[j] for any 1 <= j <= + len(densesize)) + + This mapping produces unique values as long as + densesize[i] < 10 for all i in range(len(densesize)). 
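+
+ For example (with illustrative arguments), generate_values(3, (2, 2))
+ returns [[3, 13], [4, 14]]: the entry at (i_1, i_2) is 3 + i_1 + 10 * i_2.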
+ """ + + if not densesize: + return base + if not isinstance(base, int) and base.ndim > 0: + return torch.stack([generate_values(b, densesize) for b in base]) + if base == 0: + return torch.zeros(densesize, dtype=torch.int64) + r = torch.arange(densesize[0], dtype=torch.int64) + for i, d in enumerate(densesize[1:]): + y = torch.arange(d, dtype=torch.int64) * (10 ** (i + 1)) + r = r[..., None] + y[None, ...] + r.add_(base) + return r + + if patterns is None: + # A pattern is a 3-tuple with the following items: + # + # - a list of integers with the depth of two or more. The + # integers define the sparsity patterns of the generated + # inputs: zero values correspond to unspecified + # elements/blocks, and non-zero values to the specified + # elements. + # + # For debugging convenience, the elements with the same + # value typically belong to the same block. However, it + # is not a hard requirement: as long as the shape of a + # pattern divides with block sizes, the pattern will be + # a valid one. + # + # If the depth of the list is larger than two, inputs + # with batch dimensions will be generated. + # + # - a list of 2-tuples of block sizes, used to generate + # BSR/BSC tensors with various block size parameters + # + # - a list of tuples of dense dimensions, used to generate + # hybrid tensors with various dense dimensions + # + patterns = [ + # a simple 3 x 2 tensor: non-hybrid, hybrid with 1 and 2 dense dimensions + ([[1, 2, 0], + [1, 0, 3]], [(2, 1), (1, 3)], [(), (2,), (4, 5)]), + # 2 x 3 batch of 3 x 2 tensors: non-hybrid and hybrid with 2 dense dimensions + ([[[[1, 2, 0], + [1, 0, 3]], + [[1, 2, 3], + [1, 0, 0]], + [[1, 0, 0], + [1, 2, 3]]], + [[[0, 2, 0], + [1, 2, 3]], + [[1, 0, 3], + [1, 2, 0]], + [[1, 2, 3], + [0, 2, 0]]]], [(2, 1), (2, 3)], [(), (2,)]), + # tensor with non-trivial blocksize + ([[0, 1, 0, 2, 0, 2], + [0, 1, 0, 0, 2, 0], + [3, 3, 3, 0, 0, 0], + [0, 0, 0, 0, 0, 0], + [0, 5, 0, 6, 6, 6], + [5, 0, 5, 6, 6, 6], + [0, 0, 0, 0, 8, 8], + [7, 7, 7, 0, 8, 8]], [(2, 3)], [(), (4, 5)]), + # batch tensor with variable NSE + # Requires https://github.com/pytorch/pytorch/pull/84843 or similar. 
+ ([[[1, 2], + [3, 4]], + [[1, 0], + [0, 0]]], [(1, 1)], ([()] if enable_batch_variable_nse else []))] + + def non_contiguous_copy(t, dim=-1, offset=0): + # return a copy of t that is non-contiguous along the + # given dimension and with the given storage offset + self.assertTrue(t.is_contiguous()) + if dim < 0: + dim = dim + t.ndim + assert dim >= 0 and dim < t.ndim + step = max(2, offset + 1) + tmp = torch.zeros((*t.shape[:dim], t.shape[dim] * step, *t.shape[dim + 1:]), dtype=t.dtype, device=t.device) + dim_slices = (*((slice(None),) * dim), slice(offset, None, step)) + r = tmp[dim_slices].copy_(t) + self.assertFalse(r.is_contiguous()) + self.assertEqual(t, r) + return r + + # the main loop of the method: + for pattern, blocksizes, densesizes in patterns: + if not enable_hybrid: + densesizes = [s for s in densesizes if not s] + if not (densesizes and blocksizes): + continue + pattern = torch.tensor(pattern, dtype=torch.int64) + if not enable_batch and pattern.ndim > 2: + continue + for blocksize in blocksizes: + data = get_batch_sparse_data(pattern, blocksize)[layout] + for densesize in densesizes: + indices = [a.to(device=device, dtype=index_dtype) for a in data[:-1]] + values = generate_values(data[-1], densesize).to(device=device, dtype=dtype) + yield (*indices, values), dict(device=device, dtype=dtype, + size=pattern.shape + densesize) + + if enable_non_contiguous_indices and pattern.ndim > 2: + # sparse compressed indices can be sliced only along batch dimensions + for (dim, offset) in {(0, 1), (-2, 0)}: + indices_copy = [non_contiguous_copy(a, dim=dim, offset=offset) for a in indices] + yield (*indices_copy, values), dict(device=device, dtype=dtype, + size=pattern.shape + densesize) + + if enable_non_contiguous_values: + values_copy = non_contiguous_copy(values, dim=-1, offset=1) + yield (*indices_copy, values_copy), dict(device=device, dtype=dtype, + size=pattern.shape + densesize) + + if enable_non_contiguous_values: + values_copy = non_contiguous_copy(values, dim=-1, offset=1) + yield (*indices, values_copy), dict(device=device, dtype=dtype, + size=pattern.shape + densesize) + + # zero-sized tensor inputs, non-batch, non-hybrid/hybrid + if enable_zero_sized: + for basesize, blocksizes, densesizes in [ + ((2, 0), [(1, 2)], [(), (2,), (2, 3)] if enable_hybrid else [()]), + ((0, 2), [(1, 2), (2, 1), (3, 2)], [()]), + ((0, 0), [(1, 2)], [()]), + ]: + for blocksize in blocksizes: + for densesize in densesizes: + if layout == torch.strided: + indices = () + values = torch.empty((basesize + densesize), device=device, dtype=dtype) + elif layout == torch.sparse_coo: + indices = (torch.empty(len(basesize), 0, device=device, dtype=index_dtype),) + values = torch.empty((0, *densesize), device=device, dtype=dtype) + elif layout == torch.sparse_csr: + crow_indices = torch.tensor([0] * (basesize[0] + 1), device=device, dtype=index_dtype) + col_indices = torch.empty(0, device=device, dtype=index_dtype) + indices = (crow_indices, col_indices) + values = torch.empty((0, *densesize), device=device, dtype=dtype) + elif layout == torch.sparse_csc: + ccol_indices = torch.tensor([0] * (basesize[1] + 1), device=device, dtype=index_dtype) + row_indices = torch.empty(0, device=device, dtype=index_dtype) + indices = (ccol_indices, row_indices) + values = torch.empty((0, *densesize), device=device, dtype=dtype) + elif layout == torch.sparse_bsr: + crow_indices = torch.tensor([0] * (basesize[0] // blocksize[0] + 1), device=device, dtype=index_dtype) + col_indices = torch.empty(0, device=device, 
dtype=index_dtype) + indices = (crow_indices, col_indices) + values = torch.empty((0, *blocksize, *densesize), device=device, dtype=dtype) + elif layout == torch.sparse_bsc: + ccol_indices = torch.tensor([0] * (basesize[1] // blocksize[1] + 1), device=device, dtype=index_dtype) + row_indices = torch.empty(0, device=device, dtype=index_dtype) + indices = (ccol_indices, row_indices) + values = torch.empty((0, *blocksize, *densesize), device=device, dtype=dtype) + else: + assert 0 # unreachable + yield (*indices, values), dict(device=device, dtype=dtype, size=basesize + densesize) + + def safeToDense(self, t): + # coalesce is only implemented for COO + if t.layout == torch.sparse_coo: + t = t.coalesce() + return t.to_dense() + + # Compares a torch function with a reference function for a given sample input (object of SampleInput) + # Note: only values are compared, type comparison is not done here + def compare_with_reference(self, torch_fn, ref_fn, sample_input, **kwargs): + numpy_sample = sample_input.numpy() + n_inp, n_args, n_kwargs = numpy_sample.input, numpy_sample.args, numpy_sample.kwargs + t_inp, t_args, t_kwargs = sample_input.input, sample_input.args, sample_input.kwargs + + actual = torch_fn(t_inp, *t_args, **t_kwargs) + expected = ref_fn(n_inp, *n_args, **n_kwargs) + + self.assertEqual(actual, expected, exact_device=False, **kwargs) + + # Compares the given Torch and NumPy functions on the given tensor-like object. + # NOTE: both torch_fn and np_fn should be functions that take a single + # tensor (array). If the torch and/or NumPy function require additional + # arguments then wrap the function in a lambda or pass a partial function. + # TODO: add args/kwargs for passing to assertEqual (e.g. rtol, atol) + def compare_with_numpy(self, torch_fn, np_fn, tensor_like, + device=None, dtype=None, **kwargs): + assert TEST_NUMPY + + if isinstance(tensor_like, torch.Tensor): + assert device is None + assert dtype is None + t_cpu = tensor_like.detach().cpu() + if t_cpu.dtype is torch.bfloat16: + t_cpu = t_cpu.float() + a = t_cpu.numpy() + t = tensor_like + else: + d = copy.copy(torch_to_numpy_dtype_dict) + d[torch.bfloat16] = np.float32 + a = np.array(tensor_like, dtype=d[dtype]) + t = torch.tensor(tensor_like, device=device, dtype=dtype) + + np_result = np_fn(a) + torch_result = torch_fn(t).cpu() + + # Converts arrays to tensors + if isinstance(np_result, np.ndarray): + try: + np_result = torch.from_numpy(np_result) + except Exception: + # NOTE: copying an array before conversion is necessary when, + # for example, the array has negative strides. + np_result = torch.from_numpy(np_result.copy()) + if t.dtype is torch.bfloat16 and torch_result.dtype is torch.bfloat16 and np_result.dtype is torch.float: + torch_result = torch_result.to(torch.float) + + self.assertEqual(np_result, torch_result, **kwargs) + + def assertEqualIgnoreType(self, *args, **kwargs) -> None: + # If you are seeing this function used, that means test is written wrongly + # and deserves detailed investigation + return self.assertEqual(*args, exact_dtype=False, **kwargs) + + def assertEqualBroadcasting(self, x, y, *args, **kwargs) -> None: + r"""Tests if tensor x equals to y, if y to be broadcast to x.shape. + """ + if not isinstance(y, Iterable): + # int, float, etc. 
or different shape tensors + y = torch.ones_like(x) * y + if not isinstance(y, torch.Tensor): + # iterable, but not a tensor + y = torch.ones_like(x) * torch.tensor(y) + return self.assertEqual(x, y, *args, **kwargs) + + def assertEqual( + self, + x, + y, + msg: Optional[Union[str, Callable[[str], str]]] = None, + *, + atol: Optional[float] = None, + rtol: Optional[float] = None, + equal_nan=True, + exact_dtype=True, + # TODO: default this to True + exact_device=False, + exact_layout=False, + exact_stride=False, + exact_is_coalesced=False + ): + # Hide this function from `pytest`'s traceback + __tracebackhide__ = True + + # numpy's dtypes are a superset of what PyTorch supports. In case we encounter an unsupported dtype, we fall + # back to an elementwise comparison. Note that this has to happen here and not for example in + # `TensorOrArrayPair`, since at that stage we can no longer split the array into its elements and perform + # multiple comparisons. + if any( + isinstance(input, np.ndarray) and not has_corresponding_torch_dtype(input.dtype) for input in (x, y) + ): + def to_list(input): + return input.tolist() if isinstance(input, (torch.Tensor, np.ndarray)) else list(input) + + x = to_list(x) + y = to_list(y) + # When comparing a sequence of numbers to a tensor, we need to convert the sequence to a tensor here. + # Otherwise, the pair origination of `are_equal` will fail, because the sequence is recognized as container + # that should be checked elementwise while the tensor is not. + elif isinstance(x, torch.Tensor) and isinstance(y, Sequence): + y = torch.as_tensor(y, dtype=x.dtype, device=x.device) + elif isinstance(x, Sequence) and isinstance(y, torch.Tensor): + x = torch.as_tensor(x, dtype=y.dtype, device=y.device) + + # If x or y are tensors and nested then we unbind them to a list of tensors this should allow us to compare + # a nested tensor to a nested tensor and a nested tensor to a list of expected tensors + if isinstance(x, torch.Tensor) and x.is_nested: + x = x.unbind() + if isinstance(y, torch.Tensor) and y.is_nested: + y = y.unbind() + + error_metas = not_close_error_metas( + x, + y, + pair_types=( + NonePair, + RelaxedBooleanPair, + RelaxedNumberPair, + TensorOrArrayPair, + TypedStoragePair, + StringPair, + SetPair, + TypePair, + ObjectPair, + ), + sequence_types=( + Sequence, + Sequential, + ModuleList, + ParameterList, + ScriptList, + torch.utils.data.dataset.Subset, + ), + mapping_types=(Mapping, ModuleDict, ParameterDict, ScriptDict), + rtol=rtol, + rtol_override=self.rel_tol, + atol=atol, + atol_override=self.precision, + equal_nan=equal_nan, + check_device=exact_device, + check_dtype=exact_dtype, + check_layout=exact_layout, + check_stride=exact_stride, + check_is_coalesced=exact_is_coalesced, + ) + + if error_metas: + # See [ErrorMeta Cycles] + error_metas = [error_metas] + # TODO: compose all metas into one AssertionError + raise error_metas.pop()[0].to_error( + # This emulates unittest.TestCase's behavior if a custom message passed and + # TestCase.longMessage (https://docs.python.org/3/library/unittest.html#unittest.TestCase.longMessage) + # is True (default) + (lambda generated_msg: f"{generated_msg}\n{msg}") if isinstance(msg, str) and self.longMessage else msg + ) + + def assertNotEqual(self, x, y, msg: Optional[str] = None, *, # type: ignore[override] + atol: Optional[float] = None, rtol: Optional[float] = None, **kwargs) -> None: + with self.assertRaises(AssertionError, msg=msg): + self.assertEqual(x, y, msg, atol=atol, rtol=rtol, **kwargs) + + def 
assertEqualTypeString(self, x, y) -> None: + # This API is used simulate deprecated x.type() == y.type() + self.assertEqual(x.device, y.device) + self.assertEqual(x.dtype, y.dtype) + self.assertEqual(x.is_sparse, y.is_sparse) + + def assertObjectIn(self, obj: Any, iterable: Iterable[Any]) -> None: + for elem in iterable: + if id(obj) == id(elem): + return + raise AssertionError("object not found in iterable") + + # Reimplemented to provide special behavior when + # _ignore_not_implemented_error is True + def assertRaises(self, expected_exception, *args, **kwargs): + if self._ignore_not_implemented_error: + context: Optional[AssertRaisesContextIgnoreNotImplementedError] = \ + AssertRaisesContextIgnoreNotImplementedError(expected_exception, self) # type: ignore[call-arg] + try: + return context.handle('assertRaises', args, kwargs) # type: ignore[union-attr] + finally: + # see https://bugs.python.org/issue23890 + context = None + else: + return super().assertRaises(expected_exception, *args, **kwargs) + + # Reimplemented to provide special behavior when + # _ignore_not_implemented_error is True + def assertRaisesRegex(self, expected_exception, expected_regex, *args, **kwargs): + # Verifies that an exception with the type expected_exception and message + # matching the regular expression defined by expected_regex is thrown. + # If the test is instantiated for a non-native device type (like XLA) + # then the message is not validated. + + # Checks whether the test is instantiated for a device type by testing + # if the test class has defined the device_type attribute and, + # if so, tests whether the instantiated device type is native or not + if hasattr(self, 'device_type') and self.device_type not in NATIVE_DEVICES and self.device_type != "mps": # type: ignore[attr-defined] + # empty string matches any string + expected_regex = '' + + if self._ignore_not_implemented_error: + context = AssertRaisesContextIgnoreNotImplementedError( # type: ignore[call-arg] + expected_exception, self, expected_regex) + return context.handle('assertRaisesRegex', args, kwargs) # type: ignore[attr-defined] + else: + return super().assertRaisesRegex(expected_exception, expected_regex, *args, **kwargs) + + # Verifies that no unraisable exceptions are raised by callable. Unlike regular + # exceptions, these do not actually propagate to the caller and are + # suppressed. We must test for them specially. + def assertNoUnraisable(self, callable, *args, **kwargs): + raised = None + + def record_unraisable(unraisable): + nonlocal raised + raised = unraisable + + # Disable GC when running the callable to prevent spurious flakiness + # from unlucky GCs inside the callable + prev = gc.isenabled() + gc.disable() + try: + with unittest.mock.patch("sys.unraisablehook", record_unraisable): + callable(*args, **kwargs) + finally: + if prev: + gc.enable() + + self.assertIsNone(raised) + + # TODO: Support context manager interface + # NB: The kwargs forwarding to callable robs the 'subname' parameter. + # If you need it, manually apply your callable in a lambda instead. 
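+ # E.g. (with a hypothetical fn):
+ #   self.assertExpectedRaises(RuntimeError, lambda: fn(x, subname="inner"))
+ # keeps fn's own 'subname' kwarg from being consumed by this helper.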
+ def assertExpectedRaises(self, exc_type, callable, *args, **kwargs): + subname = None + if 'subname' in kwargs: + subname = kwargs['subname'] + del kwargs['subname'] + try: + callable(*args, **kwargs) + except exc_type as e: + self.assertExpected(str(e), subname) + return + # Don't put this in the try block; the AssertionError will catch it + self.fail(msg="Did not raise when expected to") + + def assertNotWarn(self, callable, msg=''): + r""" + Test if :attr:`callable` does not raise a warning. + """ + with warnings.catch_warnings(record=True) as ws: + warnings.simplefilter("always") # allow any warning to be raised + with set_warn_always_context(True): + callable() + self.assertTrue(len(ws) == 0, msg) + + @contextmanager + def assertWarnsOnceRegex(self, category, regex=''): + """Context manager for code that *must always* warn + + This filters expected warnings from the test and fails if + the expected warning is not caught. It uses set_warn_always() to force + TORCH_WARN_ONCE to behave like TORCH_WARN + """ + pattern = re.compile(regex) + with warnings.catch_warnings(record=True) as ws: + warnings.simplefilter("always") # allow any warning to be raised + with set_warn_always_context(True): + yield + if len(ws) == 0: + self.fail('no warning caught') + self.assertTrue(any(type(w.message) is category for w in ws)) + self.assertTrue( + any(re.match(pattern, str(w.message)) for w in ws), + f'{pattern}, {[w.message for w in ws if type(w.message) is category]}') + + def assertExpected(self, s, subname=None): + r""" + Test that a string matches the recorded contents of a file + derived from the name of this test and subname. This file + is placed in the 'expect' directory in the same directory + as the test script. You can automatically update the recorded test + output using --accept. + + If you call this multiple times in a single function, you must + give a unique subname each time. + """ + if not isinstance(s, str): + raise TypeError("assertExpected is strings only") + + def remove_prefix(text, prefix): + if text.startswith(prefix): + return text[len(prefix):] + return text + # NB: we take __file__ from the module that defined the test + # class, so we place the expect directory where the test script + # lives, NOT where test/common_utils.py lives. 
This doesn't matter in + # PyTorch where all test scripts are in the same directory as + # test/common_utils.py, but it matters in onnx-pytorch + module_id = self.__class__.__module__ + munged_id = remove_prefix(self.id(), module_id + ".") + test_file = os.path.realpath(sys.modules[module_id].__file__) + expected_file = os.path.join(os.path.dirname(test_file), + "expect", + munged_id) + + subname_output = "" + if subname: + expected_file += "-" + subname + subname_output = f" ({subname})" + expected_file += ".expect" + expected = None + + def accept_output(update_type): + print(f"Accepting {update_type} for {munged_id}{subname_output}:\n\n{s}") + with open(expected_file, 'w') as f: + # Adjust for producer_version, leave s unmodified + s_tag = re.sub(r'(producer_version): "[0-9.]*"', + r'\1: "CURRENT_VERSION"', s) + f.write(s_tag) + + try: + with open(expected_file) as f: + expected = f.read() + except OSError as e: + if e.errno != errno.ENOENT: + raise + elif expecttest.ACCEPT: + return accept_output("output") + else: + raise RuntimeError( + f"I got this output for {munged_id}{subname_output}:\n\n{s}\n\n" + "No expect file exists; to accept the current output, run:\n" + f"python {__main__.__file__} {munged_id} --accept") from None + + # a hack for JIT tests + if IS_WINDOWS: + expected = re.sub(r'CppOp\[(.+?)\]', 'CppOp[]', expected) + s = re.sub(r'CppOp\[(.+?)\]', 'CppOp[]', s) + + # Adjust for producer_version + expected = expected.replace( + 'producer_version: "CURRENT_VERSION"', + f'producer_version: "{torch.onnx.producer_version}"' + ) + if expecttest.ACCEPT: + if expected != s: + return accept_output("updated output") + else: + if hasattr(self, "assertMultiLineEqual"): + # Python 2.7 only + # NB: Python considers lhs "old" and rhs "new". + self.assertMultiLineEqual(expected, s) + else: + self.assertEqual(s, expected) + + def assertExpectedStripMangled(self, s, subname=None): + s = re.sub(r'__torch__[^ ]+', '', s) + self.assertExpected(s, subname) + + def assertGreaterAlmostEqual(self, first, second, places=None, msg=None, delta=None): + """Assert that ``first`` is greater than or almost equal to ``second``. + + The equality of ``first`` and ``second`` is determined in a similar way to + the ``assertAlmostEqual`` function of the standard library. + """ + if delta is not None and places is not None: + raise TypeError("specify delta or places not both") + + if first >= second: + return + + diff = second - first + if delta is not None: + if diff <= delta: + return + + standardMsg = f"{first} not greater than or equal to {second} within {delta} delta" + else: + if places is None: + places = 7 + + if round(diff, places) == 0: + return + + standardMsg = f"{first} not greater than or equal to {second} within {places} places" + + msg = self._formatMessage(msg, standardMsg) + raise self.failureException(msg) + + def assertAtenOp(self, onnx_model, operator, overload_name=""): + all_aten_nodes = [p for p in onnx_model.graph.node + if p.op_type == "ATen" and p.domain == "org.pytorch.aten"] + self.assertTrue(all_aten_nodes) + + for op in all_aten_nodes: + attrs = {attr.name: attr.s.decode() for attr in op.attribute} + if attrs.get("operator") == operator: + break + + self.assertEqual(attrs["operator"], operator) + self.assertEqual(attrs.get("overload_name", ""), overload_name) + + def check_nondeterministic_alert(self, fn, caller_name, should_alert=True): + '''Checks that an operation produces a nondeterministic alert when + expected while `torch.use_deterministic_algorithms(True)` is set. 
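+ A typical call (with a hypothetical some_op) is
+ self.check_nondeterministic_alert(lambda: some_op(args), 'some_op').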
+ + Args: + fn (callable): Function to check for a nondeterministic alert + + caller_name (str): Name of the operation that produces the + nondeterministic alert. This name is expected to appear at the + beginning of the error/warning message. + + should_alert (bool, optional): If True, then the check will only pass + if calling `fn` produces a nondeterministic error/warning with the + expected message. If False, then the check will only pass if + calling `fn` does not produce an error. Default: `True`. + ''' + + alert_message = '^' + caller_name + ' does not have a deterministic implementation, but you set' + + # Check that errors are thrown correctly + with DeterministicGuard(True): + if should_alert: + with self.assertRaisesRegex( + RuntimeError, + alert_message, + msg='expected a non-deterministic error, but it was not raised'): + fn() + + else: + # If a nondeterministic error is not expected, make sure + # that it is not raised + try: + fn() + except RuntimeError as e: + if 'does not have a deterministic implementation' in str(e): + self.fail( + 'did not expect non-deterministic error message, ' + + 'but got one anyway: "' + str(e) + '"') + # Reraise exceptions unrelated to nondeterminism + raise + + # Check that warnings are thrown correctly + with DeterministicGuard(True, warn_only=True): + if should_alert: + with self.assertWarnsRegex( + UserWarning, + alert_message): + fn() + else: + with warnings.catch_warnings(record=True) as w: + warnings.simplefilter("always") + fn() + for warning in w: + if isinstance(warning, UserWarning): + self.assertTrue(re.search(alert_message, str(warning)) is None) + + # run code in subprocess and capture exceptions. + @staticmethod + def run_process_no_exception(code, env=None): + import subprocess + + popen = subprocess.Popen( + [sys.executable, '-c', code], + stdout=subprocess.PIPE, + stderr=subprocess.PIPE, + env=env) + (stdout, stderr) = popen.communicate() + return (stdout, stderr) + + # returns captured stderr + @staticmethod + def runWithPytorchAPIUsageStderr(code): + env = os.environ.copy() + env["PYTORCH_API_USAGE_STDERR"] = "1" + # remove CI flag since this is a wrapped test process. + # CI flag should be set in the parent process only. + if "CI" in env.keys(): + del env["CI"] + (stdout, stderr) = TestCase.run_process_no_exception(code, env=env) + return stderr.decode('ascii') + + +def download_file(url, binary=True): + from urllib.parse import urlsplit + from urllib import request, error + + filename = os.path.basename(urlsplit(url)[2]) + data_dir = get_writable_path(os.path.join(os.path.dirname(__file__), 'data')) + path = os.path.join(data_dir, filename) + + if os.path.exists(path): + return path + try: + data = request.urlopen(url, timeout=15).read() + with open(path, 'wb' if binary else 'w') as f: + f.write(data) + return path + except error.URLError as e: + msg = f"could not download test file '{url}'" + warnings.warn(msg, RuntimeWarning) + raise unittest.SkipTest(msg) from e + +def find_free_port(): + """ + Finds an available port and returns that port number. 
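+
+ A hypothetical sketch of the intended use together with the retry decorator
+ described in the NOTE below:
+
+ >>> # xdoctest: +SKIP("port availability is racy")
+ >>> @retry_on_connect_failures
+ ... def _init_pg():
+ ...     torch.distributed.init_process_group(
+ ...         "gloo", init_method=f"tcp://localhost:{find_free_port()}",
+ ...         rank=0, world_size=1)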
+ + NOTE: If this function is being used to allocate a port to Store (or + indirectly via init_process_group or init_rpc), it should be used + in conjuction with the `retry_on_connect_failures` decorator as there is a potential + race condition where the allocated port may become unavailable before it can be used + """ + with closing(socket.socket(socket.AF_INET, socket.SOCK_STREAM)) as sock: + sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1) + sock.bind(('localhost', 0)) + _, port = sock.getsockname() + return port + +# Errors that we can get in c10d initialization for which we should retry tests for. +ADDRESS_IN_USE = "Address already in use" +CONNECT_TIMEOUT = "connect() timed out." + +def retry_on_connect_failures(func=None, connect_errors=(ADDRESS_IN_USE)): + """Reruns a test if the test returns a RuntimeError and the exception + contains one of the strings in connect_errors.""" + # This if block is executed when using this function as a decorator with arguments. + if func is None: + return partial(retry_on_connect_failures, connect_errors=connect_errors) + + @wraps(func) + def wrapper(*args, **kwargs): + n_retries = 10 + tries_remaining = n_retries + while True: + try: + return func(*args, **kwargs) + except RuntimeError as error: + if any(connect_error in str(error) for connect_error in connect_errors): + tries_remaining -= 1 + if tries_remaining == 0: + raise RuntimeError(f"Failing after {n_retries} retries with error: {str(error)}") from error + time.sleep(random.random()) + continue + raise + return wrapper + + +# Decorator to retry upon certain Exceptions. +def retry(ExceptionToCheck, tries=3, delay=3, skip_after_retries=False): + def deco_retry(f): + @wraps(f) + def f_retry(*args, **kwargs): + mtries, mdelay = tries, delay + while mtries > 1: + try: + return f(*args, **kwargs) + except ExceptionToCheck as e: + msg = "%s, Retrying in %d seconds..." % (str(e), mdelay) + print(msg) + time.sleep(mdelay) + mtries -= 1 + try: + return f(*args, **kwargs) + except ExceptionToCheck as e: + raise unittest.SkipTest(f"Skipping after {tries} consecutive {str(e)}") from e if skip_after_retries else e + return f_retry # true decorator + return deco_retry + + +# FIXME: modernize these to be consistent with make_tensor +# and review including them in torch.testing +# Methods for matrix generation + +def random_square_matrix_of_rank(l, rank, dtype=torch.double, device='cpu'): + assert rank <= l + A = torch.randn(l, l, dtype=dtype, device=device) + u, s, vh = torch.linalg.svd(A, full_matrices=False) + for i in range(l): + if i >= rank: + s[i] = 0 + elif s[i] == 0: + s[i] = 1 + return (u * s.to(dtype).unsqueeze(-2)) @ vh + +def random_well_conditioned_matrix(*shape, dtype, device, mean=1.0, sigma=0.001): + """ + Returns a random rectangular matrix (batch of matrices) + with singular values sampled from a Gaussian with + mean `mean` and standard deviation `sigma`. + The smaller the `sigma`, the better conditioned + the output matrix is. 
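+
+ For example, the following creates a batch of two 5 x 4 matrices:
+ >>> # xdoctest: +SKIP("undefined variables")
+ >>> matrices = random_well_conditioned_matrix(2, 5, 4, dtype=dtype, device=device)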
+ """ + primitive_dtype = { + torch.float: torch.float, + torch.double: torch.double, + torch.cfloat: torch.float, + torch.cdouble: torch.double + } + x = torch.rand(shape, dtype=dtype, device=device) + m = x.size(-2) + n = x.size(-1) + u, _, vh = torch.linalg.svd(x, full_matrices=False) + s = (torch.randn(*(shape[:-2] + (min(m, n),)), dtype=primitive_dtype[dtype], device=device) * sigma + mean) \ + .sort(-1, descending=True).values.to(dtype) + return (u * s.unsqueeze(-2)) @ vh + +# Returns a noncontiguous (tensor with the same shape and values as t +# The noncontiguous tensor is constructed such that elements in the innermost +# dimension are separated by zeros or (whenever possible) nans +# TODO: consider more complicated noncontiguity schemes +def noncontiguous_like(t): + # Short-circuits if t is already noncontiguous + if not t.is_contiguous(): + return t + + # Choose a "weird" value that won't be accessed + if t.dtype.is_floating_point or t.dtype.is_complex: + value = math.nan + elif t.dtype == torch.bool: + value = True + else: + value = 12 + + result = t.new_empty(t.shape + (2,)) + result[..., 0] = value + result[..., 1] = t.detach() + result = result[..., 1] + result.requires_grad_(t.requires_grad) + return result + +# TODO: remove this (prefer make_symmetric_matrices below) +def random_symmetric_matrix(l, *batches, **kwargs): + dtype = kwargs.get('dtype', torch.double) + device = kwargs.get('device', 'cpu') + A = torch.randn(*(batches + (l, l)), dtype=dtype, device=device) + A = (A + A.mT).div_(2) + return A + +# Creates a symmetric matrix or batch of symmetric matrices +# Shape must be a square matrix or batch of square matrices +def make_symmetric_matrices(*shape, device, dtype): + assert shape[-1] == shape[-2] + t = make_tensor(shape, device=device, dtype=dtype) + t = (t + t.mT).div_(2) + return t + +def random_hermitian_matrix(l, *batches, **kwargs): + dtype = kwargs.get('dtype', torch.double) + device = kwargs.get('device', 'cpu') + A = torch.randn(*(batches + (l, l)), dtype=dtype, device=device) + A = (A + A.mH).div_(2) + return A + + +def random_symmetric_psd_matrix(l, *batches, **kwargs): + """ + Returns a batch of random symmetric positive-semi-definite matrices. + The shape of the result is batch_dims + (matrix_size, matrix_size) + The following example creates a tensor of size 2 x 4 x 3 x 3 + >>> # xdoctest: +SKIP("undefined variables") + >>> matrices = random_symmetric_psd_matrix(3, 2, 4, dtype=dtype, device=device) + """ + dtype = kwargs.get('dtype', torch.double) + device = kwargs.get('device', 'cpu') + A = torch.randn(*(batches + (l, l)), dtype=dtype, device=device) + return A @ A.mT + + +def random_hermitian_psd_matrix(matrix_size, *batch_dims, dtype=torch.double, device='cpu'): + """ + Returns a batch of random Hermitian positive-semi-definite matrices. 
+ The shape of the result is batch_dims + (matrix_size, matrix_size) + The following example creates a tensor of size 2 x 4 x 3 x 3 + >>> # xdoctest: +SKIP("undefined variables") + >>> matrices = random_hermitian_psd_matrix(3, 2, 4, dtype=dtype, device=device) + """ + A = torch.randn(*(batch_dims + (matrix_size, matrix_size)), dtype=dtype, device=device) + return A @ A.mH + + +# TODO: remove this (prefer make_symmetric_pd_matrices below) +def random_symmetric_pd_matrix(matrix_size, *batch_dims, **kwargs): + dtype = kwargs.get('dtype', torch.double) + device = kwargs.get('device', 'cpu') + A = torch.randn(*(batch_dims + (matrix_size, matrix_size)), + dtype=dtype, device=device) + return torch.matmul(A, A.mT) \ + + torch.eye(matrix_size, dtype=dtype, device=device) * 1e-5 + + +# Creates a symmetric positive-definite matrix or batch of +# such matrices +def make_symmetric_pd_matrices(*shape, device, dtype): + assert shape[-1] == shape[-2] + t = make_tensor(shape, device=device, dtype=dtype) + i = torch.eye(shape[-1], device=device, dtype=dtype) * 1e-5 + return t @ t.mT + i + +def random_hermitian_pd_matrix(matrix_size, *batch_dims, dtype, device): + """ + Returns a batch of random Hermitian positive-definite matrices. + The shape of the result is batch_dims + (matrix_size, matrix_size) + The following example creates a tensor of size 2 x 4 x 3 x 3 + >>> # xdoctest: +SKIP("undefined variables") + >>> matrices = random_hermitian_pd_matrix(3, 2, 4, dtype=dtype, device=device) + """ + A = torch.randn(*(batch_dims + (matrix_size, matrix_size)), + dtype=dtype, device=device) + return A @ A.mH + torch.eye(matrix_size, dtype=dtype, device=device) + +# Creates a full rank matrix with distinct singular values or +# a batch of such matrices +def make_fullrank_matrices_with_distinct_singular_values(*shape, device, dtype, requires_grad=False): + with torch.no_grad(): + t = make_tensor(shape, device=device, dtype=dtype) + u, _, vh = torch.linalg.svd(t, full_matrices=False) + real_dtype = t.real.dtype if t.dtype.is_complex else t.dtype + k = min(shape[-1], shape[-2]) + # We choose the singular values to be "around one" + # This is to make the matrix well conditioned + # s = [2, 3, ..., k+1] + s = torch.arange(2, k + 2, dtype=real_dtype, device=device) + # s = [2, -3, 4, ..., (-1)^k k+1] + s[1::2] *= -1. + # 1 + 1/s so that the singular values are in the range [2/3, 3/2] + # This gives a condition number of 9/4, which should be good enough + s.reciprocal_().add_(1.) + # Note that the singular values need not be ordered in an SVD so + # we don't need need to sort S + x = (u * s.to(u.dtype)) @ vh + x.requires_grad_(requires_grad) + return x + +def random_matrix(rows, columns, *batch_dims, **kwargs): + """Return rectangular matrix or batches of rectangular matrices. 
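+
+ Singular values are spread over (0, 1]; with singular=True at least one of
+ them is set to zero (two when min(rows, columns) > 2) so that the result is
+ rank-deficient.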
+ + Parameters: + dtype - the data type + device - the device kind + singular - when True, the output will be singular + """ + dtype = kwargs.get('dtype', torch.double) + device = kwargs.get('device', 'cpu') + silent = kwargs.get("silent", False) + singular = kwargs.get("singular", False) + if silent and not torch._C.has_lapack: + return torch.ones(rows, columns, dtype=dtype, device=device) + + A = torch.randn(batch_dims + (rows, columns), dtype=dtype, device=device) + if A.numel() == 0: + return A + u, _, vh = torch.linalg.svd(A, full_matrices=False) + k = min(rows, columns) + s = torch.linspace(1 / (k + 1), 1, k, dtype=dtype, device=device) + if singular: + # make matrix singular + s[k - 1] = 0 + if k > 2: + # increase the order of singularity so that the pivoting + # in LU factorization will be non-trivial + s[0] = 0 + return (u * s.unsqueeze(-2)) @ vh + + +def random_lowrank_matrix(rank, rows, columns, *batch_dims, **kwargs): + """Return rectangular matrix or batches of rectangular matrices with + given rank. + """ + B = random_matrix(rows, rank, *batch_dims, **kwargs) + C = random_matrix(rank, columns, *batch_dims, **kwargs) + return B.matmul(C) + + +def random_sparse_matrix(rows, columns, density=0.01, **kwargs): + """Return rectangular random sparse matrix within given density. + + The density of the result approaches to given density as the size + of the matrix is increased and a relatively small value of density + is specified but higher than min(rows, columns)/(rows * columns) + for non-singular matrices. + """ + dtype = kwargs.get('dtype', torch.double) + device = kwargs.get('device', 'cpu') + singular = kwargs.get("singular", False) + + k = min(rows, columns) + nonzero_elements = max(min(rows, columns), int(rows * columns * density)) + + row_indices = [i % rows for i in range(nonzero_elements)] + column_indices = [i % columns for i in range(nonzero_elements)] + random.shuffle(column_indices) + indices = [row_indices, column_indices] + values = torch.randn(nonzero_elements, dtype=dtype, device=device) + # ensure that the diagonal dominates + values *= torch.tensor([-float(i - j)**2 for i, j in zip(*indices)], dtype=dtype, device=device).exp() + indices_tensor = torch.tensor(indices) + A = torch.sparse_coo_tensor(indices_tensor, values, (rows, columns), device=device) + return A.coalesce() + + +def random_sparse_pd_matrix(matrix_size, density=0.01, **kwargs): + """Return random sparse positive-definite matrix with given density. 
+
+    The eigenvalues of the matrix are defined as::
+        arange(1, matrix_size+1)/matrix_size
+
+    Algorithm:
+      A = diag(arange(1, matrix_size+1)/matrix_size)
+      while <the number of nonzeros in A is below the requested density>:
+          <pick random i != j in range(matrix_size) and theta in [0, 2*pi]>
+          R = <Givens rotation by theta in the (i, j) plane>
+          A = R^T A R
+    """
+    import math
+    torch = kwargs.get('torch', globals()['torch'])
+    dtype = kwargs.get('dtype', torch.double)
+    device = kwargs.get('device', 'cpu')
+    data = {(i, i): float(i + 1) / matrix_size
+            for i in range(matrix_size)}
+
+
+    def multiply(data, N, i, j, cs, sn, left=True):
+        for k in range(N):
+            if left:
+                ik, jk = (k, i), (k, j)
+            else:
+                ik, jk = (i, k), (j, k)
+            aik, ajk = data.get(ik, 0), data.get(jk, 0)
+            aik, ajk = cs * aik + sn * ajk, -sn * aik + cs * ajk
+            if aik:
+                data[ik] = aik
+            else:
+                data.pop(ik, None)
+            if ajk:
+                data[jk] = ajk
+            else:
+                data.pop(jk, None)
+
+    target_nnz = density * matrix_size * matrix_size
+    while len(data) < target_nnz:
+        i = random.randint(0, matrix_size - 1)
+        j = random.randint(0, matrix_size - 1)
+        if i != j:
+            theta = random.uniform(0, 2 * math.pi)
+            cs = math.cos(theta)
+            sn = math.sin(theta)
+            multiply(data, matrix_size, i, j, cs, sn, left=True)
+            multiply(data, matrix_size, i, j, cs, sn, left=False)
+    icoords, jcoords, values = [], [], []
+    for (i, j), v in sorted(data.items()):
+        icoords.append(i)
+        jcoords.append(j)
+        values.append(v)
+    indices_tensor = torch.tensor([icoords, jcoords])
+    return torch.sparse_coo_tensor(indices_tensor, values, (matrix_size, matrix_size), dtype=dtype, device=device)
+
+# FIXME: remove this by updating test suites using it
+def do_test_dtypes(self, dtypes, layout, device):
+    for dtype in dtypes:
+        if dtype != torch.float16:
+            out = torch.zeros((2, 3), dtype=dtype, layout=layout, device=device)
+            self.assertIs(dtype, out.dtype)
+            self.assertIs(layout, out.layout)
+            self.assertEqual(device, out.device)
+
+# FIXME: remove this by updating test suites using it
+def do_test_empty_full(self, dtypes, layout, device):
+    shape = torch.Size([2, 3])
+
+    def check_value(tensor, dtype, layout, device, value, requires_grad):
+        self.assertEqual(shape, tensor.shape)
+        self.assertIs(dtype, tensor.dtype)
+        self.assertIs(layout, tensor.layout)
+        self.assertEqual(tensor.requires_grad, requires_grad)
+        if tensor.is_cuda and device is not None:
+            self.assertEqual(device, tensor.device)
+        if value is not None:
+            fill = tensor.new(shape).fill_(value)
+            self.assertEqual(tensor, fill)
+
+    def get_int64_dtype(dtype):
+        module = '.'.join(str(dtype).split('.')[1:-1])
+        if not module:
+            return torch.int64
+        return operator.attrgetter(module)(torch).int64
+
+    default_dtype = torch.get_default_dtype()
+    check_value(torch.empty(shape), default_dtype, torch.strided, -1, None, False)
+    check_value(torch.full(shape, -5.), default_dtype, torch.strided, -1, None, False)
+    for dtype in dtypes:
+        for rg in {dtype.is_floating_point, False}:
+            int64_dtype = get_int64_dtype(dtype)
+            v = torch.empty(shape, dtype=dtype, device=device, layout=layout, requires_grad=rg)
+            check_value(v, dtype, layout, device, None, rg)
+            out = v.new()
+            check_value(torch.empty(shape, out=out, device=device, layout=layout, requires_grad=rg),
+                        dtype, layout, device, None, rg)
+            check_value(v.new_empty(shape), dtype, layout, device, None, False)
+            check_value(v.new_empty(shape, dtype=int64_dtype, device=device, requires_grad=False),
+                        int64_dtype, layout, device, None, False)
+            check_value(torch.empty_like(v), dtype, layout, device, None, False)
+            check_value(torch.empty_like(v, dtype=int64_dtype, layout=layout, device=device, requires_grad=False),
+                        int64_dtype, layout, device, None,
False) + + if dtype is not torch.float16 and layout != torch.sparse_coo: + fv = 3 + v = torch.full(shape, fv, dtype=dtype, layout=layout, device=device, requires_grad=rg) + check_value(v, dtype, layout, device, fv, rg) + check_value(v.new_full(shape, fv + 1), dtype, layout, device, fv + 1, False) + out = v.new() + check_value(torch.full(shape, fv + 2, out=out, device=device, layout=layout, requires_grad=rg), + dtype, layout, device, fv + 2, rg) + check_value(v.new_full(shape, fv + 3, dtype=int64_dtype, device=device, requires_grad=False), + int64_dtype, layout, device, fv + 3, False) + check_value(torch.full_like(v, fv + 4), dtype, layout, device, fv + 4, False) + check_value(torch.full_like(v, fv + 5, + dtype=int64_dtype, layout=layout, device=device, requires_grad=False), + int64_dtype, layout, device, fv + 5, False) + +# FIXME: improve load_tests() documentation here +running_script_path = None +def set_running_script_path(): + global running_script_path + try: + running_file = os.path.abspath(os.path.realpath(sys.argv[0])) + if running_file.endswith('.py'): # skip if the running file is not a script + running_script_path = running_file + except Exception: + pass + +def check_test_defined_in_running_script(test_case): + if running_script_path is None: + return + test_case_class_file = os.path.abspath(os.path.realpath(inspect.getfile(test_case.__class__))) + assert test_case_class_file == running_script_path, f"Class of loaded TestCase \"{test_case.id()}\" " \ + f"is not defined in the running script \"{running_script_path}\", but in \"{test_case_class_file}\". Did you " \ + "accidentally import a unittest.TestCase from another file?" + +def load_tests(loader, tests, pattern): + set_running_script_path() + test_suite = unittest.TestSuite() + for test_group in tests: + if not DISABLE_RUNNING_SCRIPT_CHK: + for test in test_group: + check_test_defined_in_running_script(test) + if test_group._tests: + test_suite.addTest(test_group) + return test_suite + +# FIXME: document this and move it to test_serialization +class BytesIOContext(io.BytesIO): + def __enter__(self): + return self + + def __exit__(self, *args): + pass + +# Tentative value for nondet_tol for gradcheck when backward implementation +# relies on nondeterministic operations, i.e., those listed here: +# https://pytorch.org/docs/stable/generated/torch.use_deterministic_algorithms.html +# +# For more information see https://github.com/pytorch/pytorch/issues/56202 +GRADCHECK_NONDET_TOL = 1e-12 + +TestEnvironment.def_flag("TEST_WITH_SLOW_GRADCHECK", env_var="PYTORCH_TEST_WITH_SLOW_GRADCHECK") + +skipIfSlowGradcheckEnv = unittest.skipIf( + TEST_WITH_SLOW_GRADCHECK, + "Tests that don't use gradcheck don't need to run on slow_gradcheck CI" +) + +def gradcheck(fn, inputs, **kwargs): + # Wrapper around gradcheck that enables certain keys by default. + # Use this testing-internal gradcheck instead of autograd.gradcheck so that new features like vmap and + # forward-mode AD are tested by default. We create this wrapper because we'd like to keep new checks + # to be disabled to default for the public-facing api to avoid breaking user code. + # + # All PyTorch devs doing testing should use this wrapper instead of autograd.gradcheck. 
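+    #
+    # Illustrative sketch of the defaulting below (the tensor `x` is hypothetical,
+    # e.g. x = torch.randn(3, dtype=torch.double, requires_grad=True)):
+    #
+    #   gradcheck(torch.sin, (x,))                   # fast_mode=True, check_batched_grad=True
+    #   gradcheck(torch.sin, (x,), fast_mode=None)   # None falls back to the default (True)
+    #   gradcheck(torch.sin, (x,), fast_mode=False)  # an explicit False is respected
+    #
+    # With PYTORCH_TEST_WITH_SLOW_GRADCHECK=1 set, fast_mode instead defaults to False.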
+ default_values = { + "check_batched_grad": True, + "fast_mode": True, + } + + if TEST_WITH_SLOW_GRADCHECK: + default_values["fast_mode"] = False + + for key, value in default_values.items(): + # default value override values explicitly set to None + k = kwargs.get(key, None) + kwargs[key] = k if k is not None else value + + return torch.autograd.gradcheck(fn, inputs, **kwargs) + +def gradgradcheck(fn, inputs, grad_outputs=None, **kwargs): + # Wrapper around gradgradcheck that enables certain keys by default + # See gradcheck above for an explanation of why we need something like this. + # + # All PyTorch devs doing testing should use this wrapper instead of autograd.gradgradcheck + default_values = { + "check_batched_grad": True, + "fast_mode": True, + } + + if TEST_WITH_SLOW_GRADCHECK: + default_values["fast_mode"] = False + + for key, value in default_values.items(): + # default value override values explicitly set to None + k = kwargs.get(key, None) + kwargs[key] = k if k is not None else value + + return torch.autograd.gradgradcheck(fn, inputs, grad_outputs, **kwargs) + + +def _assertGradAndGradgradChecks(test_case, apply_fn, inputs, **kwargs): + # call assert function rather than returning a bool since it's nicer + # if we get whether this failed on the gradcheck or the gradgradcheck. + test_case.assertTrue(gradcheck(apply_fn, inputs, **kwargs)) + test_case.assertTrue(gradgradcheck(apply_fn, inputs, **kwargs)) + + +@contextmanager +def set_cwd(path: str) -> Iterator[None]: + old_cwd = os.getcwd() + try: + os.chdir(path) + yield + finally: + os.chdir(old_cwd) + + +# FIXME: delete this +# Using @toleranceOverride specific to your test is the recommended way +# of doing this. These are just some values that worked for test_nn. +dtype2prec_DONTUSE = {torch.float: 1e-5, + torch.double: 1e-5, + torch.half: 1e-2, + torch.bfloat16: 1e-1} + +# FIXME: move to test_sparse or sparse utils +# This is a wrapper that wraps a test to run this test twice, one with +# coalesced=True, another with coalesced=False for coalesced/uncoalesced sparse tensors. 
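+#
+# A typical (hypothetical) use inside a sparse test class looks like:
+#
+#     @coalescedonoff
+#     @dtypes(torch.double)
+#     def test_foo(self, device, dtype, coalesced):
+#         ...
+#
+# i.e. the decorated test must accept a `coalesced` keyword argument; the decorator
+# calls it once with coalesced=True and once with coalesced=False.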
+def coalescedonoff(f): + @wraps(f) + def wrapped(self, *args, **kwargs): + f(self, *args, **kwargs, coalesced=True) + f(self, *args, **kwargs, coalesced=False) + return wrapped + + +def is_coalesced_indices(s): + indices = s._indices() + hash_coeffs = (1,) + s.shape[s.sparse_dim() - 1:0:-1] + hash_indices = torch.tensor(hash_coeffs, device=s.device).cumprod(-1).flip(-1) + if s.sparse_dim() > 1: + hash_indices.unsqueeze_(-1) + hash_indices = (indices * hash_indices).sum(0) + else: + hash_indices = indices * hash_indices + + # check if indices are sorted + res = torch.allclose(hash_indices, hash_indices.sort()[0]) + + # check if there are no repeated indices + res = res and torch.allclose(hash_indices, hash_indices.unique()) + + return res + + +@contextlib.contextmanager +def disable_gc(): + if gc.isenabled(): + try: + gc.disable() + yield + finally: + gc.enable() + else: + yield + + +def find_library_location(lib_name: str) -> Path: + # return the shared library file in the installed folder if exist, + # else the file in the build folder + torch_root = Path(torch.__file__).resolve().parent + path = torch_root / 'lib' / lib_name + if os.path.exists(path): + return path + torch_root = Path(__file__).resolve().parent.parent.parent + return torch_root / 'build' / 'lib' / lib_name + +def skip_but_pass_in_sandcastle(reason): + """ + Similar to unittest.skip, however in the sandcastle environment it just + "passes" the test instead to avoid creating tasks complaining about tests + skipping continuously. + """ + def decorator(func): + if not IS_SANDCASTLE: + func.__unittest_skip__ = True + func.__unittest_skip_why__ = reason + return func + + @wraps(func) + def wrapper(*args, **kwargs): + print(f'Skipping {func.__name__} on sandcastle for following reason: {reason}', file=sys.stderr) + return + return wrapper + + return decorator + +def mock_wrapper(method): + """ + Returns a function that calls the real implementation of a method + in addition to passing args to a mock object. + """ + mock = MagicMock() + + @wraps(method) + def wrapper(self, *args, **kwargs): + mock(*args, **kwargs) + return method(self, *args, **kwargs) + wrapper.mock = mock # type: ignore[attr-defined] + return wrapper + +def get_tensors_from(args, kwargs): + """ Returns a set of all Tensor objects in the given args and kwargs. 
""" + return set([arg for arg in args if isinstance(arg, Tensor)] + + [v for v in kwargs.values() if isinstance(v, Tensor)]) + + +# Returns scalar tensor representation of a list of integer byte values +def bytes_to_scalar(byte_list: List[int], dtype: torch.dtype, device: torch.device): + dtype_to_ctype: Dict[torch.dtype, Any] = { + torch.int8: ctypes.c_int8, + torch.uint8: ctypes.c_uint8, + torch.int16: ctypes.c_int16, + torch.int32: ctypes.c_int32, + torch.int64: ctypes.c_int64, + torch.bool: ctypes.c_bool, + torch.float32: ctypes.c_float, + torch.complex64: ctypes.c_float, + torch.float64: ctypes.c_double, + torch.complex128: ctypes.c_double, + } + ctype = dtype_to_ctype[dtype] + num_bytes = ctypes.sizeof(ctype) + + def check_bytes(byte_list): + for byte in byte_list: + assert 0 <= byte <= 255 + + if dtype.is_complex: + assert len(byte_list) == (num_bytes * 2) + check_bytes(byte_list) + real = ctype.from_buffer((ctypes.c_byte * num_bytes)( + *byte_list[:num_bytes])).value + imag = ctype.from_buffer((ctypes.c_byte * num_bytes)( + *byte_list[num_bytes:])).value + res = real + 1j * imag + else: + assert len(byte_list) == num_bytes + check_bytes(byte_list) + res = ctype.from_buffer((ctypes.c_byte * num_bytes)( + *byte_list)).value + + return torch.tensor(res, device=device, dtype=dtype) + + +def copy_func(f): + """Based on http://stackoverflow.com/a/6528148/190597 (Glenn Maynard)""" + g = types.FunctionType(f.__code__, f.__globals__, name=f.__name__, + argdefs=f.__defaults__, + closure=f.__closure__) + g = functools.update_wrapper(g, f) + g.__kwdefaults__ = f.__kwdefaults__ + return g + + +def xfail_inherited_tests(tests): + """ + Given a list of test names which are defined by a superclass of the + class this decorates, mark them as expected failure. This is useful + if you are doing poor man's parameterized tests by subclassing a generic + test class. + """ + def deco(cls): + for t in tests: + # NB: expectedFailure operates by mutating the method in question, + # which is why you have to copy the function first + setattr(cls, t, unittest.expectedFailure(copy_func(getattr(cls, t)))) + return cls + return deco + + +def skip_but_pass_in_sandcastle_if(condition, reason): + """ + Similar to unittest.skipIf, however in the sandcastle environment it just + "passes" the test instead to avoid creating tasks complaining about tests + skipping continuously. + """ + def decorator(func): + if condition: + if IS_SANDCASTLE: + @wraps(func) + def wrapper(*args, **kwargs): + print(f'Skipping {func.__name__} on sandcastle for following reason: {reason}', file=sys.stderr) + return wrapper + else: + func.__unittest_skip__ = True + func.__unittest_skip_why__ = reason + + return func + + return decorator + +def dtype_name(dtype): + """ Returns the pretty name of the dtype (e.g. torch.int64 -> int64). """ + return str(dtype).split('.')[1] + + +dtype_abbrs = { + torch.bfloat16: 'bf16', + torch.float64: 'f64', + torch.float32: 'f32', + torch.float16: 'f16', + torch.complex32: 'c32', + torch.complex64: 'c64', + torch.complex128: 'c128', + torch.int8: 'i8', + torch.int16: 'i16', + torch.int32: 'i32', + torch.int64: 'i64', + torch.bool: 'b8', + torch.uint8: 'u8', +} + + +def set_single_threaded_if_parallel_tbb(fn): + """Set test to be single threaded for parallel tbb. 
+ + See https://github.com/pytorch/pytorch/issues/64571#issuecomment-914691883 + """ + if not IS_TBB: + return fn + + @wraps(fn) + def wrap_fn(*args, **kwargs): + num_threads = torch.get_num_threads() + torch.set_num_threads(1) + try: + return fn(*args, **kwargs) + finally: + torch.set_num_threads(num_threads) + return wrap_fn + + +@functools.lru_cache +def get_cycles_per_ms() -> float: + """Measure and return approximate number of cycles per millisecond for torch.cuda._sleep + """ + + def measure() -> float: + start = torch.cuda.Event(enable_timing=True) + end = torch.cuda.Event(enable_timing=True) + start.record() + torch.cuda._sleep(1000000) + end.record() + end.synchronize() + cycles_per_ms = 1000000 / start.elapsed_time(end) + return cycles_per_ms + + # Get 10 values and remove the 2 max and 2 min and return the avg. + # This is to avoid system disturbance that skew the results, e.g. + # the very first cuda call likely does a bunch of init, which takes + # much longer than subsequent calls. + # + # Tested on both Tesla V100, Quadro GP100, Titan RTX, RTX 3090 GPUs + # and seems to return stable values. Therefore, we enable caching + # using lru_cache decorator above. + num = 10 + vals = [] + for _ in range(num): + vals.append(measure()) + vals = sorted(vals) + return mean(vals[2 : num - 2]) + + +# OpInfo utils + +T = TypeVar('T') +def first_sample(self: unittest.TestCase, samples: Iterable[T]) -> T: + """ + Returns the first sample from an iterable of samples, like those returned by OpInfo. + The test will be skipped if no samples are available. + """ + try: + return next(iter(samples)) + except StopIteration as e: + raise unittest.SkipTest('Skipped! Need at least 1 sample input') from e + +# this helper method is to recursively +# clone the tensor-type input of operators tested by OpInfo +def clone_input_helper(input): + if isinstance(input, torch.Tensor): + return torch.clone(input) + + if isinstance(input, Sequence): + return tuple(map(clone_input_helper, input)) + + return input + +@contextmanager +def custom_op(opname, symbolic_fn, opset_version): + """Context manager/decorator to test ONNX export with custom operator""" + try: + register_custom_op_symbolic(opname, symbolic_fn, opset_version) + yield + finally: + unregister_custom_op_symbolic(opname, opset_version) + + +def outs_and_grads(fn, graph_inps, inps): + outs = fn(*graph_inps) + for out in pytree.tree_leaves(outs): + if isinstance(out, torch.Tensor) and out.requires_grad: + out.sum().backward(retain_graph=True) + grads = [inp.grad for inp in pytree.tree_leaves(inps) if isinstance(inp, torch.Tensor)] + for inp in pytree.tree_leaves(inps): + if isinstance(inp, torch.Tensor): + inp.grad = None + return outs, grads + +def compare_equal_outs_and_grads(test, m1, m2, inps): + r1, g1 = outs_and_grads(m1, inps, inps) + r2, g2 = outs_and_grads(m2, inps, inps) + test.assertEqual(r1, r2) + test.assertEqual(g1, g2) + +class TestGradients(TestCase): + exact_dtype = True + + # Copies inputs to inplace operations to avoid inplace modifications + # to leaves requiring gradient + def _get_safe_inplace(self, inplace_variant): + @wraps(inplace_variant) + def _fn(t, *args, **kwargs): + return inplace_variant(t.clone(), *args, **kwargs) + + return _fn + + def _check_helper(self, device, dtype, op, variant, check, *, check_forward_ad=False, check_backward_ad=True, + check_batched_grad=None, check_batched_forward_grad=False): + assert check in ('gradcheck', 'bwgrad_bwgrad', 'fwgrad_bwgrad') + # NB: check_backward_ad does not affect gradgradcheck 
(always True) + if variant is None: + self.skipTest("Skipped! Variant not implemented.") + if not op.supports_dtype(dtype, torch.device(device).type): + self.skipTest(f"Skipped! {op.name} does not support dtype {str(dtype)}") + + def is_inplace(variant): + if hasattr(variant, "__wrapped__"): + return variant.__wrapped__ is op.get_inplace() + return variant is op.get_inplace() + + include_conjugated_inputs = op.test_conjugated_samples and dtype.is_complex + + samples = op.sample_inputs(device, dtype, requires_grad=True, include_conjugated_inputs=include_conjugated_inputs, + small_inputs_only=TEST_WITH_SLOW_GRADCHECK) + + for sample in samples: + if sample.broadcasts_input and is_inplace(variant): + continue + + # Gradcheck expects tensors as its input, but autograd actually supports tensorlists + # and tensors passed as kwargs. The following creates a function that accepts just + # the tensors that require grad as varargs, and then recomposes them back into the + # original input. + + # Creates gradcheck inputs by identifying tensors requiring grad + all_args = None + if is_iterable_of_tensors(sample.input): + all_args = chain(sample.input, sample.args, sample.kwargs.values()) + else: + all_args = tuple(chain((sample.input,), sample.args, sample.kwargs.values())) + gradcheck_args = tuple(x for x in all_args if (isinstance(x, torch.Tensor) and x.requires_grad)) + + # Verifies sample input tensors should have no grad + # This may happen if the same tensor is used in two different SampleInputs + for t in gradcheck_args: + self.assertIsNone(t.grad, + "A sampled input has a gradient before running autograd. " + "This usually means that (at least) one input tensor is reused " + "across different SampleInputs. " + "Please create a new tensor for each SampleInput.") + + def _input_recomposition_helper(inputs, inp, input_idx): + if is_iterable_of_tensors(inp): + tensor_list = [] + for x in inp: + if isinstance(x, torch.Tensor) and x.requires_grad: + tensor_list.append(inputs[input_idx]) + input_idx = input_idx + 1 + else: + tensor_list.append(x) + return tensor_list, input_idx + elif isinstance(inp, torch.Tensor) and inp.requires_grad: + return inputs[input_idx], input_idx + 1 + else: + return inp, input_idx + + def fn(*inputs): + # Puts inputs back into sample properly + positional_args = [] + input_idx = 0 + inp, input_idx = _input_recomposition_helper(inputs, sample.input, input_idx) + positional_args.append(inp) + + for x in sample.args: + inp, input_idx = _input_recomposition_helper(inputs, x, input_idx) + positional_args.append(inp) + + # Recreates kwargs + kwargs = {} + for k, v in sample.kwargs.items(): + inp, input_idx = _input_recomposition_helper(inputs, v, input_idx) + kwargs[k] = inp + + output = op.gradcheck_wrapper(variant, *positional_args, **kwargs) + if sample.output_process_fn_grad is not None: + return sample.output_process_fn_grad(output) + return output + + if check == 'gradcheck': + if check_batched_grad is None: + check_batched_grad = op.check_batched_grad + self.assertTrue(gradcheck(fn, gradcheck_args, + check_batched_grad=check_batched_grad, + check_grad_dtypes=True, + nondet_tol=op.gradcheck_nondet_tol, + fast_mode=op.gradcheck_fast_mode, + check_forward_ad=check_forward_ad, + check_backward_ad=check_backward_ad, + check_undefined_grad=True, + check_batched_forward_grad=check_batched_forward_grad)) + elif check in ('bwgrad_bwgrad', 'fwgrad_bwgrad'): # gradgrad check + self.assertFalse(check_forward_ad, msg="Cannot run forward AD check for gradgradcheck") + for 
gen_non_contig_grad_outputs in (False, True): + kwargs = { + "gen_non_contig_grad_outputs": gen_non_contig_grad_outputs, + "check_batched_grad": op.check_batched_gradgrad, + "check_grad_dtypes": True, + "nondet_tol": op.gradcheck_nondet_tol, + "fast_mode": op.gradcheck_fast_mode + } + if check == "fwgrad_bwgrad": + kwargs["check_fwd_over_rev"] = True + kwargs["check_rev_over_rev"] = False + kwargs["check_batched_grad"] = False + kwargs["check_undefined_grad"] = False + + self.assertTrue(gradgradcheck(fn, gradcheck_args, **kwargs)) + else: + self.assertTrue(False, msg="Unknown check requested!") + + def _grad_test_helper(self, device, dtype, op, variant, *, check_forward_ad=False, check_backward_ad=True, + check_batched_grad=None, check_batched_forward_grad=False): + return self._check_helper(device, dtype, op, variant, 'gradcheck', check_forward_ad=check_forward_ad, + check_backward_ad=check_backward_ad, check_batched_grad=check_batched_grad, + check_batched_forward_grad=check_batched_forward_grad) + + def _skip_helper(self, op, device, dtype): + if dtype not in op.supported_backward_dtypes(torch.device(device).type): + self.skipTest("Skipped! Op doesn't support autograd for this dtype.") + if not op.supports_autograd and not op.supports_forward_ad: + self.skipTest("Skipped! autograd not supported.") + +def make_lazy_class(cls): + + def lazy_init(self, cb): + self._cb = cb + self._value = None + + cls.__init__ = lazy_init + + for basename in [ + "add", "sub", "mul", "truediv", "floordiv", "mod", "divmod", "pow", + "lshift", "rshift", "and", "or", "xor", "neg", "pos", "abs", "invert", + "eq", "ne", "lt", "le", "gt", "ge", "bool", "int", "index", + ]: + name = f"__{basename}__" + + def inner_wrapper(name): + use_operator = basename not in ("bool", "int") + + def wrapped(self, *args, **kwargs): + if self._cb is not None: + self._value = self._cb() + self._cb = None + if not use_operator: + return getattr(self._value, name)(*args, **kwargs) + else: + return getattr(operator, name)(self._value, *args, **kwargs) + return wrapped + + setattr(cls, name, inner_wrapper(name)) + + return cls + +@make_lazy_class +class LazyVal: + pass + + +def munge_exc(e, *, suppress_suffix=True, suppress_prefix=True, file=None, skip=0): + if file is None: + file = inspect.stack()[1 + skip].filename # skip one frame + + s = str(e) + + # Remove everything that looks like stack frames in NOT this file + def repl_frame(m): + if m.group(1) != file: + return "" + # Don't accept top-level, even for this script, these will wobble + # depending on how the testing script was invoked + if m.group(2) == "": + return "" + + return m.group(0) + + s = re.sub(r' File "([^"]+)", line \d+, in (.+)\n .+\n( +[~^]+ *\n)?', repl_frame, s) + s = re.sub(r"line \d+", "line N", s) + s = re.sub(file, os.path.basename(file), s) + s = re.sub(os.path.join(os.path.dirname(torch.__file__), ""), "", s) + s = re.sub(r"\\", "/", s) # for Windows + if suppress_suffix: + s = re.sub(r"\n*Set TORCH_LOGS.+", "", s, flags=re.DOTALL) + s = re.sub(r"\n*You can suppress this exception.+", "", s, flags=re.DOTALL) + if suppress_prefix: + s = re.sub(r"Cannot export model.+\n\n", "", s) + s = re.sub(r" +$", "", s, flags=re.M) + return s diff --git a/env-llmeval/lib/python3.10/site-packages/torch/testing/_internal/composite_compliance.py b/env-llmeval/lib/python3.10/site-packages/torch/testing/_internal/composite_compliance.py new file mode 100644 index 0000000000000000000000000000000000000000..6562a9676131eaa70f9d791856d62aa8ab15f069 --- /dev/null +++ 
b/env-llmeval/lib/python3.10/site-packages/torch/testing/_internal/composite_compliance.py @@ -0,0 +1,580 @@ +import torch +from torch import Tensor +import itertools + +from torch.utils._python_dispatch import TorchDispatchMode +from torch.utils._pytree import tree_map, tree_flatten, tree_unflatten +from torch.utils import _pytree as pytree +from functools import partial +from torch.utils._mode_utils import no_dispatch, all_same_mode +import torch.autograd.forward_ad as fwAD +from typing import Callable +import re + + +def check_attr_consistency(wrapper_tensor, metadata_name, metadata_accessor): + elem = wrapper_tensor.elem + metadata_wrapper_tensor = metadata_accessor(wrapper_tensor) + metadata_elem = metadata_accessor(elem) + if metadata_wrapper_tensor == metadata_elem: + return + raise RuntimeError( + f"This operator is not Composite Compliant: the " + f"{metadata_name} of the tensor was modified directly without " + f"going through the PyTorch dispatcher.") + +def check_metadata_consistency(wrapper_tensor, CCT): + # CCT: CompositeCompliantTensor class which is generated using generate_cct + if not isinstance(wrapper_tensor, CCT): + return + things_to_check = { + 'shape': Tensor.size, + 'dtype': lambda x: x.dtype, + 'device': lambda x: x.device, + 'numel': Tensor.numel, + 'stride': Tensor.stride, + 'storage_offset': Tensor.storage_offset, + } + for metadata_name, metadata_accessor in things_to_check.items(): + check_attr_consistency(wrapper_tensor, metadata_name, metadata_accessor) + +def is_view_fn(func): + return func.overloadpacket.__name__ in { + 'as_strided', + 'detach', + 'diagonal', + 'expand', + 'expand_as', + 'movedim', + 'narrow', + 'permute', + 'select', + 'squeeze', + 'transpose', + 't', + 'real', + 'imag', + 'view_as_real', + 'view_as_complex', + 'unflatten', + 'unfold', + 'unsqueeze', + 'view', + 'view_as', + 'unbind', + 'split', + 'split_with_sizes', + 'vsplit', + 'hsplit', + 'tensor_split', + 'chunk', + 'swapaxes', + 'slice', + '_reshape_alias', + '_unsafe_view', + '_conj', + 'alias', + } + +# manually populated from native_functions that have inplace_view: True. +# In the future we will probably be able to grab that list directly +def is_inplace_view_fn(func): + return func.overloadpacket.__name__ in { + 'as_strided_', + 'detach_', + 'squeeze_', + 'swapaxes_', + 'swapdims_', + 't_', + 'transpose_', + 'unsqueeze_', + } + + +# Introspection please save us +def is_inplace(func): + name = func.overloadpacket.__name__ + if re.match('__i.+__', name): + return True + if re.match('__.+__', name): + return False + return name[-1] == '_' + + +def generate_cct_and_mode(autograd_view_consistency=True): + # This function returns a new class CompositeCompliantTensor + # The two arguments control the behaviour described below. + + # autograd_view_consistency: + # If True, alias result using `set_` if func returns a view + # (See Note [Alias Result]). + # Since Forward AD doesn't work with `set_` + # we disable it by setting alias to False. 
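+    #
+    # Rough usage sketch (mirroring check_all_permutations / check_with_mode below):
+    #
+    #     CCT, cct_mode = generate_cct_and_mode()
+    #     wrapped = CCT(torch.randn(3), cct_mode)   # wrapper subclass around a plain tensor
+    #     with cct_mode:
+    #         op(wrapped)   # non-compliant behavior inside `op` raises RuntimeError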
+ + class CompositeCompliantTensor(torch.Tensor): + elem: torch.Tensor + + __slots__ = ['elem'] + __torch_function__ = torch._C._disabled_torch_function_impl + + @staticmethod + def __new__(cls, elem, mode, *args, **kwargs): + assert type(elem) is not cls, \ + "Wrapping a CompositeCompliantTensor in a CompositeCompliantTensor is not supported" + + # The storage of CompositeCompliantTensor should never be used directly + # by a Composite operation; if the Composite + # operator attempts to read from the storage without dispatching then it'll + # raise a RuntimeError due to it being a meta storage. + r = torch.Tensor._make_wrapper_subclass( # type: ignore[attr-defined] + cls, elem.size(), + dtype=elem.dtype, layout=elem.layout, + device=elem.device, requires_grad=elem.requires_grad, + strides=elem.stride(), storage_offset=elem.storage_offset()) + + if elem.requires_grad: + # CompositeCompliantTensor steals the "requires_grad"-ness. + # Why a new copy of `elem`? Because sometimes OpInfo shares inputs between tests... + tmp = torch.empty_strided(elem.shape, elem.stride(), dtype=elem.dtype, + device=elem.device, layout=elem.layout, + requires_grad=False) + tmp.copy_(elem.detach()) + r.elem = tmp + else: + r.elem = elem + + assert r.stride() == r.elem.stride() + + # Propagate conjugate bits to the wrapper tensor + # Ref: https://github.com/albanD/subclass_zoo/issues/24 + # Ref: https://github.com/albanD/subclass_zoo/issues/21 + torch._C._set_conj(r, r.elem.is_conj()) + torch._C._set_neg(r, r.elem.is_neg()) + + r.mode = mode + return r + + def __repr__(self): + return f"CompositeCompliantTensor({self.elem})" + + @classmethod + def __torch_dispatch__(cls, func, types, args=(), kwargs=None): + all_args = pytree.arg_tree_leaves(*args, **(kwargs or {})) + modes = tuple(e.mode for e in all_args if isinstance(e, CompositeCompliantTensor)) + if not all_same_mode(modes): + raise RuntimeError("Multiple CompositeCompliantTensorModes NYI") + with modes[0]: + return func(*args, **kwargs) + + class CompositeCompliantTensorMode(TorchDispatchMode): + def __torch_dispatch__(self, func, types, args=(), kwargs=None): + def unwrap(e): + return e.elem if isinstance(e, CompositeCompliantTensor) else e + + def wrap(e): + return CompositeCompliantTensor(e, self) if isinstance(e, torch.Tensor) else e + + if func == torch.ops.aten._local_scalar_dense.default: + raise RuntimeError( + ".item() is not allowed to be called inside of composite " + "functions in the PyTorch library because not all backends " + "and/or Tensor subclasses (e.g. vmap, ProxyTensor) support them.") + + if func.overloadpacket.__name__ in ('set_', 'resize_'): + raise RuntimeError( + f"{func.__name__} is not allowed to be called inside of " + f"Composite operators.") + + if is_inplace(func): + # NB: We are making an assumption that if the function is in-place, + # then the first argument is being written to. Introspection please save us! + mutated_argument = args[0] + if not isinstance(mutated_argument, CompositeCompliantTensor) and \ + any(isinstance(a, CompositeCompliantTensor) for a in args[1:]): + raise RuntimeError( + 'Not composite compliant: performing in-place operation ' + f'{func.__name__} where the Tensor being written to is ' + 'regular Tensor but the other tensors are Tensor Subclasses. 
' + 'Please try to avoid this in-place operation.') + + unwrapped_args = tree_map(unwrap, args) + unwrapped_kwargs = tree_map(unwrap, kwargs) + unwrapped_rs = func(*unwrapped_args, **unwrapped_kwargs) + rs = tree_map(wrap, unwrapped_rs) + + if is_view_fn(func) and autograd_view_consistency: + # Note [Alias Result] + # Autograd asserts that for B = A.view_fn(...), B and A's storages + # are the same. Here we try to make B alias A to avoid those asserts. + # See https://github.com/pytorch/pytorch/issues/65339 for more information + # about the issue. + with no_dispatch(): + # Idea: this is a weird way of getting a storage that aliases the input. + # This is a workaround for #65339. + # 1. under no_dispatch, all of the wrapper tensors look like regular + # tensors with special storage (the storage is nullptr and + # advertises CPU/CUDA device. + # 2. we run func, which ends up running the view operation + # 3. All view operations reuse the input's storage and return + # result Tensor(s) with new sizes/strides/offset that alias + # the input. + # 4. we set the storage (and sizes/strides/offset) of the wrapper + # tensor results to be that of the tensors that alias the input + result = func(*args, **kwargs) + if isinstance(result, (tuple, list)): + for a, b in zip(rs, result): + a.set_(b) + else: + rs.set_(result) + + # Some operations are allowed to in-place modify the metadata of the + # inputs. The only ones are the "inplace view functions"; when we + # run into these, we manually modify the metadata of the input. + with no_dispatch(): + if is_inplace_view_fn(func): + func(*args, **kwargs) + + # For each CompositeCompliantTensor t, we check that t and t.elem + # have consistent metadata. If they don't have consistent metadata, + # that means the operator did something fishy. + check = partial(check_metadata_consistency, CCT=CompositeCompliantTensor) + pytree.tree_map_(check, args) + pytree.tree_map_(check, kwargs) + pytree.tree_map_(check, rs) + return rs + + return CompositeCompliantTensor, CompositeCompliantTensorMode() + +def is_tensorlist(lst): + if not isinstance(lst, list) and not isinstance(lst, tuple): + return False + if len(lst) == 0: + return False + all_tensors = all(isinstance(elt, torch.Tensor) for elt in lst) + if all_tensors: + return True + exists_one_tensor = all(isinstance(elt, torch.Tensor) for elt in lst) + if exists_one_tensor: + raise RuntimeError('This test assumes that PyTorch APIs cannot take ' + 'mixed lists of Tensor and other things') + return False + + +def maybe_map(fn, should_map, arg): + return fn(arg) if should_map else arg + + +def wrap(arg, CCT, cct_mode): + # CCT: CompositeCompliantTensor class which is generated using generate_cct_and_mode + if isinstance(arg, torch.Tensor): + return CCT(arg, cct_mode) + if is_tensorlist(arg): + return [CCT(a, cct_mode) for a in arg] + raise RuntimeError("wrap assumes that the input can be wrapped") + + +# Given a list of flat arguments, some of which may be Tensors, return all +# possible ways some of the arguments could be CompositeCompliantTensors (CCT). +# For example, given Tensors A, B, C and flat_args = [A, 1, B], +# We would return the following 4 options: +# [CCT(A), 1, CCT(B)] +# [CCT(A), 1, B] +# [A, 1, CCT(B)] +# [A, 1, B] +# NB: Yes, this is exponential. No, we don't care too much because PyTorch ops +# don't accept that many input Tensors. 
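+#
+# Each yielded choice is paired with a `which_args_are_wrapped` tuple of booleans; in
+# the example above, [CCT(A), 1, B] would come with (True, False, False) and
+# [A, 1, CCT(B)] with (False, False, True). With t tensor-like arguments there are
+# 2**t such choices.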
+def generate_subclass_choices(flat_args, CCT, cct_mode): + # CCT: CompositeCompliantTensor class which is generated using generate_cct_and_mode + is_tensor_likes = [isinstance(arg, torch.Tensor) or is_tensorlist(arg) for arg in flat_args] + subclass_options = [[False, True] if is_tensor_like else [False] for is_tensor_like in is_tensor_likes] + + for which_args_are_wrapped in itertools.product(*subclass_options): + + result = [maybe_map(partial(wrap, CCT=CCT, cct_mode=cct_mode), should_wrap_arg, arg) + for should_wrap_arg, arg in zip(which_args_are_wrapped, flat_args)] + yield result, which_args_are_wrapped + + +# For an operation f(*args, **kwargs), each Tensor argument may either be +# a regular Tensor or a Tensor Subclass. This iterator iterates through +# all of those options. +def generate_subclass_choices_args_kwargs(args, kwargs, CCT, cct_mode): + # CCT: CompositeCompliantTensor class which is generated using generate_cct_and_mode + flat_kwargs, spec = tree_flatten(kwargs) + flat_args_kwargs = list(args) + list(flat_kwargs) + for choice, debug_metadata in generate_subclass_choices(flat_args_kwargs, CCT, cct_mode): + new_args = choice[:len(args)] + new_kwargs = tree_unflatten(choice[len(args):], spec) + which_args_are_wrapped = debug_metadata[:len(args)] + which_kwargs_are_wrapped = tree_unflatten(debug_metadata[len(args):], spec) + yield new_args, new_kwargs, which_args_are_wrapped, which_kwargs_are_wrapped + + +def raise_composite_compliance_error(err, additional_info=''): + raise RuntimeError( + "Composite compliance check failed with " + "the above error.\n" + f"{additional_info}" + "If you are adding an OpInfo of an " + "existing operator, please feel free to skip this test " + "because the problem was pre-existing and file an issue. " + "Otherwise, if you added a new operator, please read " + "through the Composite Compliance section in " + "aten/src/ATen/native/README.md for how to resolve this. " + ) from err + + +# This test checks ALL possible permutations of calling `op` with arguments +# that are individually either a regular Tensor or a Tensor subclass. +# +# The general strategy is to wrap some Tensor args and kwargs in +# CompositeCompliantTensor wrappers and call the operation. + +# If some composite operation does any non-compliant behavior, +# CompositeCompliantTensor will raise an error. +def check_all_permutations(op, args, kwargs, assert_equal_fn): + CCT, cct_mode = generate_cct_and_mode() + expected = op(*args, **kwargs) + for choice in generate_subclass_choices_args_kwargs(args, kwargs, CCT, cct_mode): + new_args, new_kwargs, which_args_are_wrapped, which_kwargs_are_wrapped = choice + + try: + actual = op(*new_args, **new_kwargs) + # NOTE: [What errors are Composite Compliance trying to catch?] + # + # There's two things we want to catch: + # - errors that would raise within the torch_dispatch impl + # - data_ptr accesses + # The first is easy to filter for (we could make the error a different + # error class), the second is always going to be a RuntimeError due to + # how it is implemented (if you try to access the data_ptr of thex + # wrapper Tensor, it raises you some internal RuntimeError). + # + # So the most general thing to catch here was RuntimeError. If you + # are here and debugging why your test failed, it's plausible that + # the operator itself is broken and that there are other tests failing. 
+ except RuntimeError as err: + raise_composite_compliance_error( + err, + f"- wrapped_args: {which_args_are_wrapped}\n" + f"- wrapped_kwargs: {which_kwargs_are_wrapped}\n" + ) + + def unwrap(e): + return e.elem if isinstance(e, CCT) else e + + assert_equal_fn(tree_map(unwrap, actual), expected) + +# Checks via the usage of torch dispatch mode certain anti-patterns that +# are not composite compliant. +# +# In particular, the anti-pattern we are trying to prevent is a user +# creating an empty tensor and then resize_-ing it. Torch Dispatch Mode helps +# here because all factory functions will create tensors that are +# CompositeCompliantTensor. +# +# The general strategy is to wrap all Tensor args and kwargs in +# CompositeCompliantTensor wrappers. If an operator that is +# Composite does any non-compliant behavior, +# CompositeCompliantTensor will raise an error. +def check_with_mode(op, args, kwargs, assert_equal_fn): + CCT, cct_mode = generate_cct_and_mode() + + def wrap(e): + return CCT(e, cct_mode) if isinstance(e, torch.Tensor) else e + + expected = op(*args, **kwargs) + + args = tree_map(wrap, args) + kwargs = tree_map(wrap, kwargs) + try: + with cct_mode: + actual = op(*args, **kwargs) + # see NOTE: [What errors are Composite Compliance trying to catch?] + except RuntimeError as err: + raise_composite_compliance_error(err) + + def unwrap(e): + return e.elem if isinstance(e, CCT) else e + + assert_equal_fn(tree_map(unwrap, actual), expected) + +def gather_leaf_tensors(args, kwargs): + leaf_tensors = [] + args, args_spec = tree_flatten(args) + kwargs, kwargs_spec = tree_flatten(kwargs) + args = args + kwargs + for arg in args: + if not isinstance(arg, torch.Tensor): + continue + if arg.requires_grad: + leaf_tensors.append(arg) + return leaf_tensors + + +def compute_expected_grads(op, args, kwargs, output_process_fn_grad=None, gradcheck_wrapper=None): + if gradcheck_wrapper is None: + results = op(*args, **kwargs) + else: + results = gradcheck_wrapper(op, *args, **kwargs) + + if output_process_fn_grad is not None: + results = output_process_fn_grad(results) + + flat_results = pytree.tree_leaves(results) + flat_results = [r for r in flat_results if isinstance(r, torch.Tensor)] + flat_diff_results = [r for r in flat_results if r.requires_grad] + assert len(flat_diff_results) > 0 + + grads = [torch.ones(r.shape, device=r.device, dtype=r.dtype) for r in flat_diff_results] + leaf_tensors = gather_leaf_tensors(args, kwargs) + assert len(leaf_tensors) > 0 + return torch.autograd.grad(flat_diff_results, leaf_tensors, + grads, allow_unused=True, retain_graph=True) + + +# Checks if the backward formula is composite compliant by testing +# all possible permutations of {inputs, grad_outputs} being +# CompositeCompliantTensor or regular Tensors. +# +# NB: it is important that op is accepted as a Callable and not an OpInfo, +# this means we can apply check_backward_formula to things that aren't OpInfos +# while debugging. 
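+#
+# A minimal, hypothetical direct invocation while debugging might look like:
+#
+#     x = torch.randn(3, requires_grad=True)
+#     y = torch.randn(3, requires_grad=True)
+#     check_backward_formula(torch.ops.aten.mul.Tensor, (x, y), {},
+#                            assert_equal_fn=torch.testing.assert_close)
+#
+# (assert_equal_fn must accept an `equal_nan` keyword, as torch.testing.assert_close does.)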
+def check_backward_formula(op: Callable, args, kwargs, + output_process_fn_grad=None, + gradcheck_wrapper=None, assert_equal_fn=None): + CCT, cct_mode = generate_cct_and_mode() + + expected = compute_expected_grads(op, args, kwargs, output_process_fn_grad, gradcheck_wrapper) + + for choice in generate_subclass_choices_args_kwargs(args, kwargs, CCT, cct_mode): + new_args, new_kwargs, which_args_are_wrapped, which_kwargs_are_wrapped = choice + leaf_tensors = gather_leaf_tensors(new_args, new_kwargs) + assert len(leaf_tensors) > 0 + + try: + if gradcheck_wrapper is None: + results = op(*new_args, **new_kwargs) + else: + results = gradcheck_wrapper(op, *new_args, **new_kwargs) + if output_process_fn_grad is not None: + results = output_process_fn_grad(results) + # see NOTE: [What errors are Composite Compliance trying to catch?] + except RuntimeError as err: + raise_composite_compliance_error( + err, + f"- wrapped_args: {which_args_are_wrapped}\n" + f"- wrapped_kwargs: {which_kwargs_are_wrapped}\n" + ) + + flat_results = pytree.tree_leaves(results) + flat_results = [r for r in flat_results if isinstance(r, torch.Tensor)] + flat_diff_results = [r for r in flat_results if r.requires_grad] + assert len(flat_diff_results) > 0 + + # NB: ones, not ones_like, so we get a regular Tensor here + grads = [torch.ones(r.shape, device=r.device, dtype=r.dtype) + for r in flat_diff_results] + for flat_new_grads, which_grad_is_batched in generate_subclass_choices(grads, CCT, cct_mode): + try: + actual = torch.autograd.grad(flat_diff_results, leaf_tensors, flat_new_grads, + allow_unused=True, retain_graph=True) + # see NOTE: [What errors are Composite Compliance trying to catch?] + except RuntimeError as err: + raise_composite_compliance_error( + err, + f"- wrapped_args: {which_args_are_wrapped}\n" + f"- wrapped_kwargs: {which_kwargs_are_wrapped}\n" + f"- wrapped_grads: {which_grad_is_batched}\n" + ) + + def unwrap(e): + return e.elem if isinstance(e, CCT) else e + + assert_equal_fn(tuple(map(unwrap, actual)), expected, equal_nan=True) + +# Checks if the forward AD formula is composite compliant by testing +# all possible permutations of {primals, tangents} being +# CompositeCompliantTensor or regular Tensors. +# +# NB: it is important that op is accepted as a Callable and not an OpInfo, +# this means we can apply check_forward_ad_formula to things that aren't OpInfos +# while debugging. +def check_forward_ad_formula(op: Callable, args, kwargs, gradcheck_wrapper=None, assert_equal_fn=None): + CCT, cct_mode = generate_cct_and_mode(autograd_view_consistency=False) + + def maybe_tangent(t): + assert type(t) is not CCT + # Generate `tangent` tensor + # if given object is a Tensor and requires grad is set. + if isinstance(t, torch.Tensor) and t.requires_grad: + return torch.randn_like(t) + elif is_tensorlist(t): + return [torch.randn_like(e) if e.requires_grad else None for e in t] + return None + + tangent_args = tuple(maybe_tangent(arg) for arg in args) + flat_kwargs, spec = tree_flatten(kwargs) + flat_tangent_kwargs = tuple(maybe_tangent(arg) for arg in flat_kwargs) + tangent_kwargs = tree_unflatten(flat_tangent_kwargs, spec) + + with fwAD.dual_level(): + def maybe_make_dual(dual): + # Returns dual tensor if primal is a tensor/tensor subclass + # with requires_grad set. 
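+            # For example (sketch): given a primal p with requires_grad=True and a
+            # tangent t = torch.randn_like(p), this returns fwAD.make_dual(p.detach(), t);
+            # tensorlists are handled elementwise, anything else is passed through unchanged.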
+ primal, tangent = dual + if isinstance(primal, torch.Tensor) and primal.requires_grad: + return fwAD.make_dual(primal.detach(), tangent) + elif is_tensorlist(primal): + return tuple(fwAD.make_dual(pri.detach(), tang) if tang is not None else pri + for pri, tang in zip(primal, tangent)) + return primal + + def compute_expected_grad(args, tangent_args, kwargs, tangent_kwargs): + op_args = tuple(map(maybe_make_dual, zip(args, tangent_args))) + op_kwargs = {k: maybe_make_dual((v, tangent_kwargs[k])) for k, v in kwargs.items()} + + if gradcheck_wrapper is None: + return op(*op_args, **op_kwargs) + return gradcheck_wrapper(op, *op_args, **op_kwargs) + + expected = compute_expected_grad(args, tangent_args, kwargs, tangent_kwargs) + expected = tree_map(fwAD.unpack_dual, expected) + expected_primals = tree_map(lambda x: x.primal, expected) + expected_tangents = tree_map(lambda x: x.tangent, expected) + + # Permutations of arg and kwargs in CCT. + for choice in generate_subclass_choices_args_kwargs(args, kwargs, CCT, cct_mode): + new_args, new_kwargs, which_args_are_wrapped, which_kwargs_are_wrapped = choice + + # Permutations tangent arg and tangent kwargs in CCT. + for tang_choice in generate_subclass_choices_args_kwargs(tangent_args, tangent_kwargs, CCT, cct_mode): + new_tang_args, new_tang_kwargs, \ + which_tang_args_are_wrapped, which_tang_kwargs_are_wrapped = tang_choice + + op_args = tuple(map(maybe_make_dual, zip(new_args, new_tang_args))) + op_kwargs = {k: maybe_make_dual((v, new_tang_kwargs[k])) for k, v in new_kwargs.items()} + + try: + if gradcheck_wrapper is None: + actual = op(*op_args, **op_kwargs) + else: + actual = gradcheck_wrapper(op, *op_args, **op_kwargs) + # see NOTE: [What errors are Composite Compliance trying to catch?] + except RuntimeError as err: + raise_composite_compliance_error( + err, + f"- wrapped_args: {which_args_are_wrapped}\n" + f"- wrapped_kwargs: {which_kwargs_are_wrapped}\n" + f"- wrapped_tangent_args: {which_tang_args_are_wrapped}\n" + f"- wrapped_tangent_kwargs: {which_tang_kwargs_are_wrapped}\n" + ) + + def unwrap(e): + return e.elem if isinstance(e, CCT) else e + + actual = tree_map(fwAD.unpack_dual, actual) + actual_primals = tree_map(lambda x: unwrap(x.primal), actual) + actual_tangents = tree_map(lambda x: unwrap(x.tangent), actual) + assert_equal_fn(actual_primals, expected_primals, equal_nan=True) + assert_equal_fn(actual_tangents, expected_tangents, equal_nan=True) diff --git a/env-llmeval/lib/python3.10/site-packages/torch/testing/_internal/control_flow_opinfo_db.py b/env-llmeval/lib/python3.10/site-packages/torch/testing/_internal/control_flow_opinfo_db.py new file mode 100644 index 0000000000000000000000000000000000000000..f10dd91cc109e7db221cc336d8b3afa63a52e2e0 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torch/testing/_internal/control_flow_opinfo_db.py @@ -0,0 +1,75 @@ +import torch +import functools +from torch.testing import make_tensor +from functorch.experimental.control_flow import map +from torch.testing._internal.opinfo.core import ( + OpInfo, + SampleInput, +) +from torch.testing._internal.common_dtype import all_types_and + +def sample_inputs_map(opinfo, device, dtype, requires_grad, **kwargs): + make_arg = functools.partial( + make_tensor, device=device, dtype=dtype, requires_grad=requires_grad) + yield SampleInput([make_arg(2, 2, 2, low=0.1, high=2), make_arg(2, 2, 2, low=0.1, high=2)], + args=(make_arg(1, low=0.1, high=2), make_arg(1, low=0.1, high=2))) + +def inner_f(x, y0, y1): + return [x[0].cos().add_(1.) 
* y0, (x[1] + y1.sin()).cos_().view(x[1].size())] + +def simple_map(xs, y0, y1): + def f(x, y0, y1): + return inner_f(x, y0, y1) + return map(f, xs, y0, y1) + +def nested_map(xs, y0, y1): + def f1(xx, y0, y1): + def f2(x, y0, y1): + return inner_f(x, y0, y1) + return map(f2, xx, y0, y1) + return map(f1, xs, y0, y1) + +def triple_nested_map(xs, y0, y1): + def f0(xs, y0, y1): + def f1(xx, y0, y1): + def f2(x, y0, y1): + return inner_f(x, y0, y1) + return map(f2, xx, y0, y1) + return map(f1, xs, y0, y1) + return map(f0, xs, y0, y1) + +control_flow_opinfo_db = [ + OpInfo( + "MapControlflowOp", + op=simple_map, + sample_inputs_func=sample_inputs_map, + dtypes=all_types_and(torch.bool, torch.half), + supports_out=False, + check_batched_grad=False, + check_batched_gradgrad=False, + check_batched_forward_grad=False, + check_inplace_batched_forward_grad=False, + ), + OpInfo( + "NestedMapControlflowOp", + op=nested_map, + sample_inputs_func=sample_inputs_map, + dtypes=all_types_and(torch.bool, torch.half), + supports_out=False, + check_batched_grad=False, + check_batched_gradgrad=False, + check_batched_forward_grad=False, + check_inplace_batched_forward_grad=False, + ), + OpInfo( + "TripleNestedMapControlflowOp", + op=triple_nested_map, + sample_inputs_func=sample_inputs_map, + dtypes=all_types_and(torch.bool, torch.half), + supports_out=False, + check_batched_grad=False, + check_batched_gradgrad=False, + check_batched_forward_grad=False, + check_inplace_batched_forward_grad=False, + ) +] diff --git a/env-llmeval/lib/python3.10/site-packages/torch/testing/_internal/custom_op_db.py b/env-llmeval/lib/python3.10/site-packages/torch/testing/_internal/custom_op_db.py new file mode 100644 index 0000000000000000000000000000000000000000..4a325ec845825f756fb5912197a261b1b1a9b455 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torch/testing/_internal/custom_op_db.py @@ -0,0 +1,454 @@ +import torch +import functools +from torch.testing import make_tensor +from torch.testing._internal.opinfo.core import ( + OpInfo, + SampleInput, +) +from torch.testing._internal.common_dtype import all_types_and +import numpy as np +from torch.testing._internal.autograd_function_db import ( + sample_inputs_numpy_cube, + sample_inputs_numpy_mul, + sample_inputs_numpy_sort, + sample_inputs_numpy_take, +) +from torch import Tensor +from torch.types import Number +from typing import * # noqa: F403 +import torch._custom_ops as custom_ops + +# Note: [custom op db] +# +# This is a collection of custom operator test cases written as OpInfos +# so they can easily be consumed by OpInfo-based tests to check if subsystems +# support them correctly. 
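+#
+# A hypothetical consumer (sketch) parametrizes over the db with the usual OpInfo
+# machinery, e.g.:
+#
+#     from torch.testing._internal.common_device_type import ops
+#     from torch.testing._internal.custom_op_db import custom_op_db
+#
+#     class TestCustomOps(TestCase):
+#         @ops(custom_op_db)
+#         def test_sample_inputs_run(self, device, dtype, op):
+#             for sample in op.sample_inputs(device, dtype):
+#                 op.op(sample.input, *sample.args, **sample.kwargs)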
+
+def to_numpy(tensor):
+    return tensor.cpu().numpy()
+
+@custom_ops.custom_op('_torch_testing::numpy_cube')
+def numpy_cube(x: Tensor) -> Tuple[Tensor, Tensor]:
+    raise NotImplementedError()
+
+@custom_ops.impl('_torch_testing::numpy_cube')
+def numpy_cube_impl(x):
+    x_np = to_numpy(x)
+    dx = torch.tensor(3 * x_np ** 2, device=x.device)
+    return torch.tensor(x_np ** 3, device=x.device), dx
+
+@custom_ops.impl_abstract('_torch_testing::numpy_cube')
+def numpy_cube_abstract(x):
+    return x.clone(), x.clone()
+
+@custom_ops.impl_save_for_backward('_torch_testing::numpy_cube')
+def numpy_cube_save_for_backward(inputs, output):
+    return (inputs.x, output[1])
+
+@custom_ops.impl_backward('_torch_testing::numpy_cube')
+def numpy_cube_backward(ctx, saved, grad_out, grad_dx):
+    x, dx = saved
+    grad_x = torch.ops._torch_testing.numpy_mul(grad_out, dx) + 6 * torch.ops._torch_testing.numpy_mul(grad_dx, x)
+    return {'x': grad_x}
+
+@custom_ops.custom_op('_torch_testing::numpy_mul')
+def numpy_mul(x: Tensor, y: Tensor) -> Tensor:
+    raise NotImplementedError()
+
+@custom_ops.impl('_torch_testing::numpy_mul')
+def numpy_mul_impl(x, y):
+    return torch.tensor(to_numpy(x) * to_numpy(y), device=x.device)
+
+@custom_ops.impl_abstract('_torch_testing::numpy_mul')
+def numpy_mul_abstract(x, y):
+    assert x.device == y.device
+    return (x * y).contiguous()
+
+@custom_ops.impl_save_for_backward('_torch_testing::numpy_mul')
+def numpy_mul_save_for_backward(inputs, output):
+    saved = {}
+    saved['x_requires_grad'] = inputs.x.requires_grad
+    saved['y_requires_grad'] = inputs.y.requires_grad
+    # Optimization: only save what is necessary
+    saved['y'] = inputs.y if inputs.x.requires_grad else None
+    saved['x'] = inputs.x if inputs.y.requires_grad else None
+    return saved
+
+@custom_ops.impl_backward('_torch_testing::numpy_mul')
+def numpy_mul_backward(ctx, saved, grad_out):
+    grad_x = grad_out * saved['y'] if saved['x_requires_grad'] else None
+    grad_y = grad_out * saved['x'] if saved['y_requires_grad'] else None
+    return {'y': grad_y, 'x': grad_x}
+
+@custom_ops.custom_op('_torch_testing::numpy_sort')
+def numpy_sort(x: Tensor, dim: int) -> Tuple[Tensor, Tensor, Tensor]:
+    raise NotImplementedError()
+
+@custom_ops.impl("_torch_testing::numpy_sort")
+def numpy_sort_impl(x, dim):
+    device = x.device
+    x = to_numpy(x)
+    ind = np.argsort(x, axis=dim)
+    ind_inv = np.argsort(ind, axis=dim)
+    result = np.take_along_axis(x, ind, axis=dim)
+    return (
+        torch.tensor(result, device=device),
+        torch.tensor(ind, device=device),
+        torch.tensor(ind_inv, device=device),
+    )
+
+@custom_ops.impl_abstract('_torch_testing::numpy_sort')
+def numpy_sort_abstract(x, dim):
+    return torch.empty_like(x), torch.empty_like(x, dtype=torch.long), torch.empty_like(x, dtype=torch.long)
+
+@custom_ops.impl_save_for_backward('_torch_testing::numpy_sort')
+def numpy_sort_save_for_backward(inputs, output):
+    out, ind, ind_inv = output
+    return [inputs.dim, ind, ind_inv]
+
+@custom_ops.impl_backward('_torch_testing::numpy_sort', output_differentiability=[True, False, False])
+def numpy_sort_backward(ctx, saved, grad_out, grad_ind, grad_ind_inv):
+    dim, ind, ind_inv = saved
+    return {'x': torch.ops._torch_testing.numpy_take(grad_out, ind_inv, ind, dim)}
+
+@custom_ops.custom_op('_torch_testing::numpy_take')
+def numpy_take(x: Tensor, ind: Tensor, ind_inv: Tensor, dim: int) -> Tensor:
+    raise NotImplementedError()
+
+@custom_ops.impl("_torch_testing::numpy_take")
+def numpy_take_impl(x, ind, ind_inv, dim):
+    device = x.device
+    x = to_numpy(x)
+    ind =
to_numpy(ind) + return torch.tensor(np.take_along_axis(x, ind, dim), device=device) + +@custom_ops.impl_abstract('_torch_testing::numpy_take') +def numpy_take_abstract(x, ind, ind_inv, dim): + assert x.device == ind.device + assert x.device == ind_inv.device + assert ind.dtype == torch.long + assert ind_inv.dtype == torch.long + return torch.empty_like(x) + +@custom_ops.impl_save_for_backward('_torch_testing::numpy_take') +def numpy_take_save_for_backward(inputs, output): + return { + 'dim': inputs.dim, + 'ind': inputs.ind, + 'ind_inv': inputs.ind_inv, + } + +@custom_ops.impl_backward('_torch_testing::numpy_take') +def numpy_take_backward(ctx, saved, grad_out): + return { + 'x': torch.ops._torch_testing.numpy_take(grad_out, saved['ind_inv'], saved['ind'], saved['dim']), + 'ind': None, + 'ind_inv': None, + } + +@custom_ops.custom_op('_torch_testing::numpy_nonzero') +def numpy_nonzero(x: Tensor) -> Tensor: + raise NotImplementedError() + +@custom_ops.impl('_torch_testing::numpy_nonzero') +def numpy_nonzero_impl(x): + x_np = to_numpy(x) + res = np.stack(np.nonzero(x_np), axis=1) + if res.shape[0] <= 1: + raise RuntimeError("not supported") + return torch.tensor(res, device=x.device) + +@custom_ops.impl_abstract('_torch_testing::numpy_nonzero') +def numpy_nonzero_abstract(x): + ctx = torch._custom_op.impl.get_ctx() + i0 = ctx.create_unbacked_symint() + shape = [i0, x.dim()] + result = x.new_empty(shape, dtype=torch.long) + return result + +def sample_inputs_numpy_nonzero(opinfo, device, dtype, requires_grad, **kwargs): + make_arg = functools.partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad) + shape = 10 + result = make_arg(shape, low=0.9, high=2) + mask = make_tensor(shape, low=0, high=2, device=device, dtype=torch.long) + with torch.no_grad(): + result *= mask + + yield SampleInput(result, args=()) + +@custom_ops.custom_op('_torch_testing::numpy_view_copy') +def numpy_view_copy(x: Tensor, shape: Sequence[int]) -> Tensor: + raise NotImplementedError() + +@custom_ops.impl('_torch_testing::numpy_view_copy') +def numpy_view_copy_impl(x, shape) -> Tensor: + return torch.tensor(np.copy(to_numpy(x).reshape(shape)), device=x.device) + +@custom_ops.impl_abstract('_torch_testing::numpy_view_copy') +def numpy_view_copy_abstract(x, shape) -> Tensor: + return x.clone().view(shape).clone() + +@custom_ops.impl_save_for_backward('_torch_testing::numpy_view_copy') +def numpy_view_copy_save_for_backward(inputs, output) -> Tensor: + return inputs.x.shape + +@custom_ops.impl_backward('_torch_testing::numpy_view_copy') +def numpy_view_copy_backward(ctx, x_shape, grad_out) -> Tensor: + return {'x': torch.ops._torch_testing.numpy_view_copy(grad_out, x_shape)} + +def sample_inputs_numpy_view_copy(opinfo, device, dtype, requires_grad, **kwargs): + make_arg = functools.partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad) + result = make_arg(2, 3, 4, low=0.9, high=2) + yield SampleInput(result, args=([2, 12],)) + +@custom_ops.custom_op('_torch_testing::numpy_cat') +def numpy_cat(xs: Sequence[Tensor], dim: int) -> Tensor: + raise NotImplementedError() + +@custom_ops.impl('_torch_testing::numpy_cat') +def numpy_cat_impl(xs, dim): + assert len(xs) > 0 + assert all(x.device == xs[0].device for x in xs) + assert all(x.dtype == xs[0].dtype for x in xs) + np_xs = [to_numpy(x) for x in xs] + np_out = np.concatenate(np_xs, axis=dim) + return torch.tensor(np_out, device=xs[0].device) + +@custom_ops.impl_abstract('_torch_testing::numpy_cat') +def numpy_cat_abstract(xs, dim): + 
assert len(xs) > 0 + assert all(x.device == xs[0].device for x in xs) + assert all(x.dtype == xs[0].dtype for x in xs) + return torch.cat(xs, dim=dim) + +@custom_ops.impl_save_for_backward('_torch_testing::numpy_cat') +def numpy_cat_save_for_backward(inputs, output): + dim_sizes = [x.shape[inputs.dim] for x in inputs.xs] + return dim_sizes, inputs.dim + +@custom_ops.impl_backward('_torch_testing::numpy_cat') +def numpy_cat_backward(ctx, saved, grad_out): + dim_sizes, dim = saved + splits = list(np.cumsum(dim_sizes)[:-1]) + grad_xs = torch.ops._torch_testing.numpy_split_copy(grad_out, splits, dim) + return {'xs': grad_xs} + +def sample_inputs_numpy_cat(opinfo, device, dtype, requires_grad, **kwargs): + make_arg = functools.partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad) + r0 = make_arg(2, 3, 4, low=0.9, high=2) + r1 = make_arg(4, 3, 4, low=0.9, high=2) + r2 = make_arg(5, 3, 4, low=0.9, high=2) + yield SampleInput([r0, r1, r2], args=(0,)) + +@custom_ops.custom_op('_torch_testing::numpy_split_copy') +def numpy_split_copy(x: Tensor, sections: Sequence[int], dim: int) -> List[Tensor]: + raise NotImplementedError() + +@custom_ops.impl('_torch_testing::numpy_split_copy') +def numpy_split_copy_impl(x, splits, dim): + x_np = to_numpy(x) + arrs = np.split(x_np, splits, axis=dim) + return [torch.tensor(arr, device=x.device, dtype=x.dtype) for arr in arrs] + +@custom_ops.impl_abstract('_torch_testing::numpy_split_copy') +def numpy_split_copy_abstract(x, splits, dim): + return [xi.clone() for xi in torch.tensor_split(x, splits, dim)] + +@custom_ops.impl_save_for_backward('_torch_testing::numpy_split_copy') +def numpy_split_copy_save_for_backward(inputs, output): + return inputs.dim + +@custom_ops.impl_backward('_torch_testing::numpy_split_copy') +def numpy_split_copy_backward(ctx, saved, grad_out): + dim = saved + return {'x': torch.ops._torch_testing.numpy_cat(grad_out, dim=dim)} + +def sample_inputs_numpy_split_copy(opinfo, device, dtype, requires_grad, **kwargs): + make_arg = functools.partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad) + x = make_arg(2, 9, low=0.9, high=2) + yield SampleInput(x, args=([1, 3, 6], 1)) + +@custom_ops.custom_op('_torch_testing::numpy_split_copy_with_int') +def numpy_split_copy_with_int(x: Tensor, sections: Sequence[int], dim: int) -> Tuple[List[Tensor], int]: + raise NotImplementedError() + +@custom_ops.impl('_torch_testing::numpy_split_copy_with_int') +def numpy_split_copy_with_int_impl(x, splits, dim): + x_np = to_numpy(x) + arrs = np.split(x_np, splits, axis=dim) + return [torch.tensor(arr, device=x.device, dtype=x.dtype) for arr in arrs], len(splits) + +@custom_ops.impl_abstract('_torch_testing::numpy_split_copy_with_int') +def numpy_split_copy_with_int_abstract(x, splits, dim): + return [xi.clone() for xi in torch.tensor_split(x, splits, dim)], len(splits) + +@custom_ops.impl_save_for_backward( + '_torch_testing::numpy_split_copy_with_int') +def numpy_split_copy_with_int_save_for_backward(inputs, output): + return inputs.dim + +@custom_ops.impl_backward( + '_torch_testing::numpy_split_copy_with_int', + output_differentiability=[True, False]) +def numpy_split_copy_with_int_backward(ctx, saved, grad_out, _): + dim = saved + return {'x': torch.ops._torch_testing.numpy_cat(grad_out, dim=dim)} + +@custom_ops.custom_op('_torch_testing::numpy_nms') +def numpy_nms(boxes: Tensor, scores: Tensor, iou_threshold: Number) -> Tensor: + raise NotImplementedError() + +@custom_ops.impl('_torch_testing::numpy_nms') +def 
numpy_nms_impl(boxes, scores, iou_threshold): + # Adapted from Ross Girshick's fast-rcnn implementation at + # https://github.com/rbgirshick/fast-rcnn/blob/master/lib/utils/nms.py + assert boxes.device == scores.device + device = boxes.device + + boxes = to_numpy(boxes) + scores = to_numpy(scores) + + N = boxes.shape[0] + assert boxes.shape == (N, 4) + assert scores.shape == (N,) + + x1 = boxes[:, 0] + y1 = boxes[:, 1] + x2 = boxes[:, 2] + y2 = boxes[:, 3] + + areas = (x2 - x1 + 1) * (y2 - y1 + 1) + order = scores.argsort()[::-1] + + keep = [] + while order.size > 0: + i = order[0] + keep.append(i) + xx1 = np.maximum(x1[i], x1[order[1:]]) + yy1 = np.maximum(y1[i], y1[order[1:]]) + xx2 = np.minimum(x2[i], x2[order[1:]]) + yy2 = np.minimum(y2[i], y2[order[1:]]) + + w = np.maximum(0.0, xx2 - xx1 + 1) + h = np.maximum(0.0, yy2 - yy1 + 1) + inter = w * h + ovr = inter / (areas[i] + areas[order[1:]] - inter) + + inds = np.where(ovr <= iou_threshold)[0] + order = order[inds + 1] + + result = torch.tensor(np.stack(keep), device=device) + # Needed for data-dependent condition :( + assert result.size(0) >= 2 + return result + +@custom_ops.impl_abstract('_torch_testing::numpy_nms') +def numpy_nms_abstract(boxes, scores, iou_threshold): + assert boxes.device == scores.device + N = boxes.shape[0] + assert boxes.shape == (N, 4) + assert scores.shape == (N,) + + ctx = torch._custom_op.impl.get_ctx() + i0 = ctx.create_unbacked_symint() + result = boxes.new_empty([i0], dtype=torch.int64) + return result + +def sample_inputs_numpy_nms(opinfo, device, dtype, requires_grad, **kwargs): + make_arg = functools.partial(make_tensor, device=device, dtype=dtype) + N = 64 + xs = make_arg([N], low=0, high=28) + dx = make_arg([N], low=0, high=4) + ys = make_arg([N], low=0, high=28) + dy = make_arg([N], low=0, high=4) + boxes = torch.stack([xs, ys, xs + dx, ys + dy], dim=1).requires_grad_(requires_grad) + scores = make_arg([N], low=0, high=1, requires_grad=requires_grad) + iou_threshold = make_arg([], low=0, high=1).item() + + yield SampleInput(boxes, args=(scores, iou_threshold)) + +custom_op_db = [ + OpInfo( + 'NumpyCubeCustomOp', + op=torch.ops._torch_testing.numpy_cube, + sample_inputs_func=sample_inputs_numpy_cube, + dtypes=all_types_and(torch.bool, torch.half), + supports_out=False, + ), + OpInfo( + 'NumpyMulCustomOp', + op=torch.ops._torch_testing.numpy_mul, + sample_inputs_func=sample_inputs_numpy_mul, + dtypes=all_types_and(torch.bool, torch.half), + supports_out=False, + ), + OpInfo( + 'NumpySortCustomOp', + op=torch.ops._torch_testing.numpy_sort, + sample_inputs_func=sample_inputs_numpy_sort, + dtypes=all_types_and(torch.bool, torch.half), + supports_out=False, + ), + OpInfo( + 'NumpyTakeCustomOp', + op=torch.ops._torch_testing.numpy_take, + sample_inputs_func=sample_inputs_numpy_take, + dtypes=all_types_and(torch.bool, torch.half), + supports_out=False, + ), + OpInfo( + 'NumpyNonzeroCustomOp', + op=torch.ops._torch_testing.numpy_nonzero, + sample_inputs_func=sample_inputs_numpy_nonzero, + dtypes=all_types_and(torch.bool, torch.half), + supports_autograd=False, + supports_out=False, + ), + OpInfo( + 'NumpyNMSCustomOp', + op=torch.ops._torch_testing.numpy_nms, + sample_inputs_func=sample_inputs_numpy_nms, + dtypes=all_types_and(torch.bool, torch.half), + supports_autograd=False, + supports_out=False, + ), + OpInfo( + 'NumpyViewCopyCustomOp', + op=torch.ops._torch_testing.numpy_view_copy, + sample_inputs_func=sample_inputs_numpy_view_copy, + dtypes=all_types_and(torch.bool, torch.half), + 
supports_autograd=True, + supports_out=False, + ), + OpInfo( + 'NumpyCatCustomOp', + op=torch.ops._torch_testing.numpy_cat, + sample_inputs_func=sample_inputs_numpy_cat, + dtypes=all_types_and(torch.bool, torch.half), + supports_autograd=True, + check_batched_grad=False, + check_batched_gradgrad=False, + supports_out=False, + ), + OpInfo( + 'NumpySplitCopyCustomOp', + op=torch.ops._torch_testing.numpy_split_copy, + sample_inputs_func=sample_inputs_numpy_split_copy, + dtypes=all_types_and(torch.bool, torch.half), + supports_autograd=True, + check_batched_grad=False, + check_batched_gradgrad=False, + supports_out=False, + ), + OpInfo( + 'NumpySplitCopyWithIntCustomOp', + op=torch.ops._torch_testing.numpy_split_copy_with_int, + sample_inputs_func=sample_inputs_numpy_split_copy, + dtypes=all_types_and(torch.bool, torch.half), + gradcheck_wrapper=lambda op, *args, **kwargs: op(*args, **kwargs)[0], + supports_autograd=True, + check_batched_grad=False, + check_batched_gradgrad=False, + supports_out=False, + ), +] diff --git a/env-llmeval/lib/python3.10/site-packages/torch/testing/_internal/dist_utils.py b/env-llmeval/lib/python3.10/site-packages/torch/testing/_internal/dist_utils.py new file mode 100644 index 0000000000000000000000000000000000000000..12eb60d15d6a1ede29fad6197884dc74e54e9ec9 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torch/testing/_internal/dist_utils.py @@ -0,0 +1,204 @@ +import re +import sys +import time +from functools import partial, wraps +from typing import Tuple + +import torch.distributed as dist +import torch.distributed.rpc as rpc +from torch.distributed.rpc import _rref_context_get_debug_info +from torch.testing._internal.common_utils import FILE_SCHEMA, TEST_WITH_TSAN + + +if not dist.is_available(): + print("c10d not available, skipping tests", file=sys.stderr) + sys.exit(0) + + +INIT_METHOD_TEMPLATE = FILE_SCHEMA + "{file_name}" + +def dist_init( + old_test_method=None, + setup_rpc: bool = True, + clean_shutdown: bool = True, + faulty_messages=None, + messages_to_delay=None, +): + """ + We use this decorator for setting up and tearing down state since + MultiProcessTestCase runs each `test*` method in a separate process and + each process just runs the `test*` method without actually calling + 'setUp' and 'tearDown' methods of unittest. + + Note: pass the string representation of MessageTypes that should be used + with the faulty agent's send function. By default, all retriable messages + ("RREF_FORK_REQUEST", "RREF_CHILD_ACCEPT", "RREF_USER_DELETE", + "CLEANUP_AUTOGRAD_CONTEXT_REQ") will use the faulty send (this default is + set from faulty_rpc_agent_test_fixture.py). + """ + # If we use dist_init without arguments (ex: @dist_init), old_test_method is + # appropriately set and we return the wrapper appropriately. On the other + # hand if dist_init has arguments (ex: @dist_init(clean_shutdown=False)), + # old_test_method is None and we return a functools.partial which is the real + # decorator that is used and as a result we recursively call dist_init with + # old_test_method and the rest of the arguments appropriately set. + if old_test_method is None: + return partial( + dist_init, + setup_rpc=setup_rpc, + clean_shutdown=clean_shutdown, + faulty_messages=faulty_messages, + messages_to_delay=messages_to_delay, + ) + + @wraps(old_test_method) + def new_test_method(self, *arg, **kwargs): + # Setting _ignore_rref_leak to make sure OwnerRRefs are properly deleted + # in tests. 
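+        # Added comment (not in the original file): illustrative usage of the
+        # enclosing dist_init decorator, as explained in its docstring above:
+        #
+        #     @dist_init                         # bare form
+        #     def test_foo(self): ...
+        #
+        #     @dist_init(clean_shutdown=False)   # parametrized form
+        #     def test_bar(self): ...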
+ import torch.distributed.rpc.api as api + + api._ignore_rref_leak = False + self.worker_id = self.rank + self.setup_fault_injection(faulty_messages, messages_to_delay) + + rpc_backend_options = self.rpc_backend_options + if setup_rpc: + if TEST_WITH_TSAN: + # TSAN runs much slower. + rpc_backend_options.rpc_timeout = rpc.constants.DEFAULT_RPC_TIMEOUT_SEC * 5 + rpc.constants.DEFAULT_SHUTDOWN_TIMEOUT = 60 + + rpc.init_rpc( + name="worker%d" % self.rank, + backend=self.rpc_backend, + rank=self.rank, + world_size=self.world_size, + rpc_backend_options=rpc_backend_options, + ) + + return_value = old_test_method(self, *arg, **kwargs) + + if setup_rpc: + rpc.shutdown(graceful=clean_shutdown) + + return return_value + + return new_test_method + + +def noop() -> None: + pass + + +def wait_until_node_failure(rank: int, expected_error_regex: str = ".*") -> str: + """ + Loops until an RPC to the given rank fails. This is used to + indicate that the node has failed in unit tests. + Args: + rank (int): Rank of the node expected to fail + expected_error_regex (optional, str): Regex of exception message expected. Useful to ensure a specific failure + occurs, not just any. + """ + while True: + try: + rpc.rpc_sync(f"worker{rank}", noop, args=()) + time.sleep(0.1) + except Exception as e: + if re.search(pattern=expected_error_regex, string=str(e)): + return str(e) + + +def wait_until_pending_futures_and_users_flushed(timeout: int = 20) -> None: + """ + The RRef protocol holds forkIds of rrefs in a map until those forks are + confirmed by the owner. The message confirming the fork may arrive after + our tests check whether this map is empty, which leads to failures and + flaky tests. to_here also does not guarantee that we have finished + processing the owner's confirmation message for the RRef. This function + loops until the map is empty, which means the messages have been received + and processed. Call this function before asserting the map returned by + _get_debug_info is empty. + """ + start = time.time() + while True: + debug_info = _rref_context_get_debug_info() + num_pending_futures = int(debug_info["num_pending_futures"]) + num_pending_users = int(debug_info["num_pending_users"]) + if num_pending_futures == 0 and num_pending_users == 0: + break + time.sleep(0.1) + if time.time() - start > timeout: + raise ValueError( + "Timed out waiting to flush pending futures and users, had {} pending futures and {} pending users".format( + num_pending_futures, num_pending_users + ) + ) + + +def get_num_owners_and_forks() -> Tuple[str, str]: + """ + Retrieves number of OwnerRRefs and forks on this node from + _rref_context_get_debug_info. + """ + rref_dbg_info = _rref_context_get_debug_info() + num_owners = rref_dbg_info["num_owner_rrefs"] + num_forks = rref_dbg_info["num_forks"] + return num_owners, num_forks + + +def wait_until_owners_and_forks_on_rank( + num_owners: int, num_forks: int, rank: int, timeout: int = 20 +) -> None: + """ + Waits until timeout for num_forks and num_owners to exist on the rank. Used + to ensure proper deletion of RRefs in tests.
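+
+    Illustrative example (added note, not part of the original docstring; the
+    concrete counts are assumptions):
+        # block until rank 0 reports exactly 1 OwnerRRef and 2 forks
+        wait_until_owners_and_forks_on_rank(1, 2, rank=0, timeout=20)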
+ """ + start = time.time() + while True: + num_owners_on_rank, num_forks_on_rank = rpc.rpc_sync( + worker_name(rank), get_num_owners_and_forks, args=(), timeout=5 + ) + num_owners_on_rank = int(num_owners_on_rank) + num_forks_on_rank = int(num_forks_on_rank) + if num_owners_on_rank == num_owners and num_forks_on_rank == num_forks: + return + time.sleep(1) + if time.time() - start > timeout: + raise ValueError( + "Timed out waiting {} sec for {} owners and {} forks on rank, had {} owners and {} forks".format( + timeout, + num_owners, + num_forks, + num_owners_on_rank, + num_forks_on_rank, + ) + ) + + +def initialize_pg(init_method, rank: int, world_size: int) -> None: + # This is for tests using `dist.barrier`. + if not dist.is_initialized(): + dist.init_process_group( + backend="gloo", + init_method=init_method, + rank=rank, + world_size=world_size, + ) + + +def worker_name(rank: int) -> str: + return f"worker{rank}" + + +def get_function_event(function_events, partial_event_name): + """ + Returns the first event that matches partial_event_name in the provided + function_events. These function_events should be the output of + torch.autograd.profiler.function_events(). + + Args: + function_events: function_events returned by the profiler. + event_name (str): partial key that the event was profiled with. + """ + event = [event for event in function_events if partial_event_name in event.name][0] # noqa: RUF015 + return event diff --git a/env-llmeval/lib/python3.10/site-packages/torch/testing/_internal/generated/__init__.py b/env-llmeval/lib/python3.10/site-packages/torch/testing/_internal/generated/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/env-llmeval/lib/python3.10/site-packages/torch/testing/_internal/generated/__pycache__/__init__.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/torch/testing/_internal/generated/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..810e96bf5f0cbcac5b581a16ff96961d8c6c9e79 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/torch/testing/_internal/generated/__pycache__/__init__.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/torch/testing/_internal/generated/__pycache__/annotated_fn_args.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/torch/testing/_internal/generated/__pycache__/annotated_fn_args.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..f292079da6fb78cb7985e64edc9a6590d75872e1 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/torch/testing/_internal/generated/__pycache__/annotated_fn_args.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/torch/testing/_internal/generated/annotated_fn_args.py b/env-llmeval/lib/python3.10/site-packages/torch/testing/_internal/generated/annotated_fn_args.py new file mode 100644 index 0000000000000000000000000000000000000000..649c220ebfe4985abeb3685102cb66ce9a4a471e --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torch/testing/_internal/generated/annotated_fn_args.py @@ -0,0 +1,2820 @@ +""" +This file is needed for generating procedural tests required for +testing __torch_function__. See tests/test_overrides.py. 
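+
+Added note (not part of the generated header): each entry below maps a torch
+callable (e.g. torch._C._VariableFunctions.*) to a list of argument
+descriptors with the keys 'is_kwarg_only', 'name', 'simple_type' and, for a
+few operators, 'size'. Overloads show up as repeated dictionary keys, so only
+the last entry for a given callable is kept at import time.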
+""" + +# flake8: noqa +import torch + +annotated_args = { + torch._C._VariableFunctions._cast_Byte: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions._cast_Char: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions._cast_Double: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions._cast_Float: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions._cast_Int: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions._cast_Long: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions._cast_Short: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions._cast_Half: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions._make_dual: [{'is_kwarg_only': 'False', 'name': 'primal', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'tangent', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'level', 'simple_type': 'int64_t'}], + torch._C._VariableFunctions._unpack_dual: [{'is_kwarg_only': 'False', 'name': 'dual', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'level', 'simple_type': 'int64_t'}], + torch._C._VariableFunctions.align_tensors: [{'is_kwarg_only': 'False', 'name': 'tensors', 'simple_type': 'TensorList'}], + torch._C._VariableFunctions._assert_async: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions._assert_async: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'assert_msg', 'simple_type': 'c10::string_view'}], + torch._C._VariableFunctions._functional_assert_async: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'assert_msg', 'simple_type': 'c10::string_view'}, {'is_kwarg_only': 'False', 'name': 'dep_token', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions._assert_tensor_metadata: [{'is_kwarg_only': 'False', 'name': 'a', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.sym_constrain_range: [{'is_kwarg_only': 'False', 'name': 'size', 'simple_type': 'Scalar'}], + torch._C._VariableFunctions.sym_constrain_range_for_size: [{'is_kwarg_only': 'False', 'name': 'size', 'simple_type': 'Scalar'}], + torch._C._VariableFunctions._functional_sym_constrain_range: [{'is_kwarg_only': 'False', 'name': 'size', 'simple_type': 'Scalar'}, {'is_kwarg_only': 'False', 'name': 'min', 'simple_type': 'int64_t?'}, {'is_kwarg_only': 'False', 'name': 'max', 'simple_type': 'int64_t?'}, {'is_kwarg_only': 'False', 'name': 'dep_token', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions._functional_sym_constrain_range_for_size: [{'is_kwarg_only': 'False', 'name': 'size', 'simple_type': 'Scalar'}, {'is_kwarg_only': 'False', 'name': 'min', 'simple_type': 'int64_t?'}, {'is_kwarg_only': 'False', 'name': 'max', 'simple_type': 'int64_t?'}, {'is_kwarg_only': 'False', 'name': 'dep_token', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions._make_dep_token: [], + torch._C._VariableFunctions._use_cudnn_ctc_loss: [{'is_kwarg_only': 'False', 'name': 'log_probs', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'targets', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'input_lengths', 'simple_type': 'IntArrayRef'}, 
{'is_kwarg_only': 'False', 'name': 'target_lengths', 'simple_type': 'IntArrayRef'}, {'is_kwarg_only': 'False', 'name': 'blank', 'simple_type': 'int64_t'}], + torch._C._VariableFunctions._use_cudnn_ctc_loss: [{'is_kwarg_only': 'False', 'name': 'log_probs', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'targets', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'input_lengths', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'target_lengths', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'blank', 'simple_type': 'int64_t'}], + torch._C._VariableFunctions._cudnn_ctc_loss: [{'is_kwarg_only': 'False', 'name': 'log_probs', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'targets', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'input_lengths', 'simple_type': 'IntArrayRef'}, {'is_kwarg_only': 'False', 'name': 'target_lengths', 'simple_type': 'IntArrayRef'}, {'is_kwarg_only': 'False', 'name': 'blank', 'simple_type': 'int64_t'}, {'is_kwarg_only': 'False', 'name': 'deterministic', 'simple_type': 'bool'}, {'is_kwarg_only': 'False', 'name': 'zero_infinity', 'simple_type': 'bool'}], + torch._C._VariableFunctions._cudnn_ctc_loss: [{'is_kwarg_only': 'False', 'name': 'log_probs', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'targets', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'input_lengths', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'target_lengths', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'blank', 'simple_type': 'int64_t'}, {'is_kwarg_only': 'False', 'name': 'deterministic', 'simple_type': 'bool'}, {'is_kwarg_only': 'False', 'name': 'zero_infinity', 'simple_type': 'bool'}], + torch._C._VariableFunctions._use_cudnn_rnn_flatten_weight: [], + torch._C._VariableFunctions._cudnn_rnn_flatten_weight: [{'is_kwarg_only': 'False', 'name': 'weight_arr', 'simple_type': 'TensorList'}, {'is_kwarg_only': 'False', 'name': 'weight_stride0', 'simple_type': 'int64_t'}, {'is_kwarg_only': 'False', 'name': 'input_size', 'simple_type': 'SymInt'}, {'is_kwarg_only': 'False', 'name': 'mode', 'simple_type': 'int64_t'}, {'is_kwarg_only': 'False', 'name': 'hidden_size', 'simple_type': 'SymInt'}, {'is_kwarg_only': 'False', 'name': 'proj_size', 'simple_type': 'SymInt'}, {'is_kwarg_only': 'False', 'name': 'num_layers', 'simple_type': 'int64_t'}, {'is_kwarg_only': 'False', 'name': 'batch_first', 'simple_type': 'bool'}, {'is_kwarg_only': 'False', 'name': 'bidirectional', 'simple_type': 'bool'}], + torch._C._VariableFunctions._cudnn_rnn: [{'is_kwarg_only': 'False', 'name': 'input', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'weight', 'simple_type': 'TensorList'}, {'is_kwarg_only': 'False', 'name': 'weight_stride0', 'simple_type': 'int64_t'}, {'is_kwarg_only': 'False', 'name': 'weight_buf', 'simple_type': 'Tensor?'}, {'is_kwarg_only': 'False', 'name': 'hx', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'cx', 'simple_type': 'Tensor?'}, {'is_kwarg_only': 'False', 'name': 'mode', 'simple_type': 'int64_t'}, {'is_kwarg_only': 'False', 'name': 'hidden_size', 'simple_type': 'SymInt'}, {'is_kwarg_only': 'False', 'name': 'proj_size', 'simple_type': 'SymInt'}, {'is_kwarg_only': 'False', 'name': 'num_layers', 'simple_type': 'int64_t'}, {'is_kwarg_only': 'False', 'name': 'batch_first', 'simple_type': 'bool'}, {'is_kwarg_only': 'False', 'name': 'dropout', 'simple_type': 'double'}, {'is_kwarg_only': 'False', 'name': 'train', 'simple_type': 'bool'}, {'is_kwarg_only': 
'False', 'name': 'bidirectional', 'simple_type': 'bool'}, {'is_kwarg_only': 'False', 'name': 'batch_sizes', 'simple_type': 'SymIntArrayRef'}, {'is_kwarg_only': 'False', 'name': 'dropout_state', 'simple_type': 'Tensor?'}], + torch._C._VariableFunctions._cudnn_init_dropout_state: [{'is_kwarg_only': 'False', 'name': 'dropout', 'simple_type': 'double'}, {'is_kwarg_only': 'False', 'name': 'train', 'simple_type': 'bool'}, {'is_kwarg_only': 'False', 'name': 'dropout_seed', 'simple_type': 'int64_t'}], + torch._C._VariableFunctions._debug_has_internal_overlap: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions._fused_dropout: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'p', 'simple_type': 'double'}], + torch._C._VariableFunctions._masked_scale: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'mask', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'scale', 'simple_type': 'double'}], + torch._C._VariableFunctions.native_dropout: [{'is_kwarg_only': 'False', 'name': 'input', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'p', 'simple_type': 'double'}, {'is_kwarg_only': 'False', 'name': 'train', 'simple_type': 'bool?'}], + torch._C._VariableFunctions._sobol_engine_draw: [{'is_kwarg_only': 'False', 'name': 'quasi', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'n', 'simple_type': 'int64_t'}, {'is_kwarg_only': 'False', 'name': 'sobolstate', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'dimension', 'simple_type': 'int64_t'}, {'is_kwarg_only': 'False', 'name': 'num_generated', 'simple_type': 'int64_t'}, {'is_kwarg_only': 'False', 'name': 'dtype', 'simple_type': 'ScalarType?'}], + torch._C._VariableFunctions._sobol_engine_ff_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'n', 'simple_type': 'int64_t'}, {'is_kwarg_only': 'False', 'name': 'sobolstate', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'dimension', 'simple_type': 'int64_t'}, {'is_kwarg_only': 'False', 'name': 'num_generated', 'simple_type': 'int64_t'}], + torch._C._VariableFunctions._sobol_engine_scramble_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'ltm', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'dimension', 'simple_type': 'int64_t'}], + torch._C._VariableFunctions._sobol_engine_initialize_state_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'dimension', 'simple_type': 'int64_t'}], + torch._C._VariableFunctions._reshape_from_tensor: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'shape', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions._shape_as_tensor: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.dropout: [{'is_kwarg_only': 'False', 'name': 'input', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'p', 'simple_type': 'double'}, {'is_kwarg_only': 'False', 'name': 'train', 'simple_type': 'bool'}], + torch._C._VariableFunctions.dropout_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'p', 'simple_type': 'double'}, {'is_kwarg_only': 'False', 'name': 'train', 'simple_type': 'bool'}], + 
torch._C._VariableFunctions.feature_dropout: [{'is_kwarg_only': 'False', 'name': 'input', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'p', 'simple_type': 'double'}, {'is_kwarg_only': 'False', 'name': 'train', 'simple_type': 'bool'}], + torch._C._VariableFunctions.feature_dropout_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'p', 'simple_type': 'double'}, {'is_kwarg_only': 'False', 'name': 'train', 'simple_type': 'bool'}], + torch._C._VariableFunctions.alpha_dropout: [{'is_kwarg_only': 'False', 'name': 'input', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'p', 'simple_type': 'double'}, {'is_kwarg_only': 'False', 'name': 'train', 'simple_type': 'bool'}], + torch._C._VariableFunctions.alpha_dropout_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'p', 'simple_type': 'double'}, {'is_kwarg_only': 'False', 'name': 'train', 'simple_type': 'bool'}], + torch._C._VariableFunctions.feature_alpha_dropout: [{'is_kwarg_only': 'False', 'name': 'input', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'p', 'simple_type': 'double'}, {'is_kwarg_only': 'False', 'name': 'train', 'simple_type': 'bool'}], + torch._C._VariableFunctions.feature_alpha_dropout_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'p', 'simple_type': 'double'}, {'is_kwarg_only': 'False', 'name': 'train', 'simple_type': 'bool'}], + torch._C._VariableFunctions.abs: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.abs: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.abs_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.absolute: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.absolute: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.angle: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.angle: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.view_as_real: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.view_as_complex: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.sgn: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.sgn: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.real: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.imag: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions._conj: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.conj: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions._conj_physical: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.conj_physical: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.conj_physical: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.conj_physical_: 
[{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.resolve_conj: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.resolve_neg: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions._neg_view: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.acos: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.acos: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.acos_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.arccos: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.arccos: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.arccos_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.avg_pool1d: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'kernel_size', 'simple_type': 'IntArrayRef', 'size': 1}], + torch._C._VariableFunctions.adaptive_avg_pool1d: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'output_size', 'simple_type': 'IntArrayRef', 'size': 1}], + torch._C._VariableFunctions.adaptive_max_pool1d: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'output_size', 'simple_type': 'IntArrayRef', 'size': 1}], + torch._C._VariableFunctions.add: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.add: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions._add_relu: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions._add_relu: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions._add_relu: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Scalar'}], + torch._C._VariableFunctions._add_relu_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions._add_relu_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Scalar'}], + torch._C._VariableFunctions.addmv: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'mat', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'vec', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.addmv: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'mat', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'vec', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.addmv_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, 
{'is_kwarg_only': 'False', 'name': 'mat', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'vec', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.addr: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'vec1', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'vec2', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.addr: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'vec1', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'vec2', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.affine_grid_generator: [{'is_kwarg_only': 'False', 'name': 'theta', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'size', 'simple_type': 'SymIntArrayRef'}, {'is_kwarg_only': 'False', 'name': 'align_corners', 'simple_type': 'bool'}], + torch._C._VariableFunctions._is_all_true: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions._is_any_true: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions._test_check_tensor: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions._test_functorch_fallback: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.all: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'dim', 'simple_type': 'int64_t'}], + torch._C._VariableFunctions.all: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.all: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'dim', 'simple_type': 'int64_t'}], + torch._C._VariableFunctions.all: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.all: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'dim', 'simple_type': 'Dimname'}], + torch._C._VariableFunctions.all: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'dim', 'simple_type': 'Dimname'}], + torch._C._VariableFunctions.all: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.all: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.allclose: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.any: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'dim', 'simple_type': 'int64_t'}], + torch._C._VariableFunctions.any: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.any: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'dim', 'simple_type': 'int64_t'}], + torch._C._VariableFunctions.any: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.any: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'dim', 'simple_type': 'Dimname'}], + torch._C._VariableFunctions.any: [{'is_kwarg_only': 
'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'dim', 'simple_type': 'Dimname'}], + torch._C._VariableFunctions.any: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.any: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.arange: [{'is_kwarg_only': 'False', 'name': 'end', 'simple_type': 'Scalar'}], + torch._C._VariableFunctions.arange: [{'is_kwarg_only': 'False', 'name': 'start', 'simple_type': 'Scalar'}, {'is_kwarg_only': 'False', 'name': 'end', 'simple_type': 'Scalar'}], + torch._C._VariableFunctions.arange: [{'is_kwarg_only': 'False', 'name': 'start', 'simple_type': 'Scalar'}, {'is_kwarg_only': 'False', 'name': 'end', 'simple_type': 'Scalar'}], + torch._C._VariableFunctions.arange: [{'is_kwarg_only': 'False', 'name': 'end', 'simple_type': 'Scalar'}], + torch._C._VariableFunctions.arange: [{'is_kwarg_only': 'False', 'name': 'start', 'simple_type': 'Scalar'}, {'is_kwarg_only': 'False', 'name': 'end', 'simple_type': 'Scalar'}], + torch._C._VariableFunctions._dim_arange: [{'is_kwarg_only': 'False', 'name': 'like', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'dim', 'simple_type': 'int64_t'}], + torch._C._VariableFunctions.argmax: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.argmax: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.argmin: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.argmin: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.acosh: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.acosh: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.acosh_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.arccosh: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.arccosh: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.arccosh_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.asinh: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.asinh: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.asinh_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.arcsinh: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.arcsinh: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.arcsinh_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.atanh: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.atanh: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.atanh_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.arctanh: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.arctanh: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 
'Tensor'}], + torch._C._VariableFunctions.arctanh_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.as_strided: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'size', 'simple_type': 'SymIntArrayRef'}, {'is_kwarg_only': 'False', 'name': 'stride', 'simple_type': 'SymIntArrayRef'}], + torch._C._VariableFunctions.as_strided_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'size', 'simple_type': 'SymIntArrayRef'}, {'is_kwarg_only': 'False', 'name': 'stride', 'simple_type': 'SymIntArrayRef'}], + torch._C._VariableFunctions.asin: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.asin: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.asin_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.arcsin: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.arcsin: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.arcsin_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.atan: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.atan: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.atan_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.arctan: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.arctan: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.arctan_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.atleast_1d: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.atleast_1d: [{'is_kwarg_only': 'False', 'name': 'tensors', 'simple_type': 'TensorList'}], + torch._C._VariableFunctions.atleast_2d: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.atleast_2d: [{'is_kwarg_only': 'False', 'name': 'tensors', 'simple_type': 'TensorList'}], + torch._C._VariableFunctions.atleast_3d: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.atleast_3d: [{'is_kwarg_only': 'False', 'name': 'tensors', 'simple_type': 'TensorList'}], + torch._C._VariableFunctions.baddbmm: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'batch1', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'batch2', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.baddbmm: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'batch1', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'batch2', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.bartlett_window: [{'is_kwarg_only': 'False', 'name': 'window_length', 'simple_type': 'int64_t'}], + torch._C._VariableFunctions.bartlett_window: [{'is_kwarg_only': 'False', 'name': 'window_length', 'simple_type': 'int64_t'}, {'is_kwarg_only': 'False', 'name': 'periodic', 'simple_type': 'bool'}], + torch._C._VariableFunctions.batch_norm: [{'is_kwarg_only': 
'False', 'name': 'input', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'weight', 'simple_type': 'Tensor?'}, {'is_kwarg_only': 'False', 'name': 'bias', 'simple_type': 'Tensor?'}, {'is_kwarg_only': 'False', 'name': 'running_mean', 'simple_type': 'Tensor?'}, {'is_kwarg_only': 'False', 'name': 'running_var', 'simple_type': 'Tensor?'}, {'is_kwarg_only': 'False', 'name': 'training', 'simple_type': 'bool'}, {'is_kwarg_only': 'False', 'name': 'momentum', 'simple_type': 'double'}, {'is_kwarg_only': 'False', 'name': 'eps', 'simple_type': 'double'}, {'is_kwarg_only': 'False', 'name': 'cudnn_enabled', 'simple_type': 'bool'}], + torch._C._VariableFunctions.quantized_batch_norm: [{'is_kwarg_only': 'False', 'name': 'input', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'weight', 'simple_type': 'Tensor?'}, {'is_kwarg_only': 'False', 'name': 'bias', 'simple_type': 'Tensor?'}, {'is_kwarg_only': 'False', 'name': 'mean', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'var', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'eps', 'simple_type': 'double'}, {'is_kwarg_only': 'False', 'name': 'output_scale', 'simple_type': 'double'}, {'is_kwarg_only': 'False', 'name': 'output_zero_point', 'simple_type': 'int64_t'}], + torch._C._VariableFunctions._batch_norm_impl_index: [{'is_kwarg_only': 'False', 'name': 'input', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'weight', 'simple_type': 'Tensor?'}, {'is_kwarg_only': 'False', 'name': 'bias', 'simple_type': 'Tensor?'}, {'is_kwarg_only': 'False', 'name': 'running_mean', 'simple_type': 'Tensor?'}, {'is_kwarg_only': 'False', 'name': 'running_var', 'simple_type': 'Tensor?'}, {'is_kwarg_only': 'False', 'name': 'training', 'simple_type': 'bool'}, {'is_kwarg_only': 'False', 'name': 'momentum', 'simple_type': 'double'}, {'is_kwarg_only': 'False', 'name': 'eps', 'simple_type': 'double'}, {'is_kwarg_only': 'False', 'name': 'cudnn_enabled', 'simple_type': 'bool'}], + torch._C._VariableFunctions.bernoulli: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.bernoulli: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.bernoulli: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'p', 'simple_type': 'double'}], + torch._C._VariableFunctions.bilinear: [{'is_kwarg_only': 'False', 'name': 'input1', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'input2', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'weight', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.binary_cross_entropy_with_logits: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'target', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.bincount: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.bitwise_not: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.bitwise_not: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.copysign: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.copysign: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Tensor'}], + 
torch._C._VariableFunctions.copysign: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Scalar'}], + torch._C._VariableFunctions.copysign: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Scalar'}], + torch._C._VariableFunctions.logical_not: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.logical_not: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.logical_xor: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.logical_xor: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.logical_and: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.logical_and: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.logical_or: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.logical_or: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.blackman_window: [{'is_kwarg_only': 'False', 'name': 'window_length', 'simple_type': 'int64_t'}], + torch._C._VariableFunctions.blackman_window: [{'is_kwarg_only': 'False', 'name': 'window_length', 'simple_type': 'int64_t'}, {'is_kwarg_only': 'False', 'name': 'periodic', 'simple_type': 'bool'}], + torch._C._VariableFunctions.bmm: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'mat2', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.bmm: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'mat2', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.broadcast_tensors: [{'is_kwarg_only': 'False', 'name': 'tensors', 'simple_type': 'TensorList'}], + torch._C._VariableFunctions.broadcast_to: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'size', 'simple_type': 'SymIntArrayRef'}], + torch._C._VariableFunctions._sparse_broadcast_to: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'size', 'simple_type': 'IntArrayRef'}], + torch._C._VariableFunctions.cat: [{'is_kwarg_only': 'False', 'name': 'tensors', 'simple_type': 'TensorList'}], + torch._C._VariableFunctions.cat: [{'is_kwarg_only': 'False', 'name': 'tensors', 'simple_type': 'TensorList'}], + torch._C._VariableFunctions.cat: [{'is_kwarg_only': 'False', 'name': 'tensors', 'simple_type': 'TensorList'}, {'is_kwarg_only': 'False', 'name': 'dim', 'simple_type': 'Dimname'}], + torch._C._VariableFunctions.cat: [{'is_kwarg_only': 'False', 'name': 'tensors', 'simple_type': 'TensorList'}, {'is_kwarg_only': 'False', 'name': 'dim', 'simple_type': 'Dimname'}], + torch._C._VariableFunctions.concat: [{'is_kwarg_only': 'False', 'name': 
'tensors', 'simple_type': 'TensorList'}], + torch._C._VariableFunctions.concat: [{'is_kwarg_only': 'False', 'name': 'tensors', 'simple_type': 'TensorList'}], + torch._C._VariableFunctions.concat: [{'is_kwarg_only': 'False', 'name': 'tensors', 'simple_type': 'TensorList'}, {'is_kwarg_only': 'False', 'name': 'dim', 'simple_type': 'Dimname'}], + torch._C._VariableFunctions.concat: [{'is_kwarg_only': 'False', 'name': 'tensors', 'simple_type': 'TensorList'}, {'is_kwarg_only': 'False', 'name': 'dim', 'simple_type': 'Dimname'}], + torch._C._VariableFunctions.concatenate: [{'is_kwarg_only': 'False', 'name': 'tensors', 'simple_type': 'TensorList'}], + torch._C._VariableFunctions.concatenate: [{'is_kwarg_only': 'False', 'name': 'tensors', 'simple_type': 'TensorList'}], + torch._C._VariableFunctions.concatenate: [{'is_kwarg_only': 'False', 'name': 'tensors', 'simple_type': 'TensorList'}, {'is_kwarg_only': 'False', 'name': 'dim', 'simple_type': 'Dimname'}], + torch._C._VariableFunctions.concatenate: [{'is_kwarg_only': 'False', 'name': 'tensors', 'simple_type': 'TensorList'}, {'is_kwarg_only': 'False', 'name': 'dim', 'simple_type': 'Dimname'}], + torch._C._VariableFunctions.block_diag: [{'is_kwarg_only': 'False', 'name': 'tensors', 'simple_type': 'TensorList'}], + torch._C._VariableFunctions.ceil: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.ceil: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.ceil_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.chain_matmul: [{'is_kwarg_only': 'False', 'name': 'matrices', 'simple_type': 'TensorList'}], + torch._C._VariableFunctions.chain_matmul: [{'is_kwarg_only': 'False', 'name': 'matrices', 'simple_type': 'TensorList'}], + torch._C._VariableFunctions.unsafe_chunk: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'chunks', 'simple_type': 'int64_t'}], + torch._C._VariableFunctions.chunk: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'chunks', 'simple_type': 'int64_t'}], + torch._C._VariableFunctions.tensor_split: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'sections', 'simple_type': 'SymInt'}], + torch._C._VariableFunctions.tensor_split: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'indices', 'simple_type': 'SymIntArrayRef'}], + torch._C._VariableFunctions.tensor_split: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'tensor_indices_or_sections', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.clamp: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.clamp: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.clamp: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.clamp: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.clamp_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.clamp_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.clamp_max: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, 
{'is_kwarg_only': 'False', 'name': 'max', 'simple_type': 'Scalar'}], + torch._C._VariableFunctions.clamp_max: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'max', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.clamp_max: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'max', 'simple_type': 'Scalar'}], + torch._C._VariableFunctions.clamp_max: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'max', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.clamp_max_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'max', 'simple_type': 'Scalar'}], + torch._C._VariableFunctions.clamp_max_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'max', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.clamp_min: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'min', 'simple_type': 'Scalar'}], + torch._C._VariableFunctions.clamp_min: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'min', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.clamp_min: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'min', 'simple_type': 'Scalar'}], + torch._C._VariableFunctions.clamp_min: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'min', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.clamp_min_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'min', 'simple_type': 'Scalar'}], + torch._C._VariableFunctions.clamp_min_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'min', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.clip: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.clip: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.clip: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.clip: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.clip_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.clip_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.cudnn_is_acceptable: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.complex: [{'is_kwarg_only': 'False', 'name': 'real', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'imag', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.complex: [{'is_kwarg_only': 'False', 'name': 'real', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'imag', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.polar: [{'is_kwarg_only': 'False', 'name': 'abs', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'angle', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.polar: [{'is_kwarg_only': 'False', 'name': 'abs', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'angle', 'simple_type': 'Tensor'}], + 
torch._C._VariableFunctions.constant_pad_nd: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'pad', 'simple_type': 'SymIntArrayRef'}], + torch._C._VariableFunctions.convolution: [{'is_kwarg_only': 'False', 'name': 'input', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'weight', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'bias', 'simple_type': 'Tensor?'}, {'is_kwarg_only': 'False', 'name': 'stride', 'simple_type': 'SymIntArrayRef'}, {'is_kwarg_only': 'False', 'name': 'padding', 'simple_type': 'SymIntArrayRef'}, {'is_kwarg_only': 'False', 'name': 'dilation', 'simple_type': 'SymIntArrayRef'}, {'is_kwarg_only': 'False', 'name': 'transposed', 'simple_type': 'bool'}, {'is_kwarg_only': 'False', 'name': 'output_padding', 'simple_type': 'SymIntArrayRef'}, {'is_kwarg_only': 'False', 'name': 'groups', 'simple_type': 'SymInt'}], + torch._C._VariableFunctions._convolution: [{'is_kwarg_only': 'False', 'name': 'input', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'weight', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'bias', 'simple_type': 'Tensor?'}, {'is_kwarg_only': 'False', 'name': 'stride', 'simple_type': 'SymIntArrayRef'}, {'is_kwarg_only': 'False', 'name': 'padding', 'simple_type': 'SymIntArrayRef'}, {'is_kwarg_only': 'False', 'name': 'dilation', 'simple_type': 'SymIntArrayRef'}, {'is_kwarg_only': 'False', 'name': 'transposed', 'simple_type': 'bool'}, {'is_kwarg_only': 'False', 'name': 'output_padding', 'simple_type': 'SymIntArrayRef'}, {'is_kwarg_only': 'False', 'name': 'groups', 'simple_type': 'SymInt'}, {'is_kwarg_only': 'False', 'name': 'benchmark', 'simple_type': 'bool'}, {'is_kwarg_only': 'False', 'name': 'deterministic', 'simple_type': 'bool'}, {'is_kwarg_only': 'False', 'name': 'cudnn_enabled', 'simple_type': 'bool'}, {'is_kwarg_only': 'False', 'name': 'allow_tf32', 'simple_type': 'bool'}], + torch._C._VariableFunctions._convolution: [{'is_kwarg_only': 'False', 'name': 'input', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'weight', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'bias', 'simple_type': 'Tensor?'}, {'is_kwarg_only': 'False', 'name': 'stride', 'simple_type': 'SymIntArrayRef'}, {'is_kwarg_only': 'False', 'name': 'padding', 'simple_type': 'SymIntArrayRef'}, {'is_kwarg_only': 'False', 'name': 'dilation', 'simple_type': 'SymIntArrayRef'}, {'is_kwarg_only': 'False', 'name': 'transposed', 'simple_type': 'bool'}, {'is_kwarg_only': 'False', 'name': 'output_padding', 'simple_type': 'IntArrayRef'}, {'is_kwarg_only': 'False', 'name': 'groups', 'simple_type': 'SymInt'}, {'is_kwarg_only': 'False', 'name': 'benchmark', 'simple_type': 'bool'}, {'is_kwarg_only': 'False', 'name': 'deterministic', 'simple_type': 'bool'}, {'is_kwarg_only': 'False', 'name': 'cudnn_enabled', 'simple_type': 'bool'}], + torch._C._VariableFunctions._convolution_mode: [{'is_kwarg_only': 'False', 'name': 'input', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'weight', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'bias', 'simple_type': 'Tensor?'}, {'is_kwarg_only': 'False', 'name': 'stride', 'simple_type': 'SymIntArrayRef'}, {'is_kwarg_only': 'False', 'name': 'padding', 'simple_type': 'c10::string_view'}, {'is_kwarg_only': 'False', 'name': 'dilation', 'simple_type': 'SymIntArrayRef'}, {'is_kwarg_only': 'False', 'name': 'groups', 'simple_type': 'SymInt'}], + torch._C._VariableFunctions.conv1d: [{'is_kwarg_only': 'False', 'name': 'input', 
'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'weight', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.conv1d: [{'is_kwarg_only': 'False', 'name': 'input', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'weight', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.conv2d: [{'is_kwarg_only': 'False', 'name': 'input', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'weight', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.conv2d: [{'is_kwarg_only': 'False', 'name': 'input', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'weight', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.conv3d: [{'is_kwarg_only': 'False', 'name': 'input', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'weight', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.conv3d: [{'is_kwarg_only': 'False', 'name': 'input', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'weight', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.conv_tbc: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'weight', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'bias', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.conv_transpose1d: [{'is_kwarg_only': 'False', 'name': 'input', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'weight', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.conv_transpose2d: [{'is_kwarg_only': 'False', 'name': 'input', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'weight', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.conv_transpose3d: [{'is_kwarg_only': 'False', 'name': 'input', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'weight', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions._copy_from: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'dst', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions._copy_from_and_resize: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'dst', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.cos: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.cos: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.cos_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.cosh: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.cosh: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.cosh_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.cosine_embedding_loss: [{'is_kwarg_only': 'False', 'name': 'input1', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'input2', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'target', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.count_nonzero: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'dim', 'simple_type': 'IntArrayRef'}], + torch._C._VariableFunctions.count_nonzero: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.cov: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + 
torch._C._VariableFunctions.corrcoef: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.cudnn_affine_grid_generator: [{'is_kwarg_only': 'False', 'name': 'theta', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'N', 'simple_type': 'int64_t'}, {'is_kwarg_only': 'False', 'name': 'C', 'simple_type': 'int64_t'}, {'is_kwarg_only': 'False', 'name': 'H', 'simple_type': 'int64_t'}, {'is_kwarg_only': 'False', 'name': 'W', 'simple_type': 'int64_t'}], + torch._C._VariableFunctions.cudnn_batch_norm: [{'is_kwarg_only': 'False', 'name': 'input', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'weight', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'bias', 'simple_type': 'Tensor?'}, {'is_kwarg_only': 'False', 'name': 'running_mean', 'simple_type': 'Tensor?'}, {'is_kwarg_only': 'False', 'name': 'running_var', 'simple_type': 'Tensor?'}, {'is_kwarg_only': 'False', 'name': 'training', 'simple_type': 'bool'}, {'is_kwarg_only': 'False', 'name': 'exponential_average_factor', 'simple_type': 'double'}, {'is_kwarg_only': 'False', 'name': 'epsilon', 'simple_type': 'double'}], + torch._C._VariableFunctions.cudnn_convolution: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'weight', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'padding', 'simple_type': 'SymIntArrayRef'}, {'is_kwarg_only': 'False', 'name': 'stride', 'simple_type': 'SymIntArrayRef'}, {'is_kwarg_only': 'False', 'name': 'dilation', 'simple_type': 'SymIntArrayRef'}, {'is_kwarg_only': 'False', 'name': 'groups', 'simple_type': 'SymInt'}, {'is_kwarg_only': 'False', 'name': 'benchmark', 'simple_type': 'bool'}, {'is_kwarg_only': 'False', 'name': 'deterministic', 'simple_type': 'bool'}, {'is_kwarg_only': 'False', 'name': 'allow_tf32', 'simple_type': 'bool'}], + torch._C._VariableFunctions.cudnn_convolution_transpose: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'weight', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'padding', 'simple_type': 'SymIntArrayRef'}, {'is_kwarg_only': 'False', 'name': 'output_padding', 'simple_type': 'SymIntArrayRef'}, {'is_kwarg_only': 'False', 'name': 'stride', 'simple_type': 'SymIntArrayRef'}, {'is_kwarg_only': 'False', 'name': 'dilation', 'simple_type': 'SymIntArrayRef'}, {'is_kwarg_only': 'False', 'name': 'groups', 'simple_type': 'SymInt'}, {'is_kwarg_only': 'False', 'name': 'benchmark', 'simple_type': 'bool'}, {'is_kwarg_only': 'False', 'name': 'deterministic', 'simple_type': 'bool'}, {'is_kwarg_only': 'False', 'name': 'allow_tf32', 'simple_type': 'bool'}], + torch._C._VariableFunctions._mps_convolution_transpose: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'weight', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'padding', 'simple_type': 'SymIntArrayRef'}, {'is_kwarg_only': 'False', 'name': 'output_padding', 'simple_type': 'SymIntArrayRef'}, {'is_kwarg_only': 'False', 'name': 'stride', 'simple_type': 'SymIntArrayRef'}, {'is_kwarg_only': 'False', 'name': 'dilation', 'simple_type': 'SymIntArrayRef'}, {'is_kwarg_only': 'False', 'name': 'groups', 'simple_type': 'SymInt'}], + torch._C._VariableFunctions.cudnn_convolution_relu: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'weight', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'bias', 'simple_type': 
'Tensor?'}, {'is_kwarg_only': 'False', 'name': 'stride', 'simple_type': 'SymIntArrayRef'}, {'is_kwarg_only': 'False', 'name': 'padding', 'simple_type': 'SymIntArrayRef'}, {'is_kwarg_only': 'False', 'name': 'dilation', 'simple_type': 'SymIntArrayRef'}, {'is_kwarg_only': 'False', 'name': 'groups', 'simple_type': 'SymInt'}], + torch._C._VariableFunctions.cudnn_convolution_add_relu: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'weight', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'z', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'alpha', 'simple_type': 'Scalar?'}, {'is_kwarg_only': 'False', 'name': 'bias', 'simple_type': 'Tensor?'}, {'is_kwarg_only': 'False', 'name': 'stride', 'simple_type': 'SymIntArrayRef'}, {'is_kwarg_only': 'False', 'name': 'padding', 'simple_type': 'SymIntArrayRef'}, {'is_kwarg_only': 'False', 'name': 'dilation', 'simple_type': 'SymIntArrayRef'}, {'is_kwarg_only': 'False', 'name': 'groups', 'simple_type': 'SymInt'}], + torch._C._VariableFunctions.cudnn_grid_sampler: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'grid', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.cummax: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'dim', 'simple_type': 'int64_t'}], + torch._C._VariableFunctions.cummax: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'dim', 'simple_type': 'int64_t'}], + torch._C._VariableFunctions.cummax: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'dim', 'simple_type': 'Dimname'}], + torch._C._VariableFunctions.cummax: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'dim', 'simple_type': 'Dimname'}], + torch._C._VariableFunctions._cummax_helper: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'values', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'indices', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'dim', 'simple_type': 'int64_t'}], + torch._C._VariableFunctions.cummin: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'dim', 'simple_type': 'int64_t'}], + torch._C._VariableFunctions.cummin: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'dim', 'simple_type': 'int64_t'}], + torch._C._VariableFunctions.cummin: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'dim', 'simple_type': 'Dimname'}], + torch._C._VariableFunctions.cummin: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'dim', 'simple_type': 'Dimname'}], + torch._C._VariableFunctions._cummin_helper: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'values', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'indices', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'dim', 'simple_type': 'int64_t'}], + torch._C._VariableFunctions.cumprod: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'dim', 'simple_type': 'int64_t'}], + torch._C._VariableFunctions.cumprod: [{'is_kwarg_only': 
'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'dim', 'simple_type': 'int64_t'}], + torch._C._VariableFunctions.cumprod: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'dim', 'simple_type': 'Dimname'}], + torch._C._VariableFunctions.cumprod: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'dim', 'simple_type': 'Dimname'}], + torch._C._VariableFunctions.cumsum: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'dim', 'simple_type': 'int64_t'}], + torch._C._VariableFunctions.cumsum: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'dim', 'simple_type': 'int64_t'}], + torch._C._VariableFunctions.cumsum: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'dim', 'simple_type': 'Dimname'}], + torch._C._VariableFunctions.cumsum: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'dim', 'simple_type': 'Dimname'}], + torch._C._VariableFunctions.cumulative_trapezoid: [{'is_kwarg_only': 'False', 'name': 'y', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'x', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.cumulative_trapezoid: [{'is_kwarg_only': 'False', 'name': 'y', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.ctc_loss: [{'is_kwarg_only': 'False', 'name': 'log_probs', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'targets', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'input_lengths', 'simple_type': 'IntArrayRef'}, {'is_kwarg_only': 'False', 'name': 'target_lengths', 'simple_type': 'IntArrayRef'}], + torch._C._VariableFunctions.ctc_loss: [{'is_kwarg_only': 'False', 'name': 'log_probs', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'targets', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'input_lengths', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'target_lengths', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions._ctc_loss: [{'is_kwarg_only': 'False', 'name': 'log_probs', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'targets', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'input_lengths', 'simple_type': 'IntArrayRef'}, {'is_kwarg_only': 'False', 'name': 'target_lengths', 'simple_type': 'IntArrayRef'}], + torch._C._VariableFunctions._ctc_loss: [{'is_kwarg_only': 'False', 'name': 'log_probs', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'targets', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'input_lengths', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'target_lengths', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.diag_embed: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.diagflat: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.diagonal: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.diagonal: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.diff: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.diff: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 
'Tensor'}], + torch._C._VariableFunctions.gradient: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.gradient: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'True', 'name': 'spacing', 'simple_type': 'Scalar'}, {'is_kwarg_only': 'True', 'name': 'dim', 'simple_type': 'IntArrayRef'}], + torch._C._VariableFunctions.gradient: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'True', 'name': 'dim', 'simple_type': 'IntArrayRef'}], + torch._C._VariableFunctions.gradient: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'True', 'name': 'spacing', 'simple_type': 'ScalarList'}], + torch._C._VariableFunctions.gradient: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'True', 'name': 'spacing', 'simple_type': 'ScalarList'}, {'is_kwarg_only': 'True', 'name': 'dim', 'simple_type': 'IntArrayRef'}], + torch._C._VariableFunctions.gradient: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'True', 'name': 'spacing', 'simple_type': 'TensorList'}], + torch._C._VariableFunctions.gradient: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'True', 'name': 'spacing', 'simple_type': 'TensorList'}, {'is_kwarg_only': 'True', 'name': 'dim', 'simple_type': 'IntArrayRef'}], + torch._C._VariableFunctions.div: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.div: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.div: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'True', 'name': 'rounding_mode', 'simple_type': 'c10::string_view?'}], + torch._C._VariableFunctions.div: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'True', 'name': 'rounding_mode', 'simple_type': 'c10::string_view?'}], + torch._C._VariableFunctions.div: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Scalar'}, {'is_kwarg_only': 'True', 'name': 'rounding_mode', 'simple_type': 'c10::string_view?'}], + torch._C._VariableFunctions.divide: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.divide: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.divide: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Scalar'}], + torch._C._VariableFunctions.divide: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'True', 'name': 'rounding_mode', 'simple_type': 'c10::string_view?'}], + torch._C._VariableFunctions.divide: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 
'simple_type': 'Tensor'}, {'is_kwarg_only': 'True', 'name': 'rounding_mode', 'simple_type': 'c10::string_view?'}], + torch._C._VariableFunctions.divide: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Scalar'}, {'is_kwarg_only': 'True', 'name': 'rounding_mode', 'simple_type': 'c10::string_view?'}], + torch._C._VariableFunctions.true_divide: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.true_divide: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.true_divide: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Scalar'}], + torch._C._VariableFunctions.dot: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'tensor', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.dot: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'tensor', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.vdot: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.vdot: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.einsum: [{'is_kwarg_only': 'False', 'name': 'equation', 'simple_type': 'c10::string_view'}, {'is_kwarg_only': 'False', 'name': 'tensors', 'simple_type': 'TensorList'}], + torch._C._VariableFunctions.embedding: [{'is_kwarg_only': 'False', 'name': 'weight', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'indices', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.embedding_renorm_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'indices', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'max_norm', 'simple_type': 'double'}, {'is_kwarg_only': 'False', 'name': 'norm_type', 'simple_type': 'double'}], + torch._C._VariableFunctions._embedding_bag_forward_only: [{'is_kwarg_only': 'False', 'name': 'weight', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'indices', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'offsets', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions._rowwise_prune: [{'is_kwarg_only': 'False', 'name': 'weight', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'mask', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'compressed_indices_dtype', 'simple_type': 'ScalarType'}], + torch._C._VariableFunctions.row_stack: [{'is_kwarg_only': 'False', 'name': 'tensors', 'simple_type': 'TensorList'}], + torch._C._VariableFunctions.row_stack: [{'is_kwarg_only': 'False', 'name': 'tensors', 'simple_type': 'TensorList'}], + torch._C._VariableFunctions.embedding_bag: [{'is_kwarg_only': 'False', 'name': 'weight', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'indices', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'offsets', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.embedding_bag: [{'is_kwarg_only': 'False', 'name': 'weight', 'simple_type': 'Tensor'}, 
{'is_kwarg_only': 'False', 'name': 'indices', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'offsets', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'scale_grad_by_freq', 'simple_type': 'bool'}, {'is_kwarg_only': 'False', 'name': 'mode', 'simple_type': 'int64_t'}, {'is_kwarg_only': 'False', 'name': 'sparse', 'simple_type': 'bool'}, {'is_kwarg_only': 'False', 'name': 'per_sample_weights', 'simple_type': 'Tensor?'}, {'is_kwarg_only': 'False', 'name': 'include_last_offset', 'simple_type': 'bool'}, {'is_kwarg_only': 'False', 'name': 'padding_idx', 'simple_type': 'int64_t?'}], + torch._C._VariableFunctions._embedding_bag: [{'is_kwarg_only': 'False', 'name': 'weight', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'indices', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'offsets', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.empty: [{'is_kwarg_only': 'False', 'name': 'size', 'simple_type': 'IntArrayRef'}, {'is_kwarg_only': 'True', 'name': 'names', 'simple_type': 'DimnameList?'}], + torch._C._VariableFunctions.empty: [{'is_kwarg_only': 'False', 'name': 'size', 'simple_type': 'SymIntArrayRef'}], + torch._C._VariableFunctions.empty: [{'is_kwarg_only': 'False', 'name': 'size', 'simple_type': 'SymIntArrayRef'}], + torch._C._VariableFunctions.empty_permuted: [{'is_kwarg_only': 'False', 'name': 'size', 'simple_type': 'SymIntArrayRef'}, {'is_kwarg_only': 'False', 'name': 'physical_layout', 'simple_type': 'IntArrayRef'}], + torch._C._VariableFunctions._empty_affine_quantized: [{'is_kwarg_only': 'False', 'name': 'size', 'simple_type': 'SymIntArrayRef'}], + torch._C._VariableFunctions._empty_per_channel_affine_quantized: [{'is_kwarg_only': 'False', 'name': 'size', 'simple_type': 'SymIntArrayRef'}, {'is_kwarg_only': 'True', 'name': 'scales', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'True', 'name': 'zero_points', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'True', 'name': 'axis', 'simple_type': 'int64_t'}], + torch._C._VariableFunctions._resize_output_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'size', 'simple_type': 'SymIntArrayRef'}, {'is_kwarg_only': 'False', 'name': 'device', 'simple_type': 'Device'}], + torch._C._VariableFunctions.empty_quantized: [{'is_kwarg_only': 'False', 'name': 'size', 'simple_type': 'IntArrayRef'}, {'is_kwarg_only': 'False', 'name': 'qtensor', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.empty_like: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.empty_strided: [{'is_kwarg_only': 'False', 'name': 'size', 'simple_type': 'SymIntArrayRef'}, {'is_kwarg_only': 'False', 'name': 'stride', 'simple_type': 'SymIntArrayRef'}], + torch._C._VariableFunctions.erf: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.erf: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.erf_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.erfc: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.erfc: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.erfc_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.exp: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.exp: 
[{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.exp_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.exp2: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.exp2: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.exp2_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.expm1: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.expm1: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.expm1_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.eye: [{'is_kwarg_only': 'False', 'name': 'n', 'simple_type': 'SymInt'}], + torch._C._VariableFunctions.eye: [{'is_kwarg_only': 'False', 'name': 'n', 'simple_type': 'SymInt'}, {'is_kwarg_only': 'False', 'name': 'm', 'simple_type': 'SymInt'}], + torch._C._VariableFunctions.eye: [{'is_kwarg_only': 'False', 'name': 'n', 'simple_type': 'SymInt'}], + torch._C._VariableFunctions.eye: [{'is_kwarg_only': 'False', 'name': 'n', 'simple_type': 'SymInt'}, {'is_kwarg_only': 'False', 'name': 'm', 'simple_type': 'SymInt'}], + torch._C._VariableFunctions.flatten: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.flatten: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'start_dim', 'simple_type': 'int64_t'}, {'is_kwarg_only': 'False', 'name': 'end_dim', 'simple_type': 'int64_t'}, {'is_kwarg_only': 'False', 'name': 'out_dim', 'simple_type': 'Dimname'}], + torch._C._VariableFunctions.flatten: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'start_dim', 'simple_type': 'Dimname'}, {'is_kwarg_only': 'False', 'name': 'end_dim', 'simple_type': 'Dimname'}, {'is_kwarg_only': 'False', 'name': 'out_dim', 'simple_type': 'Dimname'}], + torch._C._VariableFunctions.flatten: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'dims', 'simple_type': 'DimnameList'}, {'is_kwarg_only': 'False', 'name': 'out_dim', 'simple_type': 'Dimname'}], + torch._C._VariableFunctions.unflatten: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'dim', 'simple_type': 'int64_t'}, {'is_kwarg_only': 'False', 'name': 'sizes', 'simple_type': 'SymIntArrayRef'}], + torch._C._VariableFunctions.unflatten: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'dim', 'simple_type': 'Dimname'}, {'is_kwarg_only': 'False', 'name': 'sizes', 'simple_type': 'SymIntArrayRef'}, {'is_kwarg_only': 'False', 'name': 'names', 'simple_type': 'DimnameList'}], + torch._C._VariableFunctions.fill: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'value', 'simple_type': 'Scalar'}], + torch._C._VariableFunctions.fill: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'value', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.fill_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'value', 'simple_type': 'Scalar'}], + 
torch._C._VariableFunctions.fill_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'value', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.floor: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.floor: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.floor_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.floor_divide: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.floor_divide: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.floor_divide: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Scalar'}], + torch._C._VariableFunctions.frac: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.frac: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.frac_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.full: [{'is_kwarg_only': 'False', 'name': 'size', 'simple_type': 'IntArrayRef'}, {'is_kwarg_only': 'False', 'name': 'fill_value', 'simple_type': 'Scalar'}, {'is_kwarg_only': 'True', 'name': 'names', 'simple_type': 'DimnameList?'}], + torch._C._VariableFunctions.full: [{'is_kwarg_only': 'False', 'name': 'size', 'simple_type': 'SymIntArrayRef'}, {'is_kwarg_only': 'False', 'name': 'fill_value', 'simple_type': 'Scalar'}], + torch._C._VariableFunctions.full: [{'is_kwarg_only': 'False', 'name': 'size', 'simple_type': 'SymIntArrayRef'}, {'is_kwarg_only': 'False', 'name': 'fill_value', 'simple_type': 'Scalar'}], + torch._C._VariableFunctions.full_like: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'fill_value', 'simple_type': 'Scalar'}], + torch._C._VariableFunctions.from_file: [{'is_kwarg_only': 'False', 'name': 'filename', 'simple_type': 'c10::string_view'}], + torch._C._VariableFunctions.gcd: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.gcd: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.gcd_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.lcm: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.lcm: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.lcm_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.grid_sampler: [{'is_kwarg_only': 'False', 'name': 'input', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'grid', 
'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'interpolation_mode', 'simple_type': 'int64_t'}, {'is_kwarg_only': 'False', 'name': 'padding_mode', 'simple_type': 'int64_t'}, {'is_kwarg_only': 'False', 'name': 'align_corners', 'simple_type': 'bool'}], + torch._C._VariableFunctions.grid_sampler_2d: [{'is_kwarg_only': 'False', 'name': 'input', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'grid', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'interpolation_mode', 'simple_type': 'int64_t'}, {'is_kwarg_only': 'False', 'name': 'padding_mode', 'simple_type': 'int64_t'}, {'is_kwarg_only': 'False', 'name': 'align_corners', 'simple_type': 'bool'}], + torch._C._VariableFunctions._grid_sampler_2d_cpu_fallback: [{'is_kwarg_only': 'False', 'name': 'input', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'grid', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'interpolation_mode', 'simple_type': 'int64_t'}, {'is_kwarg_only': 'False', 'name': 'padding_mode', 'simple_type': 'int64_t'}, {'is_kwarg_only': 'False', 'name': 'align_corners', 'simple_type': 'bool'}], + torch._C._VariableFunctions.grid_sampler_3d: [{'is_kwarg_only': 'False', 'name': 'input', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'grid', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'interpolation_mode', 'simple_type': 'int64_t'}, {'is_kwarg_only': 'False', 'name': 'padding_mode', 'simple_type': 'int64_t'}, {'is_kwarg_only': 'False', 'name': 'align_corners', 'simple_type': 'bool'}], + torch._C._VariableFunctions.hann_window: [{'is_kwarg_only': 'False', 'name': 'window_length', 'simple_type': 'int64_t'}], + torch._C._VariableFunctions.hann_window: [{'is_kwarg_only': 'False', 'name': 'window_length', 'simple_type': 'int64_t'}, {'is_kwarg_only': 'False', 'name': 'periodic', 'simple_type': 'bool'}], + torch._C._VariableFunctions.hamming_window: [{'is_kwarg_only': 'False', 'name': 'window_length', 'simple_type': 'int64_t'}], + torch._C._VariableFunctions.hamming_window: [{'is_kwarg_only': 'False', 'name': 'window_length', 'simple_type': 'int64_t'}, {'is_kwarg_only': 'False', 'name': 'periodic', 'simple_type': 'bool'}], + torch._C._VariableFunctions.hamming_window: [{'is_kwarg_only': 'False', 'name': 'window_length', 'simple_type': 'int64_t'}, {'is_kwarg_only': 'False', 'name': 'periodic', 'simple_type': 'bool'}, {'is_kwarg_only': 'False', 'name': 'alpha', 'simple_type': 'double'}], + torch._C._VariableFunctions.hamming_window: [{'is_kwarg_only': 'False', 'name': 'window_length', 'simple_type': 'int64_t'}, {'is_kwarg_only': 'False', 'name': 'periodic', 'simple_type': 'bool'}, {'is_kwarg_only': 'False', 'name': 'alpha', 'simple_type': 'double'}, {'is_kwarg_only': 'False', 'name': 'beta', 'simple_type': 'double'}], + torch._C._VariableFunctions.kaiser_window: [{'is_kwarg_only': 'False', 'name': 'window_length', 'simple_type': 'int64_t'}], + torch._C._VariableFunctions.kaiser_window: [{'is_kwarg_only': 'False', 'name': 'window_length', 'simple_type': 'int64_t'}, {'is_kwarg_only': 'False', 'name': 'periodic', 'simple_type': 'bool'}], + torch._C._VariableFunctions.kaiser_window: [{'is_kwarg_only': 'False', 'name': 'window_length', 'simple_type': 'int64_t'}, {'is_kwarg_only': 'False', 'name': 'periodic', 'simple_type': 'bool'}, {'is_kwarg_only': 'False', 'name': 'beta', 'simple_type': 'double'}], + torch._C._VariableFunctions.hinge_embedding_loss: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 
'target', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.group_norm: [{'is_kwarg_only': 'False', 'name': 'input', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'num_groups', 'simple_type': 'int64_t'}], + torch._C._VariableFunctions.native_group_norm: [{'is_kwarg_only': 'False', 'name': 'input', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'weight', 'simple_type': 'Tensor?'}, {'is_kwarg_only': 'False', 'name': 'bias', 'simple_type': 'Tensor?'}, {'is_kwarg_only': 'False', 'name': 'N', 'simple_type': 'SymInt'}, {'is_kwarg_only': 'False', 'name': 'C', 'simple_type': 'SymInt'}, {'is_kwarg_only': 'False', 'name': 'HxW', 'simple_type': 'SymInt'}, {'is_kwarg_only': 'False', 'name': 'group', 'simple_type': 'int64_t'}, {'is_kwarg_only': 'False', 'name': 'eps', 'simple_type': 'double'}], + torch._C._VariableFunctions._fft_r2c: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'dim', 'simple_type': 'IntArrayRef'}, {'is_kwarg_only': 'False', 'name': 'normalization', 'simple_type': 'int64_t'}, {'is_kwarg_only': 'False', 'name': 'onesided', 'simple_type': 'bool'}], + torch._C._VariableFunctions._fft_r2c: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'dim', 'simple_type': 'IntArrayRef'}, {'is_kwarg_only': 'False', 'name': 'normalization', 'simple_type': 'int64_t'}, {'is_kwarg_only': 'False', 'name': 'onesided', 'simple_type': 'bool'}], + torch._C._VariableFunctions._fft_c2r: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'dim', 'simple_type': 'IntArrayRef'}, {'is_kwarg_only': 'False', 'name': 'normalization', 'simple_type': 'int64_t'}, {'is_kwarg_only': 'False', 'name': 'last_dim_size', 'simple_type': 'SymInt'}], + torch._C._VariableFunctions._fft_c2r: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'dim', 'simple_type': 'IntArrayRef'}, {'is_kwarg_only': 'False', 'name': 'normalization', 'simple_type': 'int64_t'}, {'is_kwarg_only': 'False', 'name': 'last_dim_size', 'simple_type': 'SymInt'}], + torch._C._VariableFunctions._fft_c2c: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'dim', 'simple_type': 'SymIntArrayRef'}, {'is_kwarg_only': 'False', 'name': 'normalization', 'simple_type': 'int64_t'}, {'is_kwarg_only': 'False', 'name': 'forward', 'simple_type': 'bool'}], + torch._C._VariableFunctions._fft_c2c: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'dim', 'simple_type': 'SymIntArrayRef'}, {'is_kwarg_only': 'False', 'name': 'normalization', 'simple_type': 'int64_t'}, {'is_kwarg_only': 'False', 'name': 'forward', 'simple_type': 'bool'}], + torch._C._VariableFunctions._validate_compressed_sparse_indices: [{'is_kwarg_only': 'False', 'name': 'is_crow', 'simple_type': 'bool'}, {'is_kwarg_only': 'False', 'name': 'compressed_idx', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'plain_idx', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'cdim', 'simple_type': 'int64_t'}, {'is_kwarg_only': 'False', 'name': 'dim', 'simple_type': 'int64_t'}, {'is_kwarg_only': 'False', 'name': 'nnz', 'simple_type': 'int64_t'}], + torch._C._VariableFunctions._cufft_get_plan_cache_size: [{'is_kwarg_only': 'False', 'name': 'device_index', 'simple_type': 'DeviceIndex'}], + torch._C._VariableFunctions._cufft_get_plan_cache_max_size: 
[{'is_kwarg_only': 'False', 'name': 'device_index', 'simple_type': 'DeviceIndex'}], + torch._C._VariableFunctions._cufft_set_plan_cache_max_size: [{'is_kwarg_only': 'False', 'name': 'device_index', 'simple_type': 'DeviceIndex'}, {'is_kwarg_only': 'False', 'name': 'max_size', 'simple_type': 'int64_t'}], + torch._C._VariableFunctions._cufft_clear_plan_cache: [{'is_kwarg_only': 'False', 'name': 'device_index', 'simple_type': 'DeviceIndex'}], + torch._C._VariableFunctions._unsafe_index: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'indices', 'simple_type': 'c10::List>'}], + torch._C._VariableFunctions.index_copy: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'dim', 'simple_type': 'int64_t'}, {'is_kwarg_only': 'False', 'name': 'index', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'source', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.index_copy: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'dim', 'simple_type': 'int64_t'}, {'is_kwarg_only': 'False', 'name': 'index', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'source', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.index_copy: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'dim', 'simple_type': 'Dimname'}, {'is_kwarg_only': 'False', 'name': 'index', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'source', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.index_put_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'indices', 'simple_type': 'c10::List>'}, {'is_kwarg_only': 'False', 'name': 'values', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.index_put: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'indices', 'simple_type': 'c10::List>'}, {'is_kwarg_only': 'False', 'name': 'values', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions._unsafe_index_put: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'indices', 'simple_type': 'c10::List>'}, {'is_kwarg_only': 'False', 'name': 'values', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions._index_put_impl_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'indices', 'simple_type': 'c10::List>'}, {'is_kwarg_only': 'False', 'name': 'values', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.instance_norm: [{'is_kwarg_only': 'False', 'name': 'input', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'weight', 'simple_type': 'Tensor?'}, {'is_kwarg_only': 'False', 'name': 'bias', 'simple_type': 'Tensor?'}, {'is_kwarg_only': 'False', 'name': 'running_mean', 'simple_type': 'Tensor?'}, {'is_kwarg_only': 'False', 'name': 'running_var', 'simple_type': 'Tensor?'}, {'is_kwarg_only': 'False', 'name': 'use_input_stats', 'simple_type': 'bool'}, {'is_kwarg_only': 'False', 'name': 'momentum', 'simple_type': 'double'}, {'is_kwarg_only': 'False', 'name': 'eps', 'simple_type': 'double'}, {'is_kwarg_only': 'False', 'name': 'cudnn_enabled', 'simple_type': 'bool'}], + torch._C._VariableFunctions.isclose: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Tensor'}], + 
torch._C._VariableFunctions.isin: [{'is_kwarg_only': 'False', 'name': 'elements', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'test_elements', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.isin: [{'is_kwarg_only': 'False', 'name': 'elements', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'test_elements', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.isin: [{'is_kwarg_only': 'False', 'name': 'elements', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'test_element', 'simple_type': 'Scalar'}], + torch._C._VariableFunctions.isin: [{'is_kwarg_only': 'False', 'name': 'elements', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'test_element', 'simple_type': 'Scalar'}], + torch._C._VariableFunctions.isin: [{'is_kwarg_only': 'False', 'name': 'element', 'simple_type': 'Scalar'}, {'is_kwarg_only': 'False', 'name': 'test_elements', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.isin: [{'is_kwarg_only': 'False', 'name': 'element', 'simple_type': 'Scalar'}, {'is_kwarg_only': 'False', 'name': 'test_elements', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.isnan: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.is_distributed: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.is_floating_point: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.is_complex: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.is_conj: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions._is_zerotensor: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.is_neg: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.isreal: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.is_nonzero: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.is_same_size: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.is_signed: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.is_inference: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.kl_div: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'target', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.kron: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.kron: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.kthvalue: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'k', 'simple_type': 'int64_t'}], + torch._C._VariableFunctions.kthvalue: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'k', 'simple_type': 'int64_t'}], + torch._C._VariableFunctions.kthvalue: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 
'Tensor'}, {'is_kwarg_only': 'False', 'name': 'k', 'simple_type': 'int64_t'}, {'is_kwarg_only': 'False', 'name': 'dim', 'simple_type': 'Dimname'}], + torch._C._VariableFunctions.kthvalue: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'k', 'simple_type': 'int64_t'}, {'is_kwarg_only': 'False', 'name': 'dim', 'simple_type': 'Dimname'}], + torch._C._VariableFunctions.layer_norm: [{'is_kwarg_only': 'False', 'name': 'input', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'normalized_shape', 'simple_type': 'SymIntArrayRef'}], + torch._C._VariableFunctions.native_layer_norm: [{'is_kwarg_only': 'False', 'name': 'input', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'normalized_shape', 'simple_type': 'SymIntArrayRef'}, {'is_kwarg_only': 'False', 'name': 'weight', 'simple_type': 'Tensor?'}, {'is_kwarg_only': 'False', 'name': 'bias', 'simple_type': 'Tensor?'}, {'is_kwarg_only': 'False', 'name': 'eps', 'simple_type': 'double'}], + torch._C._VariableFunctions.nan_to_num: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.nan_to_num: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.nan_to_num_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.mkldnn_linear_backward_weights: [{'is_kwarg_only': 'False', 'name': 'grad_output', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'input', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'weight', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'bias_defined', 'simple_type': 'bool'}], + torch._C._VariableFunctions._cslt_compress: [{'is_kwarg_only': 'False', 'name': 'input', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions._cslt_sparse_mm: [{'is_kwarg_only': 'False', 'name': 'compressed_A', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'dense_B', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions._sparse_semi_structured_linear: [{'is_kwarg_only': 'False', 'name': 'input', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'weight', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'meta', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions._mixed_dtypes_linear: [{'is_kwarg_only': 'False', 'name': 'input', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'weight', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'scale', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.fbgemm_linear_int8_weight_fp32_activation: [{'is_kwarg_only': 'False', 'name': 'input', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'weight', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'packed', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'col_offsets', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'weight_scale', 'simple_type': 'Scalar'}, {'is_kwarg_only': 'False', 'name': 'weight_zero_point', 'simple_type': 'Scalar'}, {'is_kwarg_only': 'False', 'name': 'bias', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.fbgemm_linear_int8_weight: [{'is_kwarg_only': 'False', 'name': 'input', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'weight', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'packed', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'col_offsets', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 
'weight_scale', 'simple_type': 'Scalar'}, {'is_kwarg_only': 'False', 'name': 'weight_zero_point', 'simple_type': 'Scalar'}, {'is_kwarg_only': 'False', 'name': 'bias', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.fbgemm_linear_quantize_weight: [{'is_kwarg_only': 'False', 'name': 'input', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.fbgemm_pack_gemm_matrix_fp16: [{'is_kwarg_only': 'False', 'name': 'input', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.fbgemm_linear_fp16_weight_fp32_activation: [{'is_kwarg_only': 'False', 'name': 'input', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'packed_weight', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'bias', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.fbgemm_linear_fp16_weight: [{'is_kwarg_only': 'False', 'name': 'input', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'packed_weight', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'bias', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.fbgemm_pack_quantized_matrix: [{'is_kwarg_only': 'False', 'name': 'input', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.fbgemm_pack_quantized_matrix: [{'is_kwarg_only': 'False', 'name': 'input', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'K', 'simple_type': 'int64_t'}, {'is_kwarg_only': 'False', 'name': 'N', 'simple_type': 'int64_t'}], + torch._C._VariableFunctions.ldexp: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.ldexp: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.ldexp_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.linspace: [{'is_kwarg_only': 'False', 'name': 'start', 'simple_type': 'Scalar'}, {'is_kwarg_only': 'False', 'name': 'end', 'simple_type': 'Scalar'}, {'is_kwarg_only': 'False', 'name': 'steps', 'simple_type': 'int64_t'}], + torch._C._VariableFunctions.linspace: [{'is_kwarg_only': 'False', 'name': 'start', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'end', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'steps', 'simple_type': 'int64_t'}], + torch._C._VariableFunctions.linspace: [{'is_kwarg_only': 'False', 'name': 'start', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'end', 'simple_type': 'Scalar'}, {'is_kwarg_only': 'False', 'name': 'steps', 'simple_type': 'int64_t'}], + torch._C._VariableFunctions.linspace: [{'is_kwarg_only': 'False', 'name': 'start', 'simple_type': 'Scalar'}, {'is_kwarg_only': 'False', 'name': 'end', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'steps', 'simple_type': 'int64_t'}], + torch._C._VariableFunctions.linspace: [{'is_kwarg_only': 'False', 'name': 'start', 'simple_type': 'Scalar'}, {'is_kwarg_only': 'False', 'name': 'end', 'simple_type': 'Scalar'}, {'is_kwarg_only': 'False', 'name': 'steps', 'simple_type': 'int64_t'}], + torch._C._VariableFunctions.linspace: [{'is_kwarg_only': 'False', 'name': 'start', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'end', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'steps', 'simple_type': 'int64_t'}], + torch._C._VariableFunctions.linspace: [{'is_kwarg_only': 'False', 'name': 'start', 
'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'end', 'simple_type': 'Scalar'}, {'is_kwarg_only': 'False', 'name': 'steps', 'simple_type': 'int64_t'}], + torch._C._VariableFunctions.linspace: [{'is_kwarg_only': 'False', 'name': 'start', 'simple_type': 'Scalar'}, {'is_kwarg_only': 'False', 'name': 'end', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'steps', 'simple_type': 'int64_t'}], + torch._C._VariableFunctions.log: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.log: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.log_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.log10: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.log10: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.log10_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.log1p: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.log1p: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.log1p_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.log2: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.log2: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.log2_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.logaddexp: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.logaddexp: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.logaddexp2: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.logaddexp2: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.xlogy: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.xlogy: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Scalar'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.xlogy: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Scalar'}], + torch._C._VariableFunctions.xlogy: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.xlogy: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Scalar'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.xlogy: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Scalar'}], + torch._C._VariableFunctions.xlogy_: 
[{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.xlogy_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Scalar'}], + torch._C._VariableFunctions.logspace: [{'is_kwarg_only': 'False', 'name': 'start', 'simple_type': 'Scalar'}, {'is_kwarg_only': 'False', 'name': 'end', 'simple_type': 'Scalar'}, {'is_kwarg_only': 'False', 'name': 'steps', 'simple_type': 'int64_t'}], + torch._C._VariableFunctions.logspace: [{'is_kwarg_only': 'False', 'name': 'start', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'end', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'steps', 'simple_type': 'int64_t'}], + torch._C._VariableFunctions.logspace: [{'is_kwarg_only': 'False', 'name': 'start', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'end', 'simple_type': 'Scalar'}, {'is_kwarg_only': 'False', 'name': 'steps', 'simple_type': 'int64_t'}], + torch._C._VariableFunctions.logspace: [{'is_kwarg_only': 'False', 'name': 'start', 'simple_type': 'Scalar'}, {'is_kwarg_only': 'False', 'name': 'end', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'steps', 'simple_type': 'int64_t'}], + torch._C._VariableFunctions.logspace: [{'is_kwarg_only': 'False', 'name': 'start', 'simple_type': 'Scalar'}, {'is_kwarg_only': 'False', 'name': 'end', 'simple_type': 'Scalar'}, {'is_kwarg_only': 'False', 'name': 'steps', 'simple_type': 'int64_t'}], + torch._C._VariableFunctions.logspace: [{'is_kwarg_only': 'False', 'name': 'start', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'end', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'steps', 'simple_type': 'int64_t'}], + torch._C._VariableFunctions.logspace: [{'is_kwarg_only': 'False', 'name': 'start', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'end', 'simple_type': 'Scalar'}, {'is_kwarg_only': 'False', 'name': 'steps', 'simple_type': 'int64_t'}], + torch._C._VariableFunctions.logspace: [{'is_kwarg_only': 'False', 'name': 'start', 'simple_type': 'Scalar'}, {'is_kwarg_only': 'False', 'name': 'end', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'steps', 'simple_type': 'int64_t'}], + torch._C._VariableFunctions.log_softmax: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'dim', 'simple_type': 'int64_t'}], + torch._C._VariableFunctions.log_softmax: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'dim', 'simple_type': 'int64_t'}], + torch._C._VariableFunctions.log_softmax: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'dim', 'simple_type': 'Dimname'}], + torch._C._VariableFunctions._log_softmax: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'dim', 'simple_type': 'int64_t'}, {'is_kwarg_only': 'False', 'name': 'half_to_float', 'simple_type': 'bool'}], + torch._C._VariableFunctions._log_softmax: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'dim', 'simple_type': 'int64_t'}, {'is_kwarg_only': 'False', 'name': 'half_to_float', 'simple_type': 'bool'}], + torch._C._VariableFunctions._log_softmax_backward_data: [{'is_kwarg_only': 'False', 'name': 'grad_output', 'simple_type': 'Tensor'}, {'is_kwarg_only': 
'False', 'name': 'output', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'dim', 'simple_type': 'int64_t'}, {'is_kwarg_only': 'False', 'name': 'input_dtype', 'simple_type': 'ScalarType'}], + torch._C._VariableFunctions._log_softmax_backward_data: [{'is_kwarg_only': 'False', 'name': 'grad_output', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'output', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'dim', 'simple_type': 'int64_t'}, {'is_kwarg_only': 'False', 'name': 'input_dtype', 'simple_type': 'ScalarType'}], + torch._C._VariableFunctions._logcumsumexp: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'dim', 'simple_type': 'int64_t'}], + torch._C._VariableFunctions._logcumsumexp: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'dim', 'simple_type': 'int64_t'}], + torch._C._VariableFunctions.logcumsumexp: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'dim', 'simple_type': 'int64_t'}], + torch._C._VariableFunctions.logcumsumexp: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'dim', 'simple_type': 'int64_t'}], + torch._C._VariableFunctions.logcumsumexp: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'dim', 'simple_type': 'Dimname'}], + torch._C._VariableFunctions.logcumsumexp: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'dim', 'simple_type': 'Dimname'}], + torch._C._VariableFunctions.logsumexp: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'dim', 'simple_type': 'IntArrayRef', 'size': 1}], + torch._C._VariableFunctions.logsumexp: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'dim', 'simple_type': 'IntArrayRef', 'size': 1}], + torch._C._VariableFunctions.logsumexp: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'dim', 'simple_type': 'DimnameList', 'size': 1}], + torch._C._VariableFunctions.logsumexp: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'dim', 'simple_type': 'DimnameList', 'size': 1}], + torch._C._VariableFunctions.margin_ranking_loss: [{'is_kwarg_only': 'False', 'name': 'input1', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'input2', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'target', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.matmul: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.matmul: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.matrix_power: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'n', 'simple_type': 'int64_t'}], + torch._C._VariableFunctions.matrix_power: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'n', 'simple_type': 'int64_t'}], + torch._C._VariableFunctions.matrix_exp: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + 
torch._C._VariableFunctions._aminmax: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions._aminmax: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'dim', 'simple_type': 'int64_t'}], + torch._C._VariableFunctions.aminmax: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.aminmax: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions._compute_linear_combination: [{'is_kwarg_only': 'False', 'name': 'input', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'coefficients', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions._compute_linear_combination: [{'is_kwarg_only': 'False', 'name': 'input', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'coefficients', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.max: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'dim', 'simple_type': 'int64_t'}], + torch._C._VariableFunctions.max: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'dim', 'simple_type': 'int64_t'}], + torch._C._VariableFunctions.max: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'dim', 'simple_type': 'Dimname'}], + torch._C._VariableFunctions.max: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'dim', 'simple_type': 'Dimname'}], + torch._C._VariableFunctions.max: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.max: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.max: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.max: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.amax: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.amax: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.max_pool1d_with_indices: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'kernel_size', 'simple_type': 'IntArrayRef', 'size': 1}], + torch._C._VariableFunctions.max_pool1d: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'kernel_size', 'simple_type': 'IntArrayRef', 'size': 1}], + torch._C._VariableFunctions.max_pool2d: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'kernel_size', 'simple_type': 'IntArrayRef', 'size': 2}], + torch._C._VariableFunctions.mkldnn_max_pool2d: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'kernel_size', 'simple_type': 'IntArrayRef', 'size': 2}], + torch._C._VariableFunctions.mkldnn_max_pool3d: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'kernel_size', 'simple_type': 'IntArrayRef', 'size': 3}], + torch._C._VariableFunctions.quantized_max_pool1d: [{'is_kwarg_only': 'False', 'name': 'self', 
'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'kernel_size', 'simple_type': 'IntArrayRef', 'size': 1}], + torch._C._VariableFunctions.quantized_max_pool2d: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'kernel_size', 'simple_type': 'IntArrayRef', 'size': 2}], + torch._C._VariableFunctions.quantized_max_pool3d: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'kernel_size', 'simple_type': 'IntArrayRef', 'size': 3}], + torch._C._VariableFunctions.max_pool3d: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'kernel_size', 'simple_type': 'IntArrayRef', 'size': 3}], + torch._C._VariableFunctions.mean: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.mean: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'dim', 'simple_type': 'IntArrayRef?', 'size': 1}], + torch._C._VariableFunctions.mean: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'dim', 'simple_type': 'IntArrayRef?', 'size': 1}], + torch._C._VariableFunctions.mean: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'dim', 'simple_type': 'DimnameList', 'size': 1}], + torch._C._VariableFunctions.mean: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'dim', 'simple_type': 'DimnameList', 'size': 1}], + torch._C._VariableFunctions.nanmean: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.nanmean: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.median: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.median: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'dim', 'simple_type': 'int64_t'}], + torch._C._VariableFunctions.median: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'dim', 'simple_type': 'int64_t'}], + torch._C._VariableFunctions.median: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'dim', 'simple_type': 'Dimname'}], + torch._C._VariableFunctions.median: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'dim', 'simple_type': 'Dimname'}], + torch._C._VariableFunctions.nanmedian: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.nanmedian: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'dim', 'simple_type': 'int64_t'}], + torch._C._VariableFunctions.nanmedian: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'dim', 'simple_type': 'int64_t'}], + torch._C._VariableFunctions.nanmedian: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'dim', 'simple_type': 'Dimname'}], + torch._C._VariableFunctions.nanmedian: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'dim', 'simple_type': 'Dimname'}], + torch._C._VariableFunctions.min: 
[{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'dim', 'simple_type': 'int64_t'}], + torch._C._VariableFunctions.min: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'dim', 'simple_type': 'int64_t'}], + torch._C._VariableFunctions.min: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'dim', 'simple_type': 'Dimname'}], + torch._C._VariableFunctions.min: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'dim', 'simple_type': 'Dimname'}], + torch._C._VariableFunctions.min: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.min: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.min: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.min: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.amin: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.amin: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions._mps_convolution: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'weight', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'bias', 'simple_type': 'Tensor?'}, {'is_kwarg_only': 'False', 'name': 'padding', 'simple_type': 'SymIntArrayRef'}, {'is_kwarg_only': 'False', 'name': 'stride', 'simple_type': 'SymIntArrayRef'}, {'is_kwarg_only': 'False', 'name': 'dilation', 'simple_type': 'SymIntArrayRef'}, {'is_kwarg_only': 'False', 'name': 'groups', 'simple_type': 'SymInt'}], + torch._C._VariableFunctions.mkldnn_convolution: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'weight', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'bias', 'simple_type': 'Tensor?'}, {'is_kwarg_only': 'False', 'name': 'padding', 'simple_type': 'SymIntArrayRef'}, {'is_kwarg_only': 'False', 'name': 'stride', 'simple_type': 'SymIntArrayRef'}, {'is_kwarg_only': 'False', 'name': 'dilation', 'simple_type': 'SymIntArrayRef'}, {'is_kwarg_only': 'False', 'name': 'groups', 'simple_type': 'SymInt'}], + torch._C._VariableFunctions.mkldnn_rnn_layer: [{'is_kwarg_only': 'False', 'name': 'input', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'weight0', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'weight1', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'weight2', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'weight3', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'hx_', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'cx_', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'reverse', 'simple_type': 'bool'}, {'is_kwarg_only': 'False', 'name': 'batch_sizes', 'simple_type': 'IntArrayRef'}, {'is_kwarg_only': 'False', 'name': 'mode', 'simple_type': 'int64_t'}, {'is_kwarg_only': 'False', 'name': 'hidden_size', 'simple_type': 'int64_t'}, {'is_kwarg_only': 'False', 'name': 'num_layers', 'simple_type': 'int64_t'}, {'is_kwarg_only': 'False', 'name': 'has_biases', 
'simple_type': 'bool'}, {'is_kwarg_only': 'False', 'name': 'bidirectional', 'simple_type': 'bool'}, {'is_kwarg_only': 'False', 'name': 'batch_first', 'simple_type': 'bool'}, {'is_kwarg_only': 'False', 'name': 'train', 'simple_type': 'bool'}], + torch._C._VariableFunctions.miopen_batch_norm: [{'is_kwarg_only': 'False', 'name': 'input', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'weight', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'bias', 'simple_type': 'Tensor?'}, {'is_kwarg_only': 'False', 'name': 'running_mean', 'simple_type': 'Tensor?'}, {'is_kwarg_only': 'False', 'name': 'running_var', 'simple_type': 'Tensor?'}, {'is_kwarg_only': 'False', 'name': 'training', 'simple_type': 'bool'}, {'is_kwarg_only': 'False', 'name': 'exponential_average_factor', 'simple_type': 'double'}, {'is_kwarg_only': 'False', 'name': 'epsilon', 'simple_type': 'double'}], + torch._C._VariableFunctions.miopen_convolution: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'weight', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'bias', 'simple_type': 'Tensor?'}, {'is_kwarg_only': 'False', 'name': 'padding', 'simple_type': 'SymIntArrayRef'}, {'is_kwarg_only': 'False', 'name': 'stride', 'simple_type': 'SymIntArrayRef'}, {'is_kwarg_only': 'False', 'name': 'dilation', 'simple_type': 'SymIntArrayRef'}, {'is_kwarg_only': 'False', 'name': 'groups', 'simple_type': 'SymInt'}, {'is_kwarg_only': 'False', 'name': 'benchmark', 'simple_type': 'bool'}, {'is_kwarg_only': 'False', 'name': 'deterministic', 'simple_type': 'bool'}], + torch._C._VariableFunctions.miopen_convolution_transpose: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'weight', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'bias', 'simple_type': 'Tensor?'}, {'is_kwarg_only': 'False', 'name': 'padding', 'simple_type': 'SymIntArrayRef'}, {'is_kwarg_only': 'False', 'name': 'output_padding', 'simple_type': 'SymIntArrayRef'}, {'is_kwarg_only': 'False', 'name': 'stride', 'simple_type': 'SymIntArrayRef'}, {'is_kwarg_only': 'False', 'name': 'dilation', 'simple_type': 'SymIntArrayRef'}, {'is_kwarg_only': 'False', 'name': 'groups', 'simple_type': 'SymInt'}, {'is_kwarg_only': 'False', 'name': 'benchmark', 'simple_type': 'bool'}, {'is_kwarg_only': 'False', 'name': 'deterministic', 'simple_type': 'bool'}], + torch._C._VariableFunctions.miopen_depthwise_convolution: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'weight', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'bias', 'simple_type': 'Tensor?'}, {'is_kwarg_only': 'False', 'name': 'padding', 'simple_type': 'SymIntArrayRef'}, {'is_kwarg_only': 'False', 'name': 'stride', 'simple_type': 'SymIntArrayRef'}, {'is_kwarg_only': 'False', 'name': 'dilation', 'simple_type': 'SymIntArrayRef'}, {'is_kwarg_only': 'False', 'name': 'groups', 'simple_type': 'SymInt'}, {'is_kwarg_only': 'False', 'name': 'benchmark', 'simple_type': 'bool'}, {'is_kwarg_only': 'False', 'name': 'deterministic', 'simple_type': 'bool'}], + torch._C._VariableFunctions.miopen_convolution_relu: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'weight', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'bias', 'simple_type': 'Tensor?'}, {'is_kwarg_only': 'False', 'name': 'stride', 'simple_type': 'SymIntArrayRef'}, {'is_kwarg_only': 'False', 'name': 'padding', 
'simple_type': 'SymIntArrayRef'}, {'is_kwarg_only': 'False', 'name': 'dilation', 'simple_type': 'SymIntArrayRef'}, {'is_kwarg_only': 'False', 'name': 'groups', 'simple_type': 'SymInt'}], + torch._C._VariableFunctions.miopen_convolution_add_relu: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'weight', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'z', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'alpha', 'simple_type': 'Scalar?'}, {'is_kwarg_only': 'False', 'name': 'bias', 'simple_type': 'Tensor?'}, {'is_kwarg_only': 'False', 'name': 'stride', 'simple_type': 'SymIntArrayRef'}, {'is_kwarg_only': 'False', 'name': 'padding', 'simple_type': 'SymIntArrayRef'}, {'is_kwarg_only': 'False', 'name': 'dilation', 'simple_type': 'SymIntArrayRef'}, {'is_kwarg_only': 'False', 'name': 'groups', 'simple_type': 'SymInt'}], + torch._C._VariableFunctions.miopen_rnn: [{'is_kwarg_only': 'False', 'name': 'input', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'weight', 'simple_type': 'TensorList'}, {'is_kwarg_only': 'False', 'name': 'weight_stride0', 'simple_type': 'int64_t'}, {'is_kwarg_only': 'False', 'name': 'hx', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'cx', 'simple_type': 'Tensor?'}, {'is_kwarg_only': 'False', 'name': 'mode', 'simple_type': 'int64_t'}, {'is_kwarg_only': 'False', 'name': 'hidden_size', 'simple_type': 'int64_t'}, {'is_kwarg_only': 'False', 'name': 'num_layers', 'simple_type': 'int64_t'}, {'is_kwarg_only': 'False', 'name': 'batch_first', 'simple_type': 'bool'}, {'is_kwarg_only': 'False', 'name': 'dropout', 'simple_type': 'double'}, {'is_kwarg_only': 'False', 'name': 'train', 'simple_type': 'bool'}, {'is_kwarg_only': 'False', 'name': 'bidirectional', 'simple_type': 'bool'}, {'is_kwarg_only': 'False', 'name': 'batch_sizes', 'simple_type': 'IntArrayRef'}, {'is_kwarg_only': 'False', 'name': 'dropout_state', 'simple_type': 'Tensor?'}], + torch._C._VariableFunctions.mm: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'mat2', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.mm: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'mat2', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions._int_mm: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'mat2', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions._int_mm: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'mat2', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions._convert_weight_to_int4pack: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'innerKTiles', 'simple_type': 'int64_t'}], + torch._C._VariableFunctions._weight_int4pack_mm: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'mat2', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'qGroupSize', 'simple_type': 'int64_t'}, {'is_kwarg_only': 'False', 'name': 'qScaleAndZeros', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions._sparse_sparse_matmul: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.mode: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + 
torch._C._VariableFunctions.mode: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.mode: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'dim', 'simple_type': 'Dimname'}], + torch._C._VariableFunctions.mode: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'dim', 'simple_type': 'Dimname'}], + torch._C._VariableFunctions.mul: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.mul: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.multiply: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.multiply: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.multiply: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Scalar'}], + torch._C._VariableFunctions.mv: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'vec', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.mv: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'vec', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.mvlgamma: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'p', 'simple_type': 'int64_t'}], + torch._C._VariableFunctions.mvlgamma: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'p', 'simple_type': 'int64_t'}], + torch._C._VariableFunctions.narrow_copy: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'dim', 'simple_type': 'int64_t'}, {'is_kwarg_only': 'False', 'name': 'start', 'simple_type': 'SymInt'}, {'is_kwarg_only': 'False', 'name': 'length', 'simple_type': 'SymInt'}], + torch._C._VariableFunctions.narrow_copy: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'dim', 'simple_type': 'int64_t'}, {'is_kwarg_only': 'False', 'name': 'start', 'simple_type': 'SymInt'}, {'is_kwarg_only': 'False', 'name': 'length', 'simple_type': 'SymInt'}], + torch._C._VariableFunctions.narrow: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'dim', 'simple_type': 'int64_t'}, {'is_kwarg_only': 'False', 'name': 'start', 'simple_type': 'SymInt'}, {'is_kwarg_only': 'False', 'name': 'length', 'simple_type': 'SymInt'}], + torch._C._VariableFunctions.narrow: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'dim', 'simple_type': 'int64_t'}, {'is_kwarg_only': 'False', 'name': 'start', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'length', 'simple_type': 'SymInt'}], + torch._C._VariableFunctions.native_batch_norm: [{'is_kwarg_only': 'False', 'name': 'input', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'weight', 'simple_type': 'Tensor?'}, {'is_kwarg_only': 
'False', 'name': 'bias', 'simple_type': 'Tensor?'}, {'is_kwarg_only': 'False', 'name': 'running_mean', 'simple_type': 'Tensor?'}, {'is_kwarg_only': 'False', 'name': 'running_var', 'simple_type': 'Tensor?'}, {'is_kwarg_only': 'False', 'name': 'training', 'simple_type': 'bool'}, {'is_kwarg_only': 'False', 'name': 'momentum', 'simple_type': 'double'}, {'is_kwarg_only': 'False', 'name': 'eps', 'simple_type': 'double'}], + torch._C._VariableFunctions.native_batch_norm: [{'is_kwarg_only': 'False', 'name': 'input', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'weight', 'simple_type': 'Tensor?'}, {'is_kwarg_only': 'False', 'name': 'bias', 'simple_type': 'Tensor?'}, {'is_kwarg_only': 'False', 'name': 'running_mean', 'simple_type': 'Tensor?'}, {'is_kwarg_only': 'False', 'name': 'running_var', 'simple_type': 'Tensor?'}, {'is_kwarg_only': 'False', 'name': 'training', 'simple_type': 'bool'}, {'is_kwarg_only': 'False', 'name': 'momentum', 'simple_type': 'double'}, {'is_kwarg_only': 'False', 'name': 'eps', 'simple_type': 'double'}], + torch._C._VariableFunctions._native_batch_norm_legit: [{'is_kwarg_only': 'False', 'name': 'input', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'weight', 'simple_type': 'Tensor?'}, {'is_kwarg_only': 'False', 'name': 'bias', 'simple_type': 'Tensor?'}, {'is_kwarg_only': 'False', 'name': 'running_mean', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'running_var', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'training', 'simple_type': 'bool'}, {'is_kwarg_only': 'False', 'name': 'momentum', 'simple_type': 'double'}, {'is_kwarg_only': 'False', 'name': 'eps', 'simple_type': 'double'}], + torch._C._VariableFunctions._native_batch_norm_legit: [{'is_kwarg_only': 'False', 'name': 'input', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'weight', 'simple_type': 'Tensor?'}, {'is_kwarg_only': 'False', 'name': 'bias', 'simple_type': 'Tensor?'}, {'is_kwarg_only': 'False', 'name': 'running_mean', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'running_var', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'training', 'simple_type': 'bool'}, {'is_kwarg_only': 'False', 'name': 'momentum', 'simple_type': 'double'}, {'is_kwarg_only': 'False', 'name': 'eps', 'simple_type': 'double'}], + torch._C._VariableFunctions._native_batch_norm_legit: [{'is_kwarg_only': 'False', 'name': 'input', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'weight', 'simple_type': 'Tensor?'}, {'is_kwarg_only': 'False', 'name': 'bias', 'simple_type': 'Tensor?'}, {'is_kwarg_only': 'False', 'name': 'training', 'simple_type': 'bool'}, {'is_kwarg_only': 'False', 'name': 'momentum', 'simple_type': 'double'}, {'is_kwarg_only': 'False', 'name': 'eps', 'simple_type': 'double'}], + torch._C._VariableFunctions._native_batch_norm_legit: [{'is_kwarg_only': 'False', 'name': 'input', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'weight', 'simple_type': 'Tensor?'}, {'is_kwarg_only': 'False', 'name': 'bias', 'simple_type': 'Tensor?'}, {'is_kwarg_only': 'False', 'name': 'training', 'simple_type': 'bool'}, {'is_kwarg_only': 'False', 'name': 'momentum', 'simple_type': 'double'}, {'is_kwarg_only': 'False', 'name': 'eps', 'simple_type': 'double'}], + torch._C._VariableFunctions._native_batch_norm_legit_no_training: [{'is_kwarg_only': 'False', 'name': 'input', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'weight', 'simple_type': 'Tensor?'}, {'is_kwarg_only': 'False', 'name': 'bias', 'simple_type': 
'Tensor?'}, {'is_kwarg_only': 'False', 'name': 'running_mean', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'running_var', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'momentum', 'simple_type': 'double'}, {'is_kwarg_only': 'False', 'name': 'eps', 'simple_type': 'double'}], + torch._C._VariableFunctions.batch_norm_stats: [{'is_kwarg_only': 'False', 'name': 'input', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'eps', 'simple_type': 'double'}], + torch._C._VariableFunctions.batch_norm_elemt: [{'is_kwarg_only': 'False', 'name': 'input', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'weight', 'simple_type': 'Tensor?'}, {'is_kwarg_only': 'False', 'name': 'bias', 'simple_type': 'Tensor?'}, {'is_kwarg_only': 'False', 'name': 'mean', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'invstd', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'eps', 'simple_type': 'double'}], + torch._C._VariableFunctions.batch_norm_elemt: [{'is_kwarg_only': 'False', 'name': 'input', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'weight', 'simple_type': 'Tensor?'}, {'is_kwarg_only': 'False', 'name': 'bias', 'simple_type': 'Tensor?'}, {'is_kwarg_only': 'False', 'name': 'mean', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'invstd', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'eps', 'simple_type': 'double'}], + torch._C._VariableFunctions.batch_norm_gather_stats: [{'is_kwarg_only': 'False', 'name': 'input', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'mean', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'invstd', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'running_mean', 'simple_type': 'Tensor?'}, {'is_kwarg_only': 'False', 'name': 'running_var', 'simple_type': 'Tensor?'}, {'is_kwarg_only': 'False', 'name': 'momentum', 'simple_type': 'double'}, {'is_kwarg_only': 'False', 'name': 'eps', 'simple_type': 'double'}, {'is_kwarg_only': 'False', 'name': 'count', 'simple_type': 'int64_t'}], + torch._C._VariableFunctions.batch_norm_gather_stats_with_counts: [{'is_kwarg_only': 'False', 'name': 'input', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'mean', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'invstd', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'running_mean', 'simple_type': 'Tensor?'}, {'is_kwarg_only': 'False', 'name': 'running_var', 'simple_type': 'Tensor?'}, {'is_kwarg_only': 'False', 'name': 'momentum', 'simple_type': 'double'}, {'is_kwarg_only': 'False', 'name': 'eps', 'simple_type': 'double'}, {'is_kwarg_only': 'False', 'name': 'counts', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.batch_norm_backward_reduce: [{'is_kwarg_only': 'False', 'name': 'grad_out', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'input', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'mean', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'invstd', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'weight', 'simple_type': 'Tensor?'}, {'is_kwarg_only': 'False', 'name': 'input_g', 'simple_type': 'bool'}, {'is_kwarg_only': 'False', 'name': 'weight_g', 'simple_type': 'bool'}, {'is_kwarg_only': 'False', 'name': 'bias_g', 'simple_type': 'bool'}], + torch._C._VariableFunctions.batch_norm_backward_elemt: [{'is_kwarg_only': 'False', 'name': 'grad_out', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'input', 'simple_type': 'Tensor'}, 
{'is_kwarg_only': 'False', 'name': 'mean', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'invstd', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'weight', 'simple_type': 'Tensor?'}, {'is_kwarg_only': 'False', 'name': 'sum_dy', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'sum_dy_xmu', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'count', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.batch_norm_update_stats: [{'is_kwarg_only': 'False', 'name': 'input', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'running_mean', 'simple_type': 'Tensor?'}, {'is_kwarg_only': 'False', 'name': 'running_var', 'simple_type': 'Tensor?'}, {'is_kwarg_only': 'False', 'name': 'momentum', 'simple_type': 'double'}], + torch._C._VariableFunctions.is_vulkan_available: [], + torch._C._VariableFunctions._nnpack_available: [], + torch._C._VariableFunctions._nnpack_spatial_convolution: [{'is_kwarg_only': 'False', 'name': 'input', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'weight', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'bias', 'simple_type': 'Tensor?'}, {'is_kwarg_only': 'False', 'name': 'padding', 'simple_type': 'SymIntArrayRef', 'size': 2}], + torch._C._VariableFunctions.ones: [{'is_kwarg_only': 'False', 'name': 'size', 'simple_type': 'IntArrayRef'}, {'is_kwarg_only': 'True', 'name': 'names', 'simple_type': 'DimnameList?'}], + torch._C._VariableFunctions.ones: [{'is_kwarg_only': 'False', 'name': 'size', 'simple_type': 'SymIntArrayRef'}], + torch._C._VariableFunctions.ones: [{'is_kwarg_only': 'False', 'name': 'size', 'simple_type': 'SymIntArrayRef'}], + torch._C._VariableFunctions.ones_like: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.pairwise_distance: [{'is_kwarg_only': 'False', 'name': 'x1', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'x2', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.cdist: [{'is_kwarg_only': 'False', 'name': 'x1', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'x2', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions._euclidean_dist: [{'is_kwarg_only': 'False', 'name': 'x1', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'x2', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.pdist: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.cosine_similarity: [{'is_kwarg_only': 'False', 'name': 'x1', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'x2', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.permute: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'dims', 'simple_type': 'IntArrayRef'}], + torch._C._VariableFunctions.movedim: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'source', 'simple_type': 'IntArrayRef'}, {'is_kwarg_only': 'False', 'name': 'destination', 'simple_type': 'IntArrayRef'}], + torch._C._VariableFunctions.movedim: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'source', 'simple_type': 'int64_t'}, {'is_kwarg_only': 'False', 'name': 'destination', 'simple_type': 'int64_t'}], + torch._C._VariableFunctions.moveaxis: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'source', 'simple_type': 'IntArrayRef'}, {'is_kwarg_only': 'False', 'name': 
'destination', 'simple_type': 'IntArrayRef'}], + torch._C._VariableFunctions.moveaxis: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'source', 'simple_type': 'int64_t'}, {'is_kwarg_only': 'False', 'name': 'destination', 'simple_type': 'int64_t'}], + torch._C._VariableFunctions.adjoint: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.pixel_shuffle: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'upscale_factor', 'simple_type': 'int64_t'}], + torch._C._VariableFunctions.pixel_unshuffle: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'downscale_factor', 'simple_type': 'int64_t'}], + torch._C._VariableFunctions.channel_shuffle: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'groups', 'simple_type': 'SymInt'}], + torch._C._VariableFunctions.native_channel_shuffle: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'groups', 'simple_type': 'SymInt'}], + torch._C._VariableFunctions._pin_memory: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.pinverse: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.poisson_nll_loss: [{'is_kwarg_only': 'False', 'name': 'input', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'target', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'log_input', 'simple_type': 'bool'}, {'is_kwarg_only': 'False', 'name': 'full', 'simple_type': 'bool'}, {'is_kwarg_only': 'False', 'name': 'eps', 'simple_type': 'double'}, {'is_kwarg_only': 'False', 'name': 'reduction', 'simple_type': 'int64_t'}], + torch._C._VariableFunctions.rad2deg: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.rad2deg: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.rad2deg_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.deg2rad: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.deg2rad: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.deg2rad_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.scalar_tensor: [{'is_kwarg_only': 'False', 'name': 's', 'simple_type': 'Scalar'}], + torch._C._VariableFunctions.rand: [{'is_kwarg_only': 'False', 'name': 'size', 'simple_type': 'SymIntArrayRef'}, {'is_kwarg_only': 'True', 'name': 'names', 'simple_type': 'DimnameList?'}], + torch._C._VariableFunctions.rand: [{'is_kwarg_only': 'False', 'name': 'size', 'simple_type': 'SymIntArrayRef'}, {'is_kwarg_only': 'True', 'name': 'generator', 'simple_type': 'Generator?'}, {'is_kwarg_only': 'True', 'name': 'names', 'simple_type': 'DimnameList?'}], + torch._C._VariableFunctions.rand: [{'is_kwarg_only': 'False', 'name': 'size', 'simple_type': 'SymIntArrayRef'}], + torch._C._VariableFunctions.rand: [{'is_kwarg_only': 'False', 'name': 'size', 'simple_type': 'SymIntArrayRef'}, {'is_kwarg_only': 'True', 'name': 'generator', 'simple_type': 'Generator?'}], + torch._C._VariableFunctions.rand: [{'is_kwarg_only': 'False', 'name': 'size', 'simple_type': 'SymIntArrayRef'}], + 
torch._C._VariableFunctions.rand: [{'is_kwarg_only': 'False', 'name': 'size', 'simple_type': 'SymIntArrayRef'}, {'is_kwarg_only': 'True', 'name': 'generator', 'simple_type': 'Generator?'}], + torch._C._VariableFunctions.rand_like: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.randint: [{'is_kwarg_only': 'False', 'name': 'high', 'simple_type': 'SymInt'}, {'is_kwarg_only': 'False', 'name': 'size', 'simple_type': 'SymIntArrayRef'}], + torch._C._VariableFunctions.randint: [{'is_kwarg_only': 'False', 'name': 'high', 'simple_type': 'SymInt'}, {'is_kwarg_only': 'False', 'name': 'size', 'simple_type': 'SymIntArrayRef'}, {'is_kwarg_only': 'True', 'name': 'generator', 'simple_type': 'Generator?'}], + torch._C._VariableFunctions.randint: [{'is_kwarg_only': 'False', 'name': 'low', 'simple_type': 'SymInt'}, {'is_kwarg_only': 'False', 'name': 'high', 'simple_type': 'SymInt'}, {'is_kwarg_only': 'False', 'name': 'size', 'simple_type': 'SymIntArrayRef'}], + torch._C._VariableFunctions.randint: [{'is_kwarg_only': 'False', 'name': 'low', 'simple_type': 'SymInt'}, {'is_kwarg_only': 'False', 'name': 'high', 'simple_type': 'SymInt'}, {'is_kwarg_only': 'False', 'name': 'size', 'simple_type': 'SymIntArrayRef'}, {'is_kwarg_only': 'True', 'name': 'generator', 'simple_type': 'Generator?'}], + torch._C._VariableFunctions.randint: [{'is_kwarg_only': 'False', 'name': 'high', 'simple_type': 'SymInt'}, {'is_kwarg_only': 'False', 'name': 'size', 'simple_type': 'SymIntArrayRef'}], + torch._C._VariableFunctions.randint: [{'is_kwarg_only': 'False', 'name': 'high', 'simple_type': 'SymInt'}, {'is_kwarg_only': 'False', 'name': 'size', 'simple_type': 'SymIntArrayRef'}, {'is_kwarg_only': 'True', 'name': 'generator', 'simple_type': 'Generator?'}], + torch._C._VariableFunctions.randint: [{'is_kwarg_only': 'False', 'name': 'low', 'simple_type': 'SymInt'}, {'is_kwarg_only': 'False', 'name': 'high', 'simple_type': 'SymInt'}, {'is_kwarg_only': 'False', 'name': 'size', 'simple_type': 'SymIntArrayRef'}], + torch._C._VariableFunctions.randint: [{'is_kwarg_only': 'False', 'name': 'low', 'simple_type': 'SymInt'}, {'is_kwarg_only': 'False', 'name': 'high', 'simple_type': 'SymInt'}, {'is_kwarg_only': 'False', 'name': 'size', 'simple_type': 'SymIntArrayRef'}, {'is_kwarg_only': 'True', 'name': 'generator', 'simple_type': 'Generator?'}], + torch._C._VariableFunctions.randint_like: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'high', 'simple_type': 'SymInt'}], + torch._C._VariableFunctions.randint_like: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'low', 'simple_type': 'SymInt'}, {'is_kwarg_only': 'False', 'name': 'high', 'simple_type': 'SymInt'}], + torch._C._VariableFunctions.randn: [{'is_kwarg_only': 'False', 'name': 'size', 'simple_type': 'SymIntArrayRef'}], + torch._C._VariableFunctions.randn: [{'is_kwarg_only': 'False', 'name': 'size', 'simple_type': 'SymIntArrayRef'}, {'is_kwarg_only': 'True', 'name': 'generator', 'simple_type': 'Generator?'}], + torch._C._VariableFunctions.randn: [{'is_kwarg_only': 'False', 'name': 'size', 'simple_type': 'SymIntArrayRef'}, {'is_kwarg_only': 'True', 'name': 'names', 'simple_type': 'DimnameList?'}], + torch._C._VariableFunctions.randn: [{'is_kwarg_only': 'False', 'name': 'size', 'simple_type': 'SymIntArrayRef'}, {'is_kwarg_only': 'True', 'name': 'generator', 'simple_type': 'Generator?'}, {'is_kwarg_only': 'True', 'name': 'names', 
'simple_type': 'DimnameList?'}], + torch._C._VariableFunctions.randn: [{'is_kwarg_only': 'False', 'name': 'size', 'simple_type': 'SymIntArrayRef'}], + torch._C._VariableFunctions.randn: [{'is_kwarg_only': 'False', 'name': 'size', 'simple_type': 'SymIntArrayRef'}, {'is_kwarg_only': 'True', 'name': 'generator', 'simple_type': 'Generator?'}], + torch._C._VariableFunctions.randn_like: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.randperm: [{'is_kwarg_only': 'False', 'name': 'n', 'simple_type': 'SymInt'}], + torch._C._VariableFunctions.randperm: [{'is_kwarg_only': 'False', 'name': 'n', 'simple_type': 'SymInt'}, {'is_kwarg_only': 'True', 'name': 'generator', 'simple_type': 'Generator?'}], + torch._C._VariableFunctions.randperm: [{'is_kwarg_only': 'False', 'name': 'n', 'simple_type': 'SymInt'}], + torch._C._VariableFunctions.randperm: [{'is_kwarg_only': 'False', 'name': 'n', 'simple_type': 'SymInt'}, {'is_kwarg_only': 'True', 'name': 'generator', 'simple_type': 'Generator?'}], + torch._C._VariableFunctions.ravel: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.reciprocal: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.reciprocal: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.reciprocal_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.neg: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.neg: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.neg_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.negative: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.negative: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.negative_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.repeat_interleave: [{'is_kwarg_only': 'False', 'name': 'repeats', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.repeat_interleave: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'repeats', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.repeat_interleave: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'repeats', 'simple_type': 'SymInt'}], + torch._C._VariableFunctions.reshape: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'shape', 'simple_type': 'SymIntArrayRef'}], + torch._C._VariableFunctions._mkldnn_reshape: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'shape', 'simple_type': 'IntArrayRef'}], + torch._C._VariableFunctions.round: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.round: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.round: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.round: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.round_: [{'is_kwarg_only': 'False', 'name': 'self', 
'simple_type': 'Tensor'}], + torch._C._VariableFunctions.round_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.rrelu: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.rrelu_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.relu: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.relu_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.prelu: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'weight', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions._prelu_kernel: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'weight', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.hardshrink: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.hardshrink: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.rsqrt: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.rsqrt: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.rsqrt_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.select: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'dim', 'simple_type': 'Dimname'}, {'is_kwarg_only': 'False', 'name': 'index', 'simple_type': 'int64_t'}], + torch._C._VariableFunctions.select: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'dim', 'simple_type': 'int64_t'}, {'is_kwarg_only': 'False', 'name': 'index', 'simple_type': 'SymInt'}], + torch._C._VariableFunctions.selu: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.selu_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.celu: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.celu_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.sigmoid: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.sigmoid: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.sigmoid_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.logit: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.logit: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.logit_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.sin: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.sin: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.sin_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.sinc: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.sinc: [{'is_kwarg_only': 
'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.sinc_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.sinh: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.sinh: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.sinh_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.detach: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.detach_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.slice_scatter: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'src', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.select_scatter: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'src', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'dim', 'simple_type': 'int64_t'}, {'is_kwarg_only': 'False', 'name': 'index', 'simple_type': 'SymInt'}], + torch._C._VariableFunctions.diagonal_scatter: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'src', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.as_strided_scatter: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'src', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'size', 'simple_type': 'SymIntArrayRef'}, {'is_kwarg_only': 'False', 'name': 'stride', 'simple_type': 'SymIntArrayRef'}], + torch._C._VariableFunctions.smm: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'mat2', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.softmax: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'dim', 'simple_type': 'int64_t'}], + torch._C._VariableFunctions.softmax: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'dim', 'simple_type': 'int64_t'}], + torch._C._VariableFunctions.softmax: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'dim', 'simple_type': 'Dimname'}], + torch._C._VariableFunctions._softmax: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'dim', 'simple_type': 'int64_t'}, {'is_kwarg_only': 'False', 'name': 'half_to_float', 'simple_type': 'bool'}], + torch._C._VariableFunctions._softmax: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'dim', 'simple_type': 'int64_t'}, {'is_kwarg_only': 'False', 'name': 'half_to_float', 'simple_type': 'bool'}], + torch._C._VariableFunctions._softmax_backward_data: [{'is_kwarg_only': 'False', 'name': 'grad_output', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'output', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'dim', 'simple_type': 'int64_t'}, {'is_kwarg_only': 'False', 'name': 'input_dtype', 'simple_type': 'ScalarType'}], + torch._C._VariableFunctions._softmax_backward_data: [{'is_kwarg_only': 'False', 'name': 'grad_output', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'output', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 
'dim', 'simple_type': 'int64_t'}, {'is_kwarg_only': 'False', 'name': 'input_dtype', 'simple_type': 'ScalarType'}], + torch._C._VariableFunctions.unsafe_split: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'split_size', 'simple_type': 'SymInt'}], + torch._C._VariableFunctions.split: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'split_size', 'simple_type': 'SymInt'}], + torch._C._VariableFunctions.split: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'split_size', 'simple_type': 'SymIntArrayRef'}], + torch._C._VariableFunctions.unsafe_split_with_sizes: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'split_sizes', 'simple_type': 'SymIntArrayRef'}], + torch._C._VariableFunctions.split_with_sizes: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'split_sizes', 'simple_type': 'SymIntArrayRef'}], + torch._C._VariableFunctions.hsplit: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'sections', 'simple_type': 'int64_t'}], + torch._C._VariableFunctions.hsplit: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'indices', 'simple_type': 'IntArrayRef'}], + torch._C._VariableFunctions.vsplit: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'sections', 'simple_type': 'int64_t'}], + torch._C._VariableFunctions.vsplit: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'indices', 'simple_type': 'IntArrayRef'}], + torch._C._VariableFunctions.dsplit: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'sections', 'simple_type': 'int64_t'}], + torch._C._VariableFunctions.dsplit: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'indices', 'simple_type': 'IntArrayRef'}], + torch._C._VariableFunctions.squeeze: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.squeeze: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'dim', 'simple_type': 'int64_t'}], + torch._C._VariableFunctions.squeeze: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'dim', 'simple_type': 'Dimname'}], + torch._C._VariableFunctions.squeeze: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'dim', 'simple_type': 'IntArrayRef'}], + torch._C._VariableFunctions.sspaddmm: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'mat1', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'mat2', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.sspaddmm: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'mat1', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'mat2', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.stack: [{'is_kwarg_only': 'False', 'name': 'tensors', 'simple_type': 'TensorList'}], + torch._C._VariableFunctions.stack: [{'is_kwarg_only': 'False', 'name': 'tensors', 
'simple_type': 'TensorList'}], + torch._C._VariableFunctions._stack: [{'is_kwarg_only': 'False', 'name': 'tensors', 'simple_type': 'TensorList'}], + torch._C._VariableFunctions._stack: [{'is_kwarg_only': 'False', 'name': 'tensors', 'simple_type': 'TensorList'}], + torch._C._VariableFunctions.hstack: [{'is_kwarg_only': 'False', 'name': 'tensors', 'simple_type': 'TensorList'}], + torch._C._VariableFunctions.hstack: [{'is_kwarg_only': 'False', 'name': 'tensors', 'simple_type': 'TensorList'}], + torch._C._VariableFunctions.vstack: [{'is_kwarg_only': 'False', 'name': 'tensors', 'simple_type': 'TensorList'}], + torch._C._VariableFunctions.vstack: [{'is_kwarg_only': 'False', 'name': 'tensors', 'simple_type': 'TensorList'}], + torch._C._VariableFunctions.dstack: [{'is_kwarg_only': 'False', 'name': 'tensors', 'simple_type': 'TensorList'}], + torch._C._VariableFunctions.dstack: [{'is_kwarg_only': 'False', 'name': 'tensors', 'simple_type': 'TensorList'}], + torch._C._VariableFunctions.stft: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'n_fft', 'simple_type': 'int64_t'}], + torch._C._VariableFunctions.stft: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'n_fft', 'simple_type': 'int64_t'}], + torch._C._VariableFunctions.istft: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'n_fft', 'simple_type': 'int64_t'}], + torch._C._VariableFunctions.sum: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.sum: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'dim', 'simple_type': 'IntArrayRef?', 'size': 1}], + torch._C._VariableFunctions.sum: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'dim', 'simple_type': 'DimnameList', 'size': 1}], + torch._C._VariableFunctions.sum: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'dim', 'simple_type': 'IntArrayRef?', 'size': 1}], + torch._C._VariableFunctions.sum: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'dim', 'simple_type': 'DimnameList', 'size': 1}], + torch._C._VariableFunctions.nansum: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.nansum: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.sqrt: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.sqrt: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.sqrt_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.square: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.square: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.square_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.std: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.std: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'dim', 'simple_type': 'IntArrayRef?', 'size': 1}], + 
torch._C._VariableFunctions.std: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.std: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'dim', 'simple_type': 'IntArrayRef?', 'size': 1}], + torch._C._VariableFunctions.std: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.std: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'dim', 'simple_type': 'DimnameList', 'size': 1}], + torch._C._VariableFunctions.std: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'dim', 'simple_type': 'DimnameList', 'size': 1}], + torch._C._VariableFunctions.std: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'dim', 'simple_type': 'DimnameList', 'size': 1}], + torch._C._VariableFunctions.std: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'dim', 'simple_type': 'DimnameList', 'size': 1}], + torch._C._VariableFunctions.std_mean: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.std_mean: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'dim', 'simple_type': 'IntArrayRef?', 'size': 1}], + torch._C._VariableFunctions.std_mean: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.std_mean: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'dim', 'simple_type': 'DimnameList', 'size': 1}], + torch._C._VariableFunctions.std_mean: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'dim', 'simple_type': 'DimnameList', 'size': 1}], + torch._C._VariableFunctions.prod: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.prod: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'dim', 'simple_type': 'int64_t'}], + torch._C._VariableFunctions.prod: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'dim', 'simple_type': 'int64_t'}], + torch._C._VariableFunctions.prod: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'dim', 'simple_type': 'Dimname'}], + torch._C._VariableFunctions.prod: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'dim', 'simple_type': 'Dimname'}], + torch._C._VariableFunctions.t: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.tan: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.tan: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.tan_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.tanh: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.tanh: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.tanh_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + 
torch._C._VariableFunctions.tensordot: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'dims_self', 'simple_type': 'IntArrayRef'}, {'is_kwarg_only': 'False', 'name': 'dims_other', 'simple_type': 'IntArrayRef'}], + torch._C._VariableFunctions.tensordot: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'dims_self', 'simple_type': 'IntArrayRef'}, {'is_kwarg_only': 'False', 'name': 'dims_other', 'simple_type': 'IntArrayRef'}], + torch._C._VariableFunctions.threshold: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'threshold', 'simple_type': 'Scalar'}, {'is_kwarg_only': 'False', 'name': 'value', 'simple_type': 'Scalar'}], + torch._C._VariableFunctions.threshold: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'threshold', 'simple_type': 'Scalar'}, {'is_kwarg_only': 'False', 'name': 'value', 'simple_type': 'Scalar'}], + torch._C._VariableFunctions.threshold_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'threshold', 'simple_type': 'Scalar'}, {'is_kwarg_only': 'False', 'name': 'value', 'simple_type': 'Scalar'}], + torch._C._VariableFunctions.tile: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'dims', 'simple_type': 'SymIntArrayRef'}], + torch._C._VariableFunctions.transpose: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'dim0', 'simple_type': 'int64_t'}, {'is_kwarg_only': 'False', 'name': 'dim1', 'simple_type': 'int64_t'}], + torch._C._VariableFunctions.transpose: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'dim0', 'simple_type': 'Dimname'}, {'is_kwarg_only': 'False', 'name': 'dim1', 'simple_type': 'Dimname'}], + torch._C._VariableFunctions._mkldnn_transpose: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'dim0', 'simple_type': 'int64_t'}, {'is_kwarg_only': 'False', 'name': 'dim1', 'simple_type': 'int64_t'}], + torch._C._VariableFunctions._mkldnn_transpose_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'dim0', 'simple_type': 'int64_t'}, {'is_kwarg_only': 'False', 'name': 'dim1', 'simple_type': 'int64_t'}], + torch._C._VariableFunctions.flip: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'dims', 'simple_type': 'IntArrayRef'}], + torch._C._VariableFunctions.fliplr: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.flipud: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.roll: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'shifts', 'simple_type': 'SymIntArrayRef', 'size': 1}], + torch._C._VariableFunctions.rot90: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.trapezoid: [{'is_kwarg_only': 'False', 'name': 'y', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'x', 'simple_type': 'Tensor'}], + 
torch._C._VariableFunctions.trapezoid: [{'is_kwarg_only': 'False', 'name': 'y', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.trapz: [{'is_kwarg_only': 'False', 'name': 'y', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'x', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.trapz: [{'is_kwarg_only': 'False', 'name': 'y', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions._transform_bias_rescale_qkv: [{'is_kwarg_only': 'False', 'name': 'qkv', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'qkv_bias', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'num_heads', 'simple_type': 'int64_t'}], + torch._C._VariableFunctions._nested_tensor_from_mask: [{'is_kwarg_only': 'False', 'name': 't', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'mask', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions._nested_tensor_from_mask_left_aligned: [{'is_kwarg_only': 'False', 'name': 't', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'mask', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions._nested_from_padded: [{'is_kwarg_only': 'False', 'name': 'padded', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'cpu_nested_shape_example', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions._nested_from_padded_and_nested_example: [{'is_kwarg_only': 'False', 'name': 'padded', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'nt_example', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions._nested_view_from_buffer: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'nested_size', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'nested_strides', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'offsets', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions._nested_view_from_buffer_copy: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'nested_size', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'nested_strides', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'offsets', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions._nested_view_from_buffer_copy: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'nested_size', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'nested_strides', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'offsets', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions._trilinear: [{'is_kwarg_only': 'False', 'name': 'i1', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'i2', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'i3', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'expand1', 'simple_type': 'IntArrayRef'}, {'is_kwarg_only': 'False', 'name': 'expand2', 'simple_type': 'IntArrayRef'}, {'is_kwarg_only': 'False', 'name': 'expand3', 'simple_type': 'IntArrayRef'}, {'is_kwarg_only': 'False', 'name': 'sumdim', 'simple_type': 'IntArrayRef'}], + torch._C._VariableFunctions.triplet_margin_loss: [{'is_kwarg_only': 'False', 'name': 'anchor', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'positive', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'negative', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.trunc: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.trunc: [{'is_kwarg_only': 
'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.trunc_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.fix: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.fix: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.fix_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions._has_compatible_shallow_copy_type: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'from', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions._unique: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.unique_dim: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'dim', 'simple_type': 'int64_t'}], + torch._C._VariableFunctions.unique_consecutive: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions._unique2: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.unsqueeze: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'dim', 'simple_type': 'int64_t'}], + torch._C._VariableFunctions.vander: [{'is_kwarg_only': 'False', 'name': 'x', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.var: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.var: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'dim', 'simple_type': 'IntArrayRef?', 'size': 1}], + torch._C._VariableFunctions.var: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.var: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'dim', 'simple_type': 'IntArrayRef?', 'size': 1}], + torch._C._VariableFunctions.var: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.var: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'dim', 'simple_type': 'DimnameList', 'size': 1}], + torch._C._VariableFunctions.var: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'dim', 'simple_type': 'DimnameList', 'size': 1}], + torch._C._VariableFunctions.var: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'dim', 'simple_type': 'DimnameList', 'size': 1}], + torch._C._VariableFunctions.var: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'dim', 'simple_type': 'DimnameList', 'size': 1}], + torch._C._VariableFunctions.var_mean: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.var_mean: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'dim', 'simple_type': 'IntArrayRef?', 'size': 1}], + torch._C._VariableFunctions.var_mean: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.var_mean: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'dim', 'simple_type': 
'DimnameList', 'size': 1}], + torch._C._VariableFunctions.var_mean: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'dim', 'simple_type': 'DimnameList', 'size': 1}], + torch._C._VariableFunctions.where: [{'is_kwarg_only': 'False', 'name': 'condition', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.where: [{'is_kwarg_only': 'False', 'name': 'condition', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.where: [{'is_kwarg_only': 'False', 'name': 'condition', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Scalar'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.where: [{'is_kwarg_only': 'False', 'name': 'condition', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Scalar'}], + torch._C._VariableFunctions.where: [{'is_kwarg_only': 'False', 'name': 'condition', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Scalar'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Scalar'}], + torch._C._VariableFunctions.where: [{'is_kwarg_only': 'False', 'name': 'condition', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.norm_except_dim: [{'is_kwarg_only': 'False', 'name': 'v', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions._weight_norm: [{'is_kwarg_only': 'False', 'name': 'v', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'g', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions._weight_norm_interface: [{'is_kwarg_only': 'False', 'name': 'v', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'g', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.zeros: [{'is_kwarg_only': 'False', 'name': 'size', 'simple_type': 'IntArrayRef'}, {'is_kwarg_only': 'True', 'name': 'names', 'simple_type': 'DimnameList?'}], + torch._C._VariableFunctions.zeros: [{'is_kwarg_only': 'False', 'name': 'size', 'simple_type': 'SymIntArrayRef'}], + torch._C._VariableFunctions.zeros: [{'is_kwarg_only': 'False', 'name': 'size', 'simple_type': 'SymIntArrayRef'}], + torch._C._VariableFunctions._efficientzerotensor: [{'is_kwarg_only': 'False', 'name': 'size', 'simple_type': 'SymIntArrayRef'}], + torch._C._VariableFunctions.zeros_like: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions._standard_gamma_grad: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'output', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions._standard_gamma: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions._dirichlet_grad: [{'is_kwarg_only': 'False', 'name': 'x', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'alpha', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'total', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions._sample_dirichlet: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.poisson: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + 
torch._C._VariableFunctions.binomial: [{'is_kwarg_only': 'False', 'name': 'count', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'prob', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.native_norm: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.native_norm: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'p', 'simple_type': 'Scalar?'}, {'is_kwarg_only': 'False', 'name': 'dim', 'simple_type': 'IntArrayRef', 'size': 1}, {'is_kwarg_only': 'False', 'name': 'keepdim', 'simple_type': 'bool'}, {'is_kwarg_only': 'False', 'name': 'dtype', 'simple_type': 'ScalarType?'}], + torch._C._VariableFunctions._sparse_sum: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions._sparse_sum: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'True', 'name': 'dtype', 'simple_type': 'ScalarType'}], + torch._C._VariableFunctions._sparse_sum: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'dim', 'simple_type': 'IntArrayRef', 'size': 1}], + torch._C._VariableFunctions._sparse_sum: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'dim', 'simple_type': 'IntArrayRef', 'size': 1}, {'is_kwarg_only': 'True', 'name': 'dtype', 'simple_type': 'ScalarType'}], + torch._C._VariableFunctions._sparse_csr_sum: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'dim', 'simple_type': 'IntArrayRef', 'size': 1}], + torch._C._VariableFunctions._sparse_csr_prod: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'dim', 'simple_type': 'IntArrayRef', 'size': 1}], + torch._C._VariableFunctions._sparse_softmax_backward_data: [{'is_kwarg_only': 'False', 'name': 'grad_output', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'output', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'dim', 'simple_type': 'int64_t'}, {'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions._sparse_log_softmax_backward_data: [{'is_kwarg_only': 'False', 'name': 'grad_output', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'output', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'dim', 'simple_type': 'int64_t'}, {'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.norm: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'p', 'simple_type': 'Scalar?'}, {'is_kwarg_only': 'True', 'name': 'dtype', 'simple_type': 'ScalarType'}], + torch._C._VariableFunctions.norm: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.norm: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'p', 'simple_type': 'Scalar?'}, {'is_kwarg_only': 'False', 'name': 'dim', 'simple_type': 'IntArrayRef', 'size': 1}, {'is_kwarg_only': 'False', 'name': 'keepdim', 'simple_type': 'bool'}, {'is_kwarg_only': 'True', 'name': 'dtype', 'simple_type': 'ScalarType'}], + torch._C._VariableFunctions.norm: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'p', 'simple_type': 'Scalar?'}, {'is_kwarg_only': 'False', 'name': 'dim', 
'simple_type': 'IntArrayRef', 'size': 1}], + torch._C._VariableFunctions.norm: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'p', 'simple_type': 'Scalar?'}, {'is_kwarg_only': 'False', 'name': 'dim', 'simple_type': 'IntArrayRef', 'size': 1}, {'is_kwarg_only': 'False', 'name': 'keepdim', 'simple_type': 'bool'}, {'is_kwarg_only': 'True', 'name': 'dtype', 'simple_type': 'ScalarType'}], + torch._C._VariableFunctions.norm: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'p', 'simple_type': 'Scalar?'}, {'is_kwarg_only': 'False', 'name': 'dim', 'simple_type': 'IntArrayRef', 'size': 1}], + torch._C._VariableFunctions.norm: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'p', 'simple_type': 'Scalar?'}, {'is_kwarg_only': 'False', 'name': 'dim', 'simple_type': 'DimnameList', 'size': 1}, {'is_kwarg_only': 'False', 'name': 'keepdim', 'simple_type': 'bool'}, {'is_kwarg_only': 'True', 'name': 'dtype', 'simple_type': 'ScalarType'}], + torch._C._VariableFunctions.norm: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'p', 'simple_type': 'Scalar?'}, {'is_kwarg_only': 'False', 'name': 'dim', 'simple_type': 'DimnameList', 'size': 1}], + torch._C._VariableFunctions.norm: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'p', 'simple_type': 'Scalar?'}, {'is_kwarg_only': 'False', 'name': 'dim', 'simple_type': 'DimnameList', 'size': 1}, {'is_kwarg_only': 'False', 'name': 'keepdim', 'simple_type': 'bool'}, {'is_kwarg_only': 'True', 'name': 'dtype', 'simple_type': 'ScalarType'}], + torch._C._VariableFunctions.norm: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'p', 'simple_type': 'Scalar?'}, {'is_kwarg_only': 'False', 'name': 'dim', 'simple_type': 'DimnameList', 'size': 1}], + torch._C._VariableFunctions.frexp: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.frexp: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.frobenius_norm: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'dim', 'simple_type': 'IntArrayRef', 'size': 1}], + torch._C._VariableFunctions.frobenius_norm: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'dim', 'simple_type': 'IntArrayRef', 'size': 1}], + torch._C._VariableFunctions.nuclear_norm: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.nuclear_norm: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.nuclear_norm: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'dim', 'simple_type': 'IntArrayRef', 'size': 2}], + torch._C._VariableFunctions.nuclear_norm: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'dim', 'simple_type': 'IntArrayRef', 'size': 2}], + torch._C._VariableFunctions.clone: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.positive: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.resize_as_: [{'is_kwarg_only': 'False', 
'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'the_template', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.resize_as_sparse_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'the_template', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.zero_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.sub: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.sub: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.subtract: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.subtract: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.subtract: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Scalar'}], + torch._C._VariableFunctions.rsub: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.rsub: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Scalar'}], + torch._C._VariableFunctions.heaviside: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'values', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.heaviside: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'values', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.addmm: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'mat1', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'mat2', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.addmm: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'mat1', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'mat2', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions._addmm_activation: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'mat1', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'mat2', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions._addmm_activation: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'mat1', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'mat2', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions._scaled_mm: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'mat2', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions._scaled_mm: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'mat2', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions._validate_sparse_coo_tensor_args: [{'is_kwarg_only': 'False', 'name': 'indices', 'simple_type': 'Tensor'}, 
{'is_kwarg_only': 'False', 'name': 'values', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'size', 'simple_type': 'IntArrayRef'}], + torch._C._VariableFunctions._validate_sparse_compressed_tensor_args: [{'is_kwarg_only': 'False', 'name': 'compressed_indices', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'plain_indices', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'values', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'size', 'simple_type': 'IntArrayRef'}, {'is_kwarg_only': 'False', 'name': 'layout', 'simple_type': 'Layout'}], + torch._C._VariableFunctions._validate_sparse_csr_tensor_args: [{'is_kwarg_only': 'False', 'name': 'crow_indices', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'col_indices', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'values', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'size', 'simple_type': 'IntArrayRef'}], + torch._C._VariableFunctions._validate_sparse_csc_tensor_args: [{'is_kwarg_only': 'False', 'name': 'ccol_indices', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'row_indices', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'values', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'size', 'simple_type': 'IntArrayRef'}], + torch._C._VariableFunctions._validate_sparse_bsr_tensor_args: [{'is_kwarg_only': 'False', 'name': 'crow_indices', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'col_indices', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'values', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'size', 'simple_type': 'IntArrayRef'}], + torch._C._VariableFunctions._validate_sparse_bsc_tensor_args: [{'is_kwarg_only': 'False', 'name': 'ccol_indices', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'row_indices', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'values', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'size', 'simple_type': 'IntArrayRef'}], + torch._C._VariableFunctions._to_cpu: [{'is_kwarg_only': 'False', 'name': 'tensors', 'simple_type': 'TensorList'}], + torch._C._VariableFunctions._coalesce: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.hspmm: [{'is_kwarg_only': 'False', 'name': 'mat1', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'mat2', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.hspmm: [{'is_kwarg_only': 'False', 'name': 'mat1', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'mat2', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.unbind: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.unbind: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'dim', 'simple_type': 'Dimname'}], + torch._C._VariableFunctions._to_sparse_semi_structured: [{'is_kwarg_only': 'False', 'name': 'dense', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.quantize_per_tensor_dynamic: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'dtype', 'simple_type': 'ScalarType'}, {'is_kwarg_only': 'False', 'name': 'reduce_range', 'simple_type': 'bool'}], + torch._C._VariableFunctions.quantize_per_tensor: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'scale', 'simple_type': 'double'}, {'is_kwarg_only': 'False', 
'name': 'zero_point', 'simple_type': 'int64_t'}, {'is_kwarg_only': 'False', 'name': 'dtype', 'simple_type': 'ScalarType'}], + torch._C._VariableFunctions.quantize_per_tensor: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'scale', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'zero_point', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'dtype', 'simple_type': 'ScalarType'}], + torch._C._VariableFunctions.quantize_per_tensor: [{'is_kwarg_only': 'False', 'name': 'tensors', 'simple_type': 'TensorList'}, {'is_kwarg_only': 'False', 'name': 'scales', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'zero_points', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'dtype', 'simple_type': 'ScalarType'}], + torch._C._VariableFunctions.quantize_per_channel: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'scales', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'zero_points', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'axis', 'simple_type': 'int64_t'}, {'is_kwarg_only': 'False', 'name': 'dtype', 'simple_type': 'ScalarType'}], + torch._C._VariableFunctions.dequantize: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.dequantize: [{'is_kwarg_only': 'False', 'name': 'tensors', 'simple_type': 'TensorList'}], + torch._C._VariableFunctions.q_scale: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.q_zero_point: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.q_per_channel_scales: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.q_per_channel_zero_points: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.q_per_channel_axis: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.int_repr: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions._make_per_tensor_quantized_tensor: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'scale', 'simple_type': 'double'}, {'is_kwarg_only': 'False', 'name': 'zero_point', 'simple_type': 'int64_t'}], + torch._C._VariableFunctions._make_per_channel_quantized_tensor: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'scale', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'zero_point', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'axis', 'simple_type': 'int64_t'}], + torch._C._VariableFunctions.fake_quantize_per_tensor_affine: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'scale', 'simple_type': 'double'}, {'is_kwarg_only': 'False', 'name': 'zero_point', 'simple_type': 'int64_t'}, {'is_kwarg_only': 'False', 'name': 'quant_min', 'simple_type': 'int64_t'}, {'is_kwarg_only': 'False', 'name': 'quant_max', 'simple_type': 'int64_t'}], + torch._C._VariableFunctions.fake_quantize_per_tensor_affine: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'scale', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'zero_point', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 
'quant_min', 'simple_type': 'int64_t'}, {'is_kwarg_only': 'False', 'name': 'quant_max', 'simple_type': 'int64_t'}], + torch._C._VariableFunctions._fake_quantize_per_tensor_affine_cachemask_tensor_qparams: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'scale', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'zero_point', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'fake_quant_enabled', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'quant_min', 'simple_type': 'int64_t'}, {'is_kwarg_only': 'False', 'name': 'quant_max', 'simple_type': 'int64_t'}], + torch._C._VariableFunctions._fake_quantize_learnable_per_tensor_affine: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'scale', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'zero_point', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'quant_min', 'simple_type': 'int64_t'}, {'is_kwarg_only': 'False', 'name': 'quant_max', 'simple_type': 'int64_t'}], + torch._C._VariableFunctions.fake_quantize_per_channel_affine: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'scale', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'zero_point', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'axis', 'simple_type': 'int64_t'}, {'is_kwarg_only': 'False', 'name': 'quant_min', 'simple_type': 'int64_t'}, {'is_kwarg_only': 'False', 'name': 'quant_max', 'simple_type': 'int64_t'}], + torch._C._VariableFunctions._fake_quantize_learnable_per_channel_affine: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'scale', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'zero_point', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'axis', 'simple_type': 'int64_t'}, {'is_kwarg_only': 'False', 'name': 'quant_min', 'simple_type': 'int64_t'}, {'is_kwarg_only': 'False', 'name': 'quant_max', 'simple_type': 'int64_t'}], + torch._C._VariableFunctions.fused_moving_avg_obs_fake_quant: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'observer_on', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'fake_quant_on', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'running_min', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'running_max', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'scale', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'zero_point', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'averaging_const', 'simple_type': 'double'}, {'is_kwarg_only': 'False', 'name': 'quant_min', 'simple_type': 'int64_t'}, {'is_kwarg_only': 'False', 'name': 'quant_max', 'simple_type': 'int64_t'}, {'is_kwarg_only': 'False', 'name': 'ch_axis', 'simple_type': 'int64_t'}], + torch._C._VariableFunctions._fused_moving_avg_obs_fq_helper: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'observer_on', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'fake_quant_on', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'running_min', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'running_max', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'scale', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'zero_point', 
'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'averaging_const', 'simple_type': 'double'}, {'is_kwarg_only': 'False', 'name': 'quant_min', 'simple_type': 'int64_t'}, {'is_kwarg_only': 'False', 'name': 'quant_max', 'simple_type': 'int64_t'}, {'is_kwarg_only': 'False', 'name': 'ch_axis', 'simple_type': 'int64_t'}], + torch._C._VariableFunctions._choose_qparams_per_tensor: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions._saturate_weight_to_fp16: [{'is_kwarg_only': 'False', 'name': 'weight', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.choose_qparams_optimized: [{'is_kwarg_only': 'False', 'name': 'input', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'numel', 'simple_type': 'int64_t'}, {'is_kwarg_only': 'False', 'name': 'n_bins', 'simple_type': 'int64_t'}, {'is_kwarg_only': 'False', 'name': 'ratio', 'simple_type': 'double'}, {'is_kwarg_only': 'False', 'name': 'bit_width', 'simple_type': 'int64_t'}], + torch._C._VariableFunctions.meshgrid: [{'is_kwarg_only': 'False', 'name': 'tensors', 'simple_type': 'TensorList'}], + torch._C._VariableFunctions.meshgrid: [{'is_kwarg_only': 'False', 'name': 'tensors', 'simple_type': 'TensorList'}, {'is_kwarg_only': 'True', 'name': 'indexing', 'simple_type': 'c10::string_view'}], + torch._C._VariableFunctions.cartesian_prod: [{'is_kwarg_only': 'False', 'name': 'tensors', 'simple_type': 'TensorList'}], + torch._C._VariableFunctions.combinations: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.result_type: [{'is_kwarg_only': 'False', 'name': 'tensor', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.result_type: [{'is_kwarg_only': 'False', 'name': 'tensor', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Scalar'}], + torch._C._VariableFunctions.result_type: [{'is_kwarg_only': 'False', 'name': 'scalar', 'simple_type': 'Scalar'}, {'is_kwarg_only': 'False', 'name': 'tensor', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.result_type: [{'is_kwarg_only': 'False', 'name': 'scalar1', 'simple_type': 'Scalar'}, {'is_kwarg_only': 'False', 'name': 'scalar2', 'simple_type': 'Scalar'}], + torch._C._VariableFunctions.can_cast: [{'is_kwarg_only': 'False', 'name': 'from', 'simple_type': 'ScalarType'}, {'is_kwarg_only': 'False', 'name': 'to', 'simple_type': 'ScalarType'}], + torch._C._VariableFunctions.promote_types: [{'is_kwarg_only': 'False', 'name': 'type1', 'simple_type': 'ScalarType'}, {'is_kwarg_only': 'False', 'name': 'type2', 'simple_type': 'ScalarType'}], + torch._C._VariableFunctions._lstm_mps: [{'is_kwarg_only': 'False', 'name': 'input', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'hx', 'simple_type': 'TensorList'}, {'is_kwarg_only': 'False', 'name': 'params', 'simple_type': 'TensorList'}, {'is_kwarg_only': 'False', 'name': 'has_biases', 'simple_type': 'bool'}, {'is_kwarg_only': 'False', 'name': 'num_layers', 'simple_type': 'int64_t'}, {'is_kwarg_only': 'False', 'name': 'dropout', 'simple_type': 'double'}, {'is_kwarg_only': 'False', 'name': 'train', 'simple_type': 'bool'}, {'is_kwarg_only': 'False', 'name': 'bidirectional', 'simple_type': 'bool'}, {'is_kwarg_only': 'False', 'name': 'batch_first', 'simple_type': 'bool'}], + torch._C._VariableFunctions.lstm: [{'is_kwarg_only': 'False', 'name': 'input', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'hx', 'simple_type': 
'TensorList'}, {'is_kwarg_only': 'False', 'name': 'params', 'simple_type': 'TensorList'}, {'is_kwarg_only': 'False', 'name': 'has_biases', 'simple_type': 'bool'}, {'is_kwarg_only': 'False', 'name': 'num_layers', 'simple_type': 'int64_t'}, {'is_kwarg_only': 'False', 'name': 'dropout', 'simple_type': 'double'}, {'is_kwarg_only': 'False', 'name': 'train', 'simple_type': 'bool'}, {'is_kwarg_only': 'False', 'name': 'bidirectional', 'simple_type': 'bool'}, {'is_kwarg_only': 'False', 'name': 'batch_first', 'simple_type': 'bool'}], + torch._C._VariableFunctions.lstm: [{'is_kwarg_only': 'False', 'name': 'data', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'batch_sizes', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'hx', 'simple_type': 'TensorList'}, {'is_kwarg_only': 'False', 'name': 'params', 'simple_type': 'TensorList'}, {'is_kwarg_only': 'False', 'name': 'has_biases', 'simple_type': 'bool'}, {'is_kwarg_only': 'False', 'name': 'num_layers', 'simple_type': 'int64_t'}, {'is_kwarg_only': 'False', 'name': 'dropout', 'simple_type': 'double'}, {'is_kwarg_only': 'False', 'name': 'train', 'simple_type': 'bool'}, {'is_kwarg_only': 'False', 'name': 'bidirectional', 'simple_type': 'bool'}], + torch._C._VariableFunctions.gru: [{'is_kwarg_only': 'False', 'name': 'input', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'hx', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'params', 'simple_type': 'TensorList'}, {'is_kwarg_only': 'False', 'name': 'has_biases', 'simple_type': 'bool'}, {'is_kwarg_only': 'False', 'name': 'num_layers', 'simple_type': 'int64_t'}, {'is_kwarg_only': 'False', 'name': 'dropout', 'simple_type': 'double'}, {'is_kwarg_only': 'False', 'name': 'train', 'simple_type': 'bool'}, {'is_kwarg_only': 'False', 'name': 'bidirectional', 'simple_type': 'bool'}, {'is_kwarg_only': 'False', 'name': 'batch_first', 'simple_type': 'bool'}], + torch._C._VariableFunctions.gru: [{'is_kwarg_only': 'False', 'name': 'data', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'batch_sizes', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'hx', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'params', 'simple_type': 'TensorList'}, {'is_kwarg_only': 'False', 'name': 'has_biases', 'simple_type': 'bool'}, {'is_kwarg_only': 'False', 'name': 'num_layers', 'simple_type': 'int64_t'}, {'is_kwarg_only': 'False', 'name': 'dropout', 'simple_type': 'double'}, {'is_kwarg_only': 'False', 'name': 'train', 'simple_type': 'bool'}, {'is_kwarg_only': 'False', 'name': 'bidirectional', 'simple_type': 'bool'}], + torch._C._VariableFunctions.rnn_tanh: [{'is_kwarg_only': 'False', 'name': 'input', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'hx', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'params', 'simple_type': 'TensorList'}, {'is_kwarg_only': 'False', 'name': 'has_biases', 'simple_type': 'bool'}, {'is_kwarg_only': 'False', 'name': 'num_layers', 'simple_type': 'int64_t'}, {'is_kwarg_only': 'False', 'name': 'dropout', 'simple_type': 'double'}, {'is_kwarg_only': 'False', 'name': 'train', 'simple_type': 'bool'}, {'is_kwarg_only': 'False', 'name': 'bidirectional', 'simple_type': 'bool'}, {'is_kwarg_only': 'False', 'name': 'batch_first', 'simple_type': 'bool'}], + torch._C._VariableFunctions.rnn_tanh: [{'is_kwarg_only': 'False', 'name': 'data', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'batch_sizes', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'hx', 'simple_type': 'Tensor'}, 
{'is_kwarg_only': 'False', 'name': 'params', 'simple_type': 'TensorList'}, {'is_kwarg_only': 'False', 'name': 'has_biases', 'simple_type': 'bool'}, {'is_kwarg_only': 'False', 'name': 'num_layers', 'simple_type': 'int64_t'}, {'is_kwarg_only': 'False', 'name': 'dropout', 'simple_type': 'double'}, {'is_kwarg_only': 'False', 'name': 'train', 'simple_type': 'bool'}, {'is_kwarg_only': 'False', 'name': 'bidirectional', 'simple_type': 'bool'}], + torch._C._VariableFunctions.rnn_relu: [{'is_kwarg_only': 'False', 'name': 'input', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'hx', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'params', 'simple_type': 'TensorList'}, {'is_kwarg_only': 'False', 'name': 'has_biases', 'simple_type': 'bool'}, {'is_kwarg_only': 'False', 'name': 'num_layers', 'simple_type': 'int64_t'}, {'is_kwarg_only': 'False', 'name': 'dropout', 'simple_type': 'double'}, {'is_kwarg_only': 'False', 'name': 'train', 'simple_type': 'bool'}, {'is_kwarg_only': 'False', 'name': 'bidirectional', 'simple_type': 'bool'}, {'is_kwarg_only': 'False', 'name': 'batch_first', 'simple_type': 'bool'}], + torch._C._VariableFunctions.rnn_relu: [{'is_kwarg_only': 'False', 'name': 'data', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'batch_sizes', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'hx', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'params', 'simple_type': 'TensorList'}, {'is_kwarg_only': 'False', 'name': 'has_biases', 'simple_type': 'bool'}, {'is_kwarg_only': 'False', 'name': 'num_layers', 'simple_type': 'int64_t'}, {'is_kwarg_only': 'False', 'name': 'dropout', 'simple_type': 'double'}, {'is_kwarg_only': 'False', 'name': 'train', 'simple_type': 'bool'}, {'is_kwarg_only': 'False', 'name': 'bidirectional', 'simple_type': 'bool'}], + torch._C._VariableFunctions.lstm_cell: [{'is_kwarg_only': 'False', 'name': 'input', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'hx', 'simple_type': 'TensorList'}, {'is_kwarg_only': 'False', 'name': 'w_ih', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'w_hh', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.gru_cell: [{'is_kwarg_only': 'False', 'name': 'input', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'hx', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'w_ih', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'w_hh', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.rnn_tanh_cell: [{'is_kwarg_only': 'False', 'name': 'input', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'hx', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'w_ih', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'w_hh', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.rnn_relu_cell: [{'is_kwarg_only': 'False', 'name': 'input', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'hx', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'w_ih', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'w_hh', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.quantized_lstm_cell: [{'is_kwarg_only': 'False', 'name': 'input', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'hx', 'simple_type': 'TensorList'}, {'is_kwarg_only': 'False', 'name': 'w_ih', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'w_hh', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'b_ih', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'b_hh', 
'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'packed_ih', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'packed_hh', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'col_offsets_ih', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'col_offsets_hh', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'scale_ih', 'simple_type': 'Scalar'}, {'is_kwarg_only': 'False', 'name': 'scale_hh', 'simple_type': 'Scalar'}, {'is_kwarg_only': 'False', 'name': 'zero_point_ih', 'simple_type': 'Scalar'}, {'is_kwarg_only': 'False', 'name': 'zero_point_hh', 'simple_type': 'Scalar'}], + torch._C._VariableFunctions.quantized_gru_cell: [{'is_kwarg_only': 'False', 'name': 'input', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'hx', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'w_ih', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'w_hh', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'b_ih', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'b_hh', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'packed_ih', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'packed_hh', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'col_offsets_ih', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'col_offsets_hh', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'scale_ih', 'simple_type': 'Scalar'}, {'is_kwarg_only': 'False', 'name': 'scale_hh', 'simple_type': 'Scalar'}, {'is_kwarg_only': 'False', 'name': 'zero_point_ih', 'simple_type': 'Scalar'}, {'is_kwarg_only': 'False', 'name': 'zero_point_hh', 'simple_type': 'Scalar'}], + torch._C._VariableFunctions.quantized_rnn_relu_cell: [{'is_kwarg_only': 'False', 'name': 'input', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'hx', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'w_ih', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'w_hh', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'b_ih', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'b_hh', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'packed_ih', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'packed_hh', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'col_offsets_ih', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'col_offsets_hh', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'scale_ih', 'simple_type': 'Scalar'}, {'is_kwarg_only': 'False', 'name': 'scale_hh', 'simple_type': 'Scalar'}, {'is_kwarg_only': 'False', 'name': 'zero_point_ih', 'simple_type': 'Scalar'}, {'is_kwarg_only': 'False', 'name': 'zero_point_hh', 'simple_type': 'Scalar'}], + torch._C._VariableFunctions.quantized_rnn_tanh_cell: [{'is_kwarg_only': 'False', 'name': 'input', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'hx', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'w_ih', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'w_hh', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'b_ih', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'b_hh', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'packed_ih', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'packed_hh', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'col_offsets_ih', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'col_offsets_hh', 'simple_type': 
'Tensor'}, {'is_kwarg_only': 'False', 'name': 'scale_ih', 'simple_type': 'Scalar'}, {'is_kwarg_only': 'False', 'name': 'scale_hh', 'simple_type': 'Scalar'}, {'is_kwarg_only': 'False', 'name': 'zero_point_ih', 'simple_type': 'Scalar'}, {'is_kwarg_only': 'False', 'name': 'zero_point_hh', 'simple_type': 'Scalar'}], + torch._C._VariableFunctions._pack_padded_sequence: [{'is_kwarg_only': 'False', 'name': 'input', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'lengths', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'batch_first', 'simple_type': 'bool'}], + torch._C._VariableFunctions._pad_packed_sequence: [{'is_kwarg_only': 'False', 'name': 'data', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'batch_sizes', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'batch_first', 'simple_type': 'bool'}, {'is_kwarg_only': 'False', 'name': 'padding_value', 'simple_type': 'Scalar'}, {'is_kwarg_only': 'False', 'name': 'total_length', 'simple_type': 'int64_t'}], + torch._C._VariableFunctions.masked_fill: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'mask', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'value', 'simple_type': 'Scalar'}], + torch._C._VariableFunctions.masked_fill: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'mask', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'value', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.masked_scatter: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'mask', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'source', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions._masked_softmax: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'mask', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.put: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'index', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'source', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.index_add: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'dim', 'simple_type': 'int64_t'}, {'is_kwarg_only': 'False', 'name': 'index', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'source', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.index_add: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'dim', 'simple_type': 'int64_t'}, {'is_kwarg_only': 'False', 'name': 'index', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'source', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.index_add: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'dim', 'simple_type': 'Dimname'}, {'is_kwarg_only': 'False', 'name': 'index', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'source', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.index_reduce: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'dim', 'simple_type': 'int64_t'}, {'is_kwarg_only': 'False', 'name': 'index', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'source', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'reduce', 
'simple_type': 'c10::string_view'}], + torch._C._VariableFunctions.index_reduce: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'dim', 'simple_type': 'int64_t'}, {'is_kwarg_only': 'False', 'name': 'index', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'source', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'reduce', 'simple_type': 'c10::string_view'}], + torch._C._VariableFunctions.index_fill: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'dim', 'simple_type': 'int64_t'}, {'is_kwarg_only': 'False', 'name': 'index', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'value', 'simple_type': 'Scalar'}], + torch._C._VariableFunctions.index_fill: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'dim', 'simple_type': 'int64_t'}, {'is_kwarg_only': 'False', 'name': 'index', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'value', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.index_fill: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'dim', 'simple_type': 'Dimname'}, {'is_kwarg_only': 'False', 'name': 'index', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'value', 'simple_type': 'Scalar'}], + torch._C._VariableFunctions.index_fill: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'dim', 'simple_type': 'Dimname'}, {'is_kwarg_only': 'False', 'name': 'index', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'value', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.scatter: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'dim', 'simple_type': 'int64_t'}, {'is_kwarg_only': 'False', 'name': 'index', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'src', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.scatter: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'dim', 'simple_type': 'int64_t'}, {'is_kwarg_only': 'False', 'name': 'index', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'src', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.scatter: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'dim', 'simple_type': 'int64_t'}, {'is_kwarg_only': 'False', 'name': 'index', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'value', 'simple_type': 'Scalar'}], + torch._C._VariableFunctions.scatter: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'dim', 'simple_type': 'int64_t'}, {'is_kwarg_only': 'False', 'name': 'index', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'value', 'simple_type': 'Scalar'}], + torch._C._VariableFunctions.scatter: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'dim', 'simple_type': 'int64_t'}, {'is_kwarg_only': 'False', 'name': 'index', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'src', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'True', 'name': 'reduce', 'simple_type': 'c10::string_view'}], + torch._C._VariableFunctions.scatter: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'dim', 
'simple_type': 'int64_t'}, {'is_kwarg_only': 'False', 'name': 'index', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'src', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'True', 'name': 'reduce', 'simple_type': 'c10::string_view'}], + torch._C._VariableFunctions.scatter: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'dim', 'simple_type': 'int64_t'}, {'is_kwarg_only': 'False', 'name': 'index', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'value', 'simple_type': 'Scalar'}, {'is_kwarg_only': 'True', 'name': 'reduce', 'simple_type': 'c10::string_view'}], + torch._C._VariableFunctions.scatter: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'dim', 'simple_type': 'int64_t'}, {'is_kwarg_only': 'False', 'name': 'index', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'value', 'simple_type': 'Scalar'}, {'is_kwarg_only': 'True', 'name': 'reduce', 'simple_type': 'c10::string_view'}], + torch._C._VariableFunctions.scatter: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'dim', 'simple_type': 'Dimname'}, {'is_kwarg_only': 'False', 'name': 'index', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'src', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.scatter: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'dim', 'simple_type': 'Dimname'}, {'is_kwarg_only': 'False', 'name': 'index', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'value', 'simple_type': 'Scalar'}], + torch._C._VariableFunctions.scatter_add: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'dim', 'simple_type': 'int64_t'}, {'is_kwarg_only': 'False', 'name': 'index', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'src', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.scatter_add: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'dim', 'simple_type': 'int64_t'}, {'is_kwarg_only': 'False', 'name': 'index', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'src', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.scatter_add: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'dim', 'simple_type': 'Dimname'}, {'is_kwarg_only': 'False', 'name': 'index', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'src', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.scatter_reduce: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'dim', 'simple_type': 'int64_t'}, {'is_kwarg_only': 'False', 'name': 'index', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'src', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'reduce', 'simple_type': 'c10::string_view'}], + torch._C._VariableFunctions.scatter_reduce: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'dim', 'simple_type': 'int64_t'}, {'is_kwarg_only': 'False', 'name': 'index', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'src', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'reduce', 'simple_type': 'c10::string_view'}], + torch._C._VariableFunctions.bitwise_and: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 
'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.bitwise_and: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Scalar'}], + torch._C._VariableFunctions.bitwise_and: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Scalar'}], + torch._C._VariableFunctions.bitwise_and: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Scalar'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.bitwise_and: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.__and__: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Scalar'}], + torch._C._VariableFunctions.__and__: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.bitwise_or: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.bitwise_or: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Scalar'}], + torch._C._VariableFunctions.bitwise_or: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Scalar'}], + torch._C._VariableFunctions.bitwise_or: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Scalar'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.bitwise_or: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.__or__: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Scalar'}], + torch._C._VariableFunctions.__or__: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.bitwise_xor: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.bitwise_xor: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Scalar'}], + torch._C._VariableFunctions.bitwise_xor: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Scalar'}], + torch._C._VariableFunctions.bitwise_xor: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Scalar'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.bitwise_xor: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.__xor__: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Scalar'}], + 
torch._C._VariableFunctions.__xor__: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.__lshift__: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Scalar'}], + torch._C._VariableFunctions.__lshift__: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.bitwise_left_shift: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.bitwise_left_shift: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.bitwise_left_shift: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Scalar'}], + torch._C._VariableFunctions.bitwise_left_shift: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Scalar'}], + torch._C._VariableFunctions.bitwise_left_shift: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Scalar'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.__rshift__: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Scalar'}], + torch._C._VariableFunctions.__rshift__: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.bitwise_right_shift: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.bitwise_right_shift: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.bitwise_right_shift: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Scalar'}], + torch._C._VariableFunctions.bitwise_right_shift: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Scalar'}], + torch._C._VariableFunctions.bitwise_right_shift: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Scalar'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.addbmm: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'batch1', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'batch2', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.addbmm: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'batch1', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'batch2', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.diag: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.diag: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + 
torch._C._VariableFunctions.cross: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.cross: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.triu: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.triu: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.tril: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.tril: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.tril_indices: [{'is_kwarg_only': 'False', 'name': 'row', 'simple_type': 'int64_t'}, {'is_kwarg_only': 'False', 'name': 'col', 'simple_type': 'int64_t'}], + torch._C._VariableFunctions.triu_indices: [{'is_kwarg_only': 'False', 'name': 'row', 'simple_type': 'int64_t'}, {'is_kwarg_only': 'False', 'name': 'col', 'simple_type': 'int64_t'}], + torch._C._VariableFunctions.trace: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.ne: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Scalar'}], + torch._C._VariableFunctions.ne: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Scalar'}], + torch._C._VariableFunctions.ne: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.ne: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.not_equal: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Scalar'}], + torch._C._VariableFunctions.not_equal: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Scalar'}], + torch._C._VariableFunctions.not_equal: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.not_equal: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.eq: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Scalar'}], + torch._C._VariableFunctions.eq: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Scalar'}], + torch._C._VariableFunctions.eq: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.eq: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.ge: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 
'Scalar'}], + torch._C._VariableFunctions.ge: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Scalar'}], + torch._C._VariableFunctions.ge: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.ge: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.greater_equal: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Scalar'}], + torch._C._VariableFunctions.greater_equal: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Scalar'}], + torch._C._VariableFunctions.greater_equal: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.greater_equal: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.le: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Scalar'}], + torch._C._VariableFunctions.le: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Scalar'}], + torch._C._VariableFunctions.le: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.le: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.less_equal: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Scalar'}], + torch._C._VariableFunctions.less_equal: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Scalar'}], + torch._C._VariableFunctions.less_equal: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.less_equal: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.gt: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Scalar'}], + torch._C._VariableFunctions.gt: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Scalar'}], + torch._C._VariableFunctions.gt: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.gt: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.greater: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 
'name': 'other', 'simple_type': 'Scalar'}], + torch._C._VariableFunctions.greater: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Scalar'}], + torch._C._VariableFunctions.greater: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.greater: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.lt: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Scalar'}], + torch._C._VariableFunctions.lt: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Scalar'}], + torch._C._VariableFunctions.lt: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.lt: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.less: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Scalar'}], + torch._C._VariableFunctions.less: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Scalar'}], + torch._C._VariableFunctions.less: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.less: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.take: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'index', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.take: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'index', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.take_along_dim: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'indices', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.take_along_dim: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'indices', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.index_select: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'dim', 'simple_type': 'int64_t'}, {'is_kwarg_only': 'False', 'name': 'index', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.index_select: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'dim', 'simple_type': 'int64_t'}, {'is_kwarg_only': 'False', 'name': 'index', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.index_select: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'dim', 'simple_type': 'Dimname'}, {'is_kwarg_only': 'False', 'name': 'index', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.index_select: [{'is_kwarg_only': 
'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'dim', 'simple_type': 'Dimname'}, {'is_kwarg_only': 'False', 'name': 'index', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.masked_select: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'mask', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.masked_select: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'mask', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.nonzero_static: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'True', 'name': 'size', 'simple_type': 'int64_t'}], + torch._C._VariableFunctions.nonzero_static: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'True', 'name': 'size', 'simple_type': 'int64_t'}], + torch._C._VariableFunctions.argwhere: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.gather: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'dim', 'simple_type': 'int64_t'}, {'is_kwarg_only': 'False', 'name': 'index', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.gather: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'dim', 'simple_type': 'int64_t'}, {'is_kwarg_only': 'False', 'name': 'index', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.gather: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'dim', 'simple_type': 'Dimname'}, {'is_kwarg_only': 'False', 'name': 'index', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.gather: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'dim', 'simple_type': 'Dimname'}, {'is_kwarg_only': 'False', 'name': 'index', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.addcmul: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'tensor1', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'tensor2', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.addcmul: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'tensor1', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'tensor2', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.addcdiv: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'tensor1', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'tensor2', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.addcdiv: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'tensor1', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'tensor2', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.triangular_solve: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'A', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.triangular_solve: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'A', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions._linalg_check_errors: [{'is_kwarg_only': 'False', 'name': 'info', 'simple_type': 'Tensor'}, 
{'is_kwarg_only': 'False', 'name': 'api_name', 'simple_type': 'c10::string_view'}, {'is_kwarg_only': 'True', 'name': 'is_matrix', 'simple_type': 'bool'}], + torch._C._VariableFunctions.svd: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.svd: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.swapaxes: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'axis0', 'simple_type': 'int64_t'}, {'is_kwarg_only': 'False', 'name': 'axis1', 'simple_type': 'int64_t'}], + torch._C._VariableFunctions.swapdims: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'dim0', 'simple_type': 'int64_t'}, {'is_kwarg_only': 'False', 'name': 'dim1', 'simple_type': 'int64_t'}], + torch._C._VariableFunctions.cholesky: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.cholesky: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.cholesky_solve: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'input2', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.cholesky_solve: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'input2', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.cholesky_inverse: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.cholesky_inverse: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.qr: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.qr: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.geqrf: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.geqrf: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.orgqr: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'input2', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.orgqr: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'input2', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.ormqr: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'input2', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'input3', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.ormqr: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'input2', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'input3', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions._lu_with_info: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.lu_solve: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'LU_data', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'LU_pivots', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.lu_solve: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'LU_data', 'simple_type': 'Tensor'}, 
{'is_kwarg_only': 'False', 'name': 'LU_pivots', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.lu_unpack: [{'is_kwarg_only': 'False', 'name': 'LU_data', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'LU_pivots', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.lu_unpack: [{'is_kwarg_only': 'False', 'name': 'LU_data', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'LU_pivots', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.multinomial: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'num_samples', 'simple_type': 'int64_t'}], + torch._C._VariableFunctions.multinomial: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'num_samples', 'simple_type': 'int64_t'}], + torch._C._VariableFunctions.lgamma: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.lgamma: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.digamma: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.digamma: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.polygamma: [{'is_kwarg_only': 'False', 'name': 'n', 'simple_type': 'int64_t'}, {'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.polygamma: [{'is_kwarg_only': 'False', 'name': 'n', 'simple_type': 'int64_t'}, {'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.erfinv: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.erfinv: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.i0: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.i0: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.i0_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.sign: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.sign: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.signbit: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.signbit: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.dist: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.atan2: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.atan2: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.arctan2: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.arctan2: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.lerp: [{'is_kwarg_only': 
'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'end', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'weight', 'simple_type': 'Scalar'}], + torch._C._VariableFunctions.lerp: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'end', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'weight', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.lerp: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'end', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'weight', 'simple_type': 'Scalar'}], + torch._C._VariableFunctions.lerp: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'end', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'weight', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.histc: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.histc: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.histogram: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'bins', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.histogram: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'bins', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.histogram: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.histogram: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions._histogramdd_bin_edges: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'bins', 'simple_type': 'IntArrayRef'}], + torch._C._VariableFunctions._histogramdd_from_bin_cts: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'bins', 'simple_type': 'IntArrayRef'}], + torch._C._VariableFunctions._histogramdd_from_bin_tensors: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'bins', 'simple_type': 'TensorList'}], + torch._C._VariableFunctions.histogramdd: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'bins', 'simple_type': 'IntArrayRef'}], + torch._C._VariableFunctions.histogramdd: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'bins', 'simple_type': 'int64_t'}], + torch._C._VariableFunctions.histogramdd: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'bins', 'simple_type': 'TensorList'}], + torch._C._VariableFunctions.fmod: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Scalar'}], + torch._C._VariableFunctions.fmod: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Scalar'}], + torch._C._VariableFunctions.fmod: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.fmod: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 
'False', 'name': 'other', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.hypot: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.hypot: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.igamma: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.igamma: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.igammac: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.igammac: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.nextafter: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.nextafter: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.remainder: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Scalar'}], + torch._C._VariableFunctions.remainder: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Scalar'}], + torch._C._VariableFunctions.remainder: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.remainder: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.remainder: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Scalar'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.fmin: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.fmin: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.fmax: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.fmax: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.maximum: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.maximum: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.minimum: [{'is_kwarg_only': 'False', 'name': 'self', 
'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.minimum: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.quantile: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'q', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.quantile: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'q', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.quantile: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'q', 'simple_type': 'double'}], + torch._C._VariableFunctions.quantile: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'q', 'simple_type': 'double'}], + torch._C._VariableFunctions.nanquantile: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'q', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.nanquantile: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'q', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.nanquantile: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'q', 'simple_type': 'double'}], + torch._C._VariableFunctions.nanquantile: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'q', 'simple_type': 'double'}], + torch._C._VariableFunctions.sort: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.sort: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'True', 'name': 'stable', 'simple_type': 'bool?'}], + torch._C._VariableFunctions.sort: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.sort: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'True', 'name': 'stable', 'simple_type': 'bool?'}], + torch._C._VariableFunctions.sort: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'dim', 'simple_type': 'Dimname'}], + torch._C._VariableFunctions.sort: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'True', 'name': 'stable', 'simple_type': 'bool?'}, {'is_kwarg_only': 'True', 'name': 'dim', 'simple_type': 'Dimname'}], + torch._C._VariableFunctions.sort: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'dim', 'simple_type': 'Dimname'}], + torch._C._VariableFunctions.sort: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'True', 'name': 'stable', 'simple_type': 'bool?'}, {'is_kwarg_only': 'True', 'name': 'dim', 'simple_type': 'Dimname'}], + torch._C._VariableFunctions.msort: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.msort: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.argsort: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.argsort: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, 
{'is_kwarg_only': 'True', 'name': 'stable', 'simple_type': 'bool'}], + torch._C._VariableFunctions.argsort: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'dim', 'simple_type': 'Dimname'}], + torch._C._VariableFunctions.topk: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'k', 'simple_type': 'SymInt'}], + torch._C._VariableFunctions.topk: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'k', 'simple_type': 'SymInt'}], + torch._C._VariableFunctions.renorm: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'p', 'simple_type': 'Scalar'}, {'is_kwarg_only': 'False', 'name': 'dim', 'simple_type': 'int64_t'}, {'is_kwarg_only': 'False', 'name': 'maxnorm', 'simple_type': 'Scalar'}], + torch._C._VariableFunctions.renorm: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'p', 'simple_type': 'Scalar'}, {'is_kwarg_only': 'False', 'name': 'dim', 'simple_type': 'int64_t'}, {'is_kwarg_only': 'False', 'name': 'maxnorm', 'simple_type': 'Scalar'}], + torch._C._VariableFunctions.equal: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.pow: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'exponent', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.pow: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'exponent', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.pow: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Scalar'}, {'is_kwarg_only': 'False', 'name': 'exponent', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.pow: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Scalar'}, {'is_kwarg_only': 'False', 'name': 'exponent', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.pow: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'exponent', 'simple_type': 'Scalar'}], + torch._C._VariableFunctions.pow: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'exponent', 'simple_type': 'Scalar'}], + torch._C._VariableFunctions.float_power: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'exponent', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.float_power: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'exponent', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.float_power: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Scalar'}, {'is_kwarg_only': 'False', 'name': 'exponent', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.float_power: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Scalar'}, {'is_kwarg_only': 'False', 'name': 'exponent', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.float_power: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'exponent', 'simple_type': 'Scalar'}], + torch._C._VariableFunctions.float_power: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 
'name': 'exponent', 'simple_type': 'Scalar'}], + torch._C._VariableFunctions.normal: [{'is_kwarg_only': 'False', 'name': 'mean', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.normal: [{'is_kwarg_only': 'False', 'name': 'mean', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.normal: [{'is_kwarg_only': 'False', 'name': 'mean', 'simple_type': 'double'}, {'is_kwarg_only': 'False', 'name': 'std', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.normal: [{'is_kwarg_only': 'False', 'name': 'mean', 'simple_type': 'double'}, {'is_kwarg_only': 'False', 'name': 'std', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.normal: [{'is_kwarg_only': 'False', 'name': 'mean', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'std', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.normal: [{'is_kwarg_only': 'False', 'name': 'mean', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'std', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.normal: [{'is_kwarg_only': 'False', 'name': 'mean', 'simple_type': 'double'}, {'is_kwarg_only': 'False', 'name': 'std', 'simple_type': 'double'}, {'is_kwarg_only': 'False', 'name': 'size', 'simple_type': 'SymIntArrayRef'}], + torch._C._VariableFunctions.normal: [{'is_kwarg_only': 'False', 'name': 'mean', 'simple_type': 'double'}, {'is_kwarg_only': 'False', 'name': 'std', 'simple_type': 'double'}, {'is_kwarg_only': 'False', 'name': 'size', 'simple_type': 'SymIntArrayRef'}], + torch._C._VariableFunctions._amp_foreach_non_finite_check_and_unscale_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'TensorList'}, {'is_kwarg_only': 'False', 'name': 'found_inf', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'inv_scale', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions._amp_update_scale_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'growth_tracker', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'found_inf', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'scale_growth_factor', 'simple_type': 'double'}, {'is_kwarg_only': 'False', 'name': 'scale_backoff_factor', 'simple_type': 'double'}, {'is_kwarg_only': 'False', 'name': 'growth_interval', 'simple_type': 'int64_t'}], + torch._C._VariableFunctions._foreach_add: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'TensorList'}, {'is_kwarg_only': 'False', 'name': 'scalar', 'simple_type': 'Scalar'}], + torch._C._VariableFunctions._foreach_add: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'TensorList'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'TensorList'}], + torch._C._VariableFunctions._foreach_add: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'TensorList'}, {'is_kwarg_only': 'False', 'name': 'scalars', 'simple_type': 'ScalarList'}], + torch._C._VariableFunctions._foreach_add: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'TensorList'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions._foreach_add_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'TensorList'}, {'is_kwarg_only': 'False', 'name': 'scalar', 'simple_type': 'Scalar'}], + torch._C._VariableFunctions._foreach_add_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'TensorList'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'TensorList'}], + torch._C._VariableFunctions._foreach_add_: [{'is_kwarg_only': 'False', 'name': 'self', 
'simple_type': 'TensorList'}, {'is_kwarg_only': 'False', 'name': 'scalars', 'simple_type': 'ScalarList'}], + torch._C._VariableFunctions._foreach_add_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'TensorList'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions._foreach_sub: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'TensorList'}, {'is_kwarg_only': 'False', 'name': 'scalar', 'simple_type': 'Scalar'}], + torch._C._VariableFunctions._foreach_sub: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'TensorList'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'TensorList'}], + torch._C._VariableFunctions._foreach_sub: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'TensorList'}, {'is_kwarg_only': 'False', 'name': 'scalars', 'simple_type': 'ScalarList'}], + torch._C._VariableFunctions._foreach_sub_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'TensorList'}, {'is_kwarg_only': 'False', 'name': 'scalar', 'simple_type': 'Scalar'}], + torch._C._VariableFunctions._foreach_sub_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'TensorList'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'TensorList'}], + torch._C._VariableFunctions._foreach_sub_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'TensorList'}, {'is_kwarg_only': 'False', 'name': 'scalars', 'simple_type': 'ScalarList'}], + torch._C._VariableFunctions._foreach_mul: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'TensorList'}, {'is_kwarg_only': 'False', 'name': 'scalar', 'simple_type': 'Scalar'}], + torch._C._VariableFunctions._foreach_mul: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'TensorList'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'TensorList'}], + torch._C._VariableFunctions._foreach_mul: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'TensorList'}, {'is_kwarg_only': 'False', 'name': 'scalars', 'simple_type': 'ScalarList'}], + torch._C._VariableFunctions._foreach_mul: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'TensorList'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions._foreach_mul_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'TensorList'}, {'is_kwarg_only': 'False', 'name': 'scalar', 'simple_type': 'Scalar'}], + torch._C._VariableFunctions._foreach_mul_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'TensorList'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'TensorList'}], + torch._C._VariableFunctions._foreach_mul_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'TensorList'}, {'is_kwarg_only': 'False', 'name': 'scalars', 'simple_type': 'ScalarList'}], + torch._C._VariableFunctions._foreach_mul_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'TensorList'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions._foreach_div: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'TensorList'}, {'is_kwarg_only': 'False', 'name': 'scalar', 'simple_type': 'Scalar'}], + torch._C._VariableFunctions._foreach_div: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'TensorList'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'TensorList'}], + torch._C._VariableFunctions._foreach_div: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'TensorList'}, {'is_kwarg_only': 'False', 'name': 'scalars', 
'simple_type': 'ScalarList'}], + torch._C._VariableFunctions._foreach_div: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'TensorList'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions._foreach_div_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'TensorList'}, {'is_kwarg_only': 'False', 'name': 'scalar', 'simple_type': 'Scalar'}], + torch._C._VariableFunctions._foreach_div_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'TensorList'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'TensorList'}], + torch._C._VariableFunctions._foreach_div_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'TensorList'}, {'is_kwarg_only': 'False', 'name': 'scalars', 'simple_type': 'ScalarList'}], + torch._C._VariableFunctions._foreach_div_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'TensorList'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions._foreach_clamp_max: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'TensorList'}, {'is_kwarg_only': 'False', 'name': 'scalar', 'simple_type': 'Scalar'}], + torch._C._VariableFunctions._foreach_clamp_max: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'TensorList'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'TensorList'}], + torch._C._VariableFunctions._foreach_clamp_max: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'TensorList'}, {'is_kwarg_only': 'False', 'name': 'scalars', 'simple_type': 'ScalarList'}], + torch._C._VariableFunctions._foreach_clamp_max_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'TensorList'}, {'is_kwarg_only': 'False', 'name': 'scalar', 'simple_type': 'Scalar'}], + torch._C._VariableFunctions._foreach_clamp_max_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'TensorList'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'TensorList'}], + torch._C._VariableFunctions._foreach_clamp_max_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'TensorList'}, {'is_kwarg_only': 'False', 'name': 'scalars', 'simple_type': 'ScalarList'}], + torch._C._VariableFunctions._foreach_clamp_min: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'TensorList'}, {'is_kwarg_only': 'False', 'name': 'scalar', 'simple_type': 'Scalar'}], + torch._C._VariableFunctions._foreach_clamp_min: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'TensorList'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'TensorList'}], + torch._C._VariableFunctions._foreach_clamp_min: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'TensorList'}, {'is_kwarg_only': 'False', 'name': 'scalars', 'simple_type': 'ScalarList'}], + torch._C._VariableFunctions._foreach_clamp_min_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'TensorList'}, {'is_kwarg_only': 'False', 'name': 'scalar', 'simple_type': 'Scalar'}], + torch._C._VariableFunctions._foreach_clamp_min_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'TensorList'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'TensorList'}], + torch._C._VariableFunctions._foreach_clamp_min_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'TensorList'}, {'is_kwarg_only': 'False', 'name': 'scalars', 'simple_type': 'ScalarList'}], + torch._C._VariableFunctions._foreach_maximum: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'TensorList'}, {'is_kwarg_only': 'False', 'name': 'scalar', 
'simple_type': 'Scalar'}], + torch._C._VariableFunctions._foreach_maximum: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'TensorList'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'TensorList'}], + torch._C._VariableFunctions._foreach_maximum: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'TensorList'}, {'is_kwarg_only': 'False', 'name': 'scalars', 'simple_type': 'ScalarList'}], + torch._C._VariableFunctions._foreach_maximum_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'TensorList'}, {'is_kwarg_only': 'False', 'name': 'scalar', 'simple_type': 'Scalar'}], + torch._C._VariableFunctions._foreach_maximum_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'TensorList'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'TensorList'}], + torch._C._VariableFunctions._foreach_maximum_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'TensorList'}, {'is_kwarg_only': 'False', 'name': 'scalars', 'simple_type': 'ScalarList'}], + torch._C._VariableFunctions._foreach_minimum: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'TensorList'}, {'is_kwarg_only': 'False', 'name': 'scalar', 'simple_type': 'Scalar'}], + torch._C._VariableFunctions._foreach_minimum: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'TensorList'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'TensorList'}], + torch._C._VariableFunctions._foreach_minimum: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'TensorList'}, {'is_kwarg_only': 'False', 'name': 'scalars', 'simple_type': 'ScalarList'}], + torch._C._VariableFunctions._foreach_minimum_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'TensorList'}, {'is_kwarg_only': 'False', 'name': 'scalar', 'simple_type': 'Scalar'}], + torch._C._VariableFunctions._foreach_minimum_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'TensorList'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'TensorList'}], + torch._C._VariableFunctions._foreach_minimum_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'TensorList'}, {'is_kwarg_only': 'False', 'name': 'scalars', 'simple_type': 'ScalarList'}], + torch._C._VariableFunctions._foreach_addcdiv: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'TensorList'}, {'is_kwarg_only': 'False', 'name': 'tensor1', 'simple_type': 'TensorList'}, {'is_kwarg_only': 'False', 'name': 'tensor2', 'simple_type': 'TensorList'}], + torch._C._VariableFunctions._foreach_addcdiv: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'TensorList'}, {'is_kwarg_only': 'False', 'name': 'tensor1', 'simple_type': 'TensorList'}, {'is_kwarg_only': 'False', 'name': 'tensor2', 'simple_type': 'TensorList'}, {'is_kwarg_only': 'False', 'name': 'scalars', 'simple_type': 'ScalarList'}], + torch._C._VariableFunctions._foreach_addcdiv: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'TensorList'}, {'is_kwarg_only': 'False', 'name': 'tensor1', 'simple_type': 'TensorList'}, {'is_kwarg_only': 'False', 'name': 'tensor2', 'simple_type': 'TensorList'}, {'is_kwarg_only': 'False', 'name': 'scalars', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions._foreach_addcdiv_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'TensorList'}, {'is_kwarg_only': 'False', 'name': 'tensor1', 'simple_type': 'TensorList'}, {'is_kwarg_only': 'False', 'name': 'tensor2', 'simple_type': 'TensorList'}], + torch._C._VariableFunctions._foreach_addcdiv_: [{'is_kwarg_only': 'False', 'name': 'self', 
'simple_type': 'TensorList'}, {'is_kwarg_only': 'False', 'name': 'tensor1', 'simple_type': 'TensorList'}, {'is_kwarg_only': 'False', 'name': 'tensor2', 'simple_type': 'TensorList'}, {'is_kwarg_only': 'False', 'name': 'scalars', 'simple_type': 'ScalarList'}], + torch._C._VariableFunctions._foreach_addcdiv_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'TensorList'}, {'is_kwarg_only': 'False', 'name': 'tensor1', 'simple_type': 'TensorList'}, {'is_kwarg_only': 'False', 'name': 'tensor2', 'simple_type': 'TensorList'}, {'is_kwarg_only': 'False', 'name': 'scalars', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions._foreach_addcmul: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'TensorList'}, {'is_kwarg_only': 'False', 'name': 'tensor1', 'simple_type': 'TensorList'}, {'is_kwarg_only': 'False', 'name': 'tensor2', 'simple_type': 'TensorList'}], + torch._C._VariableFunctions._foreach_addcmul: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'TensorList'}, {'is_kwarg_only': 'False', 'name': 'tensor1', 'simple_type': 'TensorList'}, {'is_kwarg_only': 'False', 'name': 'tensor2', 'simple_type': 'TensorList'}, {'is_kwarg_only': 'False', 'name': 'scalars', 'simple_type': 'ScalarList'}], + torch._C._VariableFunctions._foreach_addcmul: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'TensorList'}, {'is_kwarg_only': 'False', 'name': 'tensor1', 'simple_type': 'TensorList'}, {'is_kwarg_only': 'False', 'name': 'tensor2', 'simple_type': 'TensorList'}, {'is_kwarg_only': 'False', 'name': 'scalars', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions._foreach_addcmul_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'TensorList'}, {'is_kwarg_only': 'False', 'name': 'tensor1', 'simple_type': 'TensorList'}, {'is_kwarg_only': 'False', 'name': 'tensor2', 'simple_type': 'TensorList'}], + torch._C._VariableFunctions._foreach_addcmul_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'TensorList'}, {'is_kwarg_only': 'False', 'name': 'tensor1', 'simple_type': 'TensorList'}, {'is_kwarg_only': 'False', 'name': 'tensor2', 'simple_type': 'TensorList'}, {'is_kwarg_only': 'False', 'name': 'scalars', 'simple_type': 'ScalarList'}], + torch._C._VariableFunctions._foreach_addcmul_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'TensorList'}, {'is_kwarg_only': 'False', 'name': 'tensor1', 'simple_type': 'TensorList'}, {'is_kwarg_only': 'False', 'name': 'tensor2', 'simple_type': 'TensorList'}, {'is_kwarg_only': 'False', 'name': 'scalars', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions._foreach_abs: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'TensorList'}], + torch._C._VariableFunctions._foreach_abs_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'TensorList'}], + torch._C._VariableFunctions._foreach_acos: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'TensorList'}], + torch._C._VariableFunctions._foreach_acos_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'TensorList'}], + torch._C._VariableFunctions._foreach_asin: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'TensorList'}], + torch._C._VariableFunctions._foreach_asin_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'TensorList'}], + torch._C._VariableFunctions._foreach_atan: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'TensorList'}], + torch._C._VariableFunctions._foreach_atan_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'TensorList'}], + 
torch._C._VariableFunctions._foreach_ceil: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'TensorList'}], + torch._C._VariableFunctions._foreach_ceil_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'TensorList'}], + torch._C._VariableFunctions._foreach_cos: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'TensorList'}], + torch._C._VariableFunctions._foreach_cos_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'TensorList'}], + torch._C._VariableFunctions._foreach_cosh: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'TensorList'}], + torch._C._VariableFunctions._foreach_cosh_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'TensorList'}], + torch._C._VariableFunctions._foreach_erf: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'TensorList'}], + torch._C._VariableFunctions._foreach_erf_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'TensorList'}], + torch._C._VariableFunctions._foreach_erfc: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'TensorList'}], + torch._C._VariableFunctions._foreach_erfc_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'TensorList'}], + torch._C._VariableFunctions._foreach_exp: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'TensorList'}], + torch._C._VariableFunctions._foreach_exp_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'TensorList'}], + torch._C._VariableFunctions._foreach_expm1: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'TensorList'}], + torch._C._VariableFunctions._foreach_expm1_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'TensorList'}], + torch._C._VariableFunctions._foreach_floor: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'TensorList'}], + torch._C._VariableFunctions._foreach_floor_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'TensorList'}], + torch._C._VariableFunctions._foreach_frac: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'TensorList'}], + torch._C._VariableFunctions._foreach_frac_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'TensorList'}], + torch._C._VariableFunctions._foreach_lerp: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'TensorList'}, {'is_kwarg_only': 'False', 'name': 'tensors1', 'simple_type': 'TensorList'}, {'is_kwarg_only': 'False', 'name': 'weights', 'simple_type': 'TensorList'}], + torch._C._VariableFunctions._foreach_lerp: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'TensorList'}, {'is_kwarg_only': 'False', 'name': 'tensors1', 'simple_type': 'TensorList'}, {'is_kwarg_only': 'False', 'name': 'weight', 'simple_type': 'Scalar'}], + torch._C._VariableFunctions._foreach_lerp_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'TensorList'}, {'is_kwarg_only': 'False', 'name': 'tensors1', 'simple_type': 'TensorList'}, {'is_kwarg_only': 'False', 'name': 'weights', 'simple_type': 'TensorList'}], + torch._C._VariableFunctions._foreach_lerp_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'TensorList'}, {'is_kwarg_only': 'False', 'name': 'tensors1', 'simple_type': 'TensorList'}, {'is_kwarg_only': 'False', 'name': 'weight', 'simple_type': 'Scalar'}], + torch._C._VariableFunctions._foreach_lgamma: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'TensorList'}], + torch._C._VariableFunctions._foreach_lgamma_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'TensorList'}], + torch._C._VariableFunctions._foreach_log: 
[{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'TensorList'}], + torch._C._VariableFunctions._foreach_log_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'TensorList'}], + torch._C._VariableFunctions._foreach_log10: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'TensorList'}], + torch._C._VariableFunctions._foreach_log10_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'TensorList'}], + torch._C._VariableFunctions._foreach_log1p: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'TensorList'}], + torch._C._VariableFunctions._foreach_log1p_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'TensorList'}], + torch._C._VariableFunctions._foreach_log2: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'TensorList'}], + torch._C._VariableFunctions._foreach_log2_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'TensorList'}], + torch._C._VariableFunctions._foreach_neg: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'TensorList'}], + torch._C._VariableFunctions._foreach_neg_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'TensorList'}], + torch._C._VariableFunctions._foreach_norm: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'TensorList'}], + torch._C._VariableFunctions._foreach_pow: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'TensorList'}, {'is_kwarg_only': 'False', 'name': 'exponent', 'simple_type': 'TensorList'}], + torch._C._VariableFunctions._foreach_pow: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'TensorList'}, {'is_kwarg_only': 'False', 'name': 'exponent', 'simple_type': 'Scalar'}], + torch._C._VariableFunctions._foreach_pow: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'TensorList'}, {'is_kwarg_only': 'False', 'name': 'exponent', 'simple_type': 'ScalarList'}], + torch._C._VariableFunctions._foreach_pow: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Scalar'}, {'is_kwarg_only': 'False', 'name': 'exponent', 'simple_type': 'TensorList'}], + torch._C._VariableFunctions._foreach_pow_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'TensorList'}, {'is_kwarg_only': 'False', 'name': 'exponent', 'simple_type': 'TensorList'}], + torch._C._VariableFunctions._foreach_pow_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'TensorList'}, {'is_kwarg_only': 'False', 'name': 'exponent', 'simple_type': 'Scalar'}], + torch._C._VariableFunctions._foreach_pow_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'TensorList'}, {'is_kwarg_only': 'False', 'name': 'exponent', 'simple_type': 'ScalarList'}], + torch._C._VariableFunctions._foreach_reciprocal: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'TensorList'}], + torch._C._VariableFunctions._foreach_reciprocal_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'TensorList'}], + torch._C._VariableFunctions._foreach_round: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'TensorList'}], + torch._C._VariableFunctions._foreach_round_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'TensorList'}], + torch._C._VariableFunctions._foreach_sigmoid: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'TensorList'}], + torch._C._VariableFunctions._foreach_sigmoid_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'TensorList'}], + torch._C._VariableFunctions._foreach_sign: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'TensorList'}], + torch._C._VariableFunctions._foreach_sign_: 
[{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'TensorList'}], + torch._C._VariableFunctions._foreach_sin: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'TensorList'}], + torch._C._VariableFunctions._foreach_sin_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'TensorList'}], + torch._C._VariableFunctions._foreach_sinh: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'TensorList'}], + torch._C._VariableFunctions._foreach_sinh_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'TensorList'}], + torch._C._VariableFunctions._foreach_sqrt: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'TensorList'}], + torch._C._VariableFunctions._foreach_sqrt_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'TensorList'}], + torch._C._VariableFunctions._foreach_tan: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'TensorList'}], + torch._C._VariableFunctions._foreach_tan_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'TensorList'}], + torch._C._VariableFunctions._foreach_tanh: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'TensorList'}], + torch._C._VariableFunctions._foreach_tanh_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'TensorList'}], + torch._C._VariableFunctions._foreach_trunc: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'TensorList'}], + torch._C._VariableFunctions._foreach_trunc_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'TensorList'}], + torch._C._VariableFunctions._foreach_zero_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'TensorList'}], + torch._C._VariableFunctions._foreach_copy_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'TensorList'}, {'is_kwarg_only': 'False', 'name': 'src', 'simple_type': 'TensorList'}], + torch._C._VariableFunctions.bucketize: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'boundaries', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.bucketize: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'boundaries', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.bucketize: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Scalar'}, {'is_kwarg_only': 'False', 'name': 'boundaries', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.searchsorted: [{'is_kwarg_only': 'False', 'name': 'sorted_sequence', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.searchsorted: [{'is_kwarg_only': 'False', 'name': 'sorted_sequence', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.searchsorted: [{'is_kwarg_only': 'False', 'name': 'sorted_sequence', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Scalar'}], + torch._C._VariableFunctions.searchsorted: [{'is_kwarg_only': 'False', 'name': 'sorted_sequence', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Scalar'}], + torch._C._VariableFunctions._convert_indices_from_coo_to_csr: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'size', 'simple_type': 'int64_t'}], + torch._C._VariableFunctions._convert_indices_from_coo_to_csr: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 
'size', 'simple_type': 'int64_t'}], + torch._C._VariableFunctions._convert_indices_from_csr_to_coo: [{'is_kwarg_only': 'False', 'name': 'crow_indices', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'col_indices', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions._convert_indices_from_csr_to_coo: [{'is_kwarg_only': 'False', 'name': 'crow_indices', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'col_indices', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.mkldnn_adaptive_avg_pool2d: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'output_size', 'simple_type': 'IntArrayRef', 'size': 2}], + torch._C._VariableFunctions.mkldnn_adaptive_avg_pool2d: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'output_size', 'simple_type': 'IntArrayRef', 'size': 2}], + torch._C._VariableFunctions._adaptive_avg_pool2d: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'output_size', 'simple_type': 'SymIntArrayRef', 'size': 2}], + torch._C._VariableFunctions._adaptive_avg_pool3d: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'output_size', 'simple_type': 'SymIntArrayRef', 'size': 3}], + torch._C._VariableFunctions.column_stack: [{'is_kwarg_only': 'False', 'name': 'tensors', 'simple_type': 'TensorList'}], + torch._C._VariableFunctions.column_stack: [{'is_kwarg_only': 'False', 'name': 'tensors', 'simple_type': 'TensorList'}], + torch._C._VariableFunctions.isfinite: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.isinf: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.isposinf: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.isposinf: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.isneginf: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.isneginf: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions._add_batch_dim: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'batch_dim', 'simple_type': 'int64_t'}, {'is_kwarg_only': 'False', 'name': 'level', 'simple_type': 'int64_t'}], + torch._C._VariableFunctions._remove_batch_dim: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'level', 'simple_type': 'int64_t'}, {'is_kwarg_only': 'False', 'name': 'batch_size', 'simple_type': 'int64_t'}, {'is_kwarg_only': 'False', 'name': 'out_dim', 'simple_type': 'int64_t'}], + torch._C._VariableFunctions._linalg_det: [{'is_kwarg_only': 'False', 'name': 'A', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions._linalg_det: [{'is_kwarg_only': 'False', 'name': 'A', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.det: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions._linalg_slogdet: [{'is_kwarg_only': 'False', 'name': 'A', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions._linalg_slogdet: [{'is_kwarg_only': 'False', 'name': 'A', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.slogdet: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + 
torch._C._VariableFunctions.slogdet: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.logdet: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions._linalg_eigh: [{'is_kwarg_only': 'False', 'name': 'A', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions._linalg_eigh: [{'is_kwarg_only': 'False', 'name': 'A', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.inverse: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.inverse: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.inner: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.inner: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.outer: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'vec2', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.outer: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'vec2', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.ger: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'vec2', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.ger: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'vec2', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions._linalg_svd: [{'is_kwarg_only': 'False', 'name': 'A', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions._linalg_svd: [{'is_kwarg_only': 'False', 'name': 'A', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions._linalg_solve_ex: [{'is_kwarg_only': 'False', 'name': 'A', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'B', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions._linalg_solve_ex: [{'is_kwarg_only': 'False', 'name': 'A', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'B', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions._test_serialization_subcmul: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions._test_autograd_multiple_dispatch: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions._test_autograd_multiple_dispatch: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'b', 'simple_type': 'bool'}], + torch._C._VariableFunctions._test_autograd_multiple_dispatch_view: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions._test_autograd_multiple_dispatch_view_copy: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions._test_autograd_multiple_dispatch_view_copy: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.segment_reduce: [{'is_kwarg_only': 'False', 'name': 'data', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'reduce', 'simple_type': 'c10::string_view'}], + torch._C._VariableFunctions._nested_tensor_from_tensor_list: [{'is_kwarg_only': 
'False', 'name': 'list', 'simple_type': 'TensorList'}], + torch._C._VariableFunctions._fw_primal_copy: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'level', 'simple_type': 'int64_t'}], + torch._C._VariableFunctions._fw_primal_copy: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'level', 'simple_type': 'int64_t'}], + torch._C._VariableFunctions._make_dual_copy: [{'is_kwarg_only': 'False', 'name': 'primal', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'tangent', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'level', 'simple_type': 'int64_t'}], + torch._C._VariableFunctions._make_dual_copy: [{'is_kwarg_only': 'False', 'name': 'primal', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'tangent', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'level', 'simple_type': 'int64_t'}], + torch._C._VariableFunctions.view_as_real_copy: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.view_as_real_copy: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.view_as_complex_copy: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.view_as_complex_copy: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions._conj_copy: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions._conj_copy: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions._neg_view_copy: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions._neg_view_copy: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.as_strided_copy: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'size', 'simple_type': 'SymIntArrayRef'}, {'is_kwarg_only': 'False', 'name': 'stride', 'simple_type': 'SymIntArrayRef'}], + torch._C._VariableFunctions.as_strided_copy: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'size', 'simple_type': 'SymIntArrayRef'}, {'is_kwarg_only': 'False', 'name': 'stride', 'simple_type': 'SymIntArrayRef'}], + torch._C._VariableFunctions._sparse_broadcast_to_copy: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'size', 'simple_type': 'IntArrayRef'}], + torch._C._VariableFunctions._sparse_broadcast_to_copy: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'size', 'simple_type': 'IntArrayRef'}], + torch._C._VariableFunctions.diagonal_copy: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.diagonal_copy: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.expand_copy: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'size', 'simple_type': 'SymIntArrayRef'}], + torch._C._VariableFunctions.expand_copy: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'size', 'simple_type': 'SymIntArrayRef'}], + torch._C._VariableFunctions.permute_copy: [{'is_kwarg_only': 'False', 'name': 
'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'dims', 'simple_type': 'IntArrayRef'}], + torch._C._VariableFunctions.permute_copy: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'dims', 'simple_type': 'IntArrayRef'}], + torch._C._VariableFunctions._reshape_alias_copy: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'size', 'simple_type': 'SymIntArrayRef'}, {'is_kwarg_only': 'False', 'name': 'stride', 'simple_type': 'SymIntArrayRef'}], + torch._C._VariableFunctions._reshape_alias_copy: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'size', 'simple_type': 'SymIntArrayRef'}, {'is_kwarg_only': 'False', 'name': 'stride', 'simple_type': 'SymIntArrayRef'}], + torch._C._VariableFunctions.select_copy: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'dim', 'simple_type': 'int64_t'}, {'is_kwarg_only': 'False', 'name': 'index', 'simple_type': 'SymInt'}], + torch._C._VariableFunctions.select_copy: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'dim', 'simple_type': 'int64_t'}, {'is_kwarg_only': 'False', 'name': 'index', 'simple_type': 'SymInt'}], + torch._C._VariableFunctions.detach_copy: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.detach_copy: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.slice_copy: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.slice_copy: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.split_copy: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'split_size', 'simple_type': 'SymInt'}], + torch._C._VariableFunctions.split_copy: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'split_size', 'simple_type': 'SymInt'}], + torch._C._VariableFunctions.split_with_sizes_copy: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'split_sizes', 'simple_type': 'SymIntArrayRef'}], + torch._C._VariableFunctions.split_with_sizes_copy: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'split_sizes', 'simple_type': 'SymIntArrayRef'}], + torch._C._VariableFunctions.squeeze_copy: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.squeeze_copy: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'dim', 'simple_type': 'int64_t'}], + torch._C._VariableFunctions.squeeze_copy: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'dim', 'simple_type': 'IntArrayRef'}], + torch._C._VariableFunctions.squeeze_copy: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.squeeze_copy: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'dim', 'simple_type': 'int64_t'}], + torch._C._VariableFunctions.squeeze_copy: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'dim', 
'simple_type': 'IntArrayRef'}], + torch._C._VariableFunctions.t_copy: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.t_copy: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.transpose_copy: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'dim0', 'simple_type': 'int64_t'}, {'is_kwarg_only': 'False', 'name': 'dim1', 'simple_type': 'int64_t'}], + torch._C._VariableFunctions.transpose_copy: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'dim0', 'simple_type': 'int64_t'}, {'is_kwarg_only': 'False', 'name': 'dim1', 'simple_type': 'int64_t'}], + torch._C._VariableFunctions.unsqueeze_copy: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'dim', 'simple_type': 'int64_t'}], + torch._C._VariableFunctions.unsqueeze_copy: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'dim', 'simple_type': 'int64_t'}], + torch._C._VariableFunctions._indices_copy: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions._indices_copy: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions._values_copy: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions._values_copy: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.indices_copy: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.indices_copy: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.values_copy: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.values_copy: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.crow_indices_copy: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.crow_indices_copy: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.col_indices_copy: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.col_indices_copy: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.ccol_indices_copy: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.ccol_indices_copy: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.row_indices_copy: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.row_indices_copy: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.unbind_copy: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.unbind_copy: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.view_copy: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'size', 'simple_type': 'SymIntArrayRef'}], + torch._C._VariableFunctions.view_copy: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, 
{'is_kwarg_only': 'False', 'name': 'dtype', 'simple_type': 'ScalarType'}], + torch._C._VariableFunctions.view_copy: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'size', 'simple_type': 'SymIntArrayRef'}], + torch._C._VariableFunctions.view_copy: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'dtype', 'simple_type': 'ScalarType'}], + torch._C._VariableFunctions.unfold_copy: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'dimension', 'simple_type': 'int64_t'}, {'is_kwarg_only': 'False', 'name': 'size', 'simple_type': 'int64_t'}, {'is_kwarg_only': 'False', 'name': 'step', 'simple_type': 'int64_t'}], + torch._C._VariableFunctions.unfold_copy: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'dimension', 'simple_type': 'int64_t'}, {'is_kwarg_only': 'False', 'name': 'size', 'simple_type': 'int64_t'}, {'is_kwarg_only': 'False', 'name': 'step', 'simple_type': 'int64_t'}], + torch._C._VariableFunctions.alias_copy: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions.alias_copy: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions._nested_tensor_softmax_with_shape: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'query', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions._transformer_encoder_layer_fwd: [{'is_kwarg_only': 'False', 'name': 'src', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'embed_dim', 'simple_type': 'int64_t'}, {'is_kwarg_only': 'False', 'name': 'num_heads', 'simple_type': 'int64_t'}, {'is_kwarg_only': 'False', 'name': 'qkv_weight', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'qkv_bias', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'proj_weight', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'proj_bias', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'use_gelu', 'simple_type': 'bool'}, {'is_kwarg_only': 'False', 'name': 'norm_first', 'simple_type': 'bool'}, {'is_kwarg_only': 'False', 'name': 'eps', 'simple_type': 'double'}, {'is_kwarg_only': 'False', 'name': 'norm_weight_1', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'norm_bias_1', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'norm_weight_2', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'norm_bias_2', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'ffn_weight_1', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'ffn_bias_1', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'ffn_weight_2', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'ffn_bias_2', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions._native_multi_head_attention: [{'is_kwarg_only': 'False', 'name': 'query', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'key', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'value', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'embed_dim', 'simple_type': 'int64_t'}, {'is_kwarg_only': 'False', 'name': 'num_head', 'simple_type': 'int64_t'}, {'is_kwarg_only': 'False', 'name': 'qkv_weight', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'qkv_bias', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 
'proj_weight', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'proj_bias', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions._fused_sdp_choice: [{'is_kwarg_only': 'False', 'name': 'query', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'key', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'value', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions._scaled_dot_product_attention_math: [{'is_kwarg_only': 'False', 'name': 'query', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'key', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'value', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions._scaled_dot_product_flash_attention: [{'is_kwarg_only': 'False', 'name': 'query', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'key', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'value', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions._scaled_dot_product_efficient_attention: [{'is_kwarg_only': 'False', 'name': 'query', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'key', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'value', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'attn_bias', 'simple_type': 'Tensor?'}, {'is_kwarg_only': 'False', 'name': 'compute_log_sumexp', 'simple_type': 'bool'}], + torch._C._VariableFunctions._triton_scaled_dot_attention: [{'is_kwarg_only': 'False', 'name': 'q', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'k', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'v', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions._fill_mem_eff_dropout_mask_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'dropout_p', 'simple_type': 'double'}, {'is_kwarg_only': 'False', 'name': 'seed', 'simple_type': 'int64_t'}, {'is_kwarg_only': 'False', 'name': 'offset', 'simple_type': 'int64_t'}], + torch._C._VariableFunctions._triton_multi_head_attention: [{'is_kwarg_only': 'False', 'name': 'query', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'key', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'value', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'embed_dim', 'simple_type': 'int64_t'}, {'is_kwarg_only': 'False', 'name': 'num_head', 'simple_type': 'int64_t'}, {'is_kwarg_only': 'False', 'name': 'qkv_weight', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'qkv_bias', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'proj_weight', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'proj_bias', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions._foobar: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._VariableFunctions._fused_adam_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'TensorList'}, {'is_kwarg_only': 'False', 'name': 'grads', 'simple_type': 'TensorList'}, {'is_kwarg_only': 'False', 'name': 'exp_avgs', 'simple_type': 'TensorList'}, {'is_kwarg_only': 'False', 'name': 'exp_avg_sqs', 'simple_type': 'TensorList'}, {'is_kwarg_only': 'False', 'name': 'max_exp_avg_sqs', 'simple_type': 'TensorList'}, {'is_kwarg_only': 'False', 'name': 'state_steps', 'simple_type': 'TensorList'}, {'is_kwarg_only': 'True', 'name': 'lr', 'simple_type': 'double'}, {'is_kwarg_only': 'True', 'name': 'beta1', 'simple_type': 'double'}, {'is_kwarg_only': 'True', 'name': 'beta2', 'simple_type': 'double'}, {'is_kwarg_only': 'True', 'name': 
'weight_decay', 'simple_type': 'double'}, {'is_kwarg_only': 'True', 'name': 'eps', 'simple_type': 'double'}, {'is_kwarg_only': 'True', 'name': 'amsgrad', 'simple_type': 'bool'}, {'is_kwarg_only': 'True', 'name': 'maximize', 'simple_type': 'bool'}], + torch._C._VariableFunctions._fused_adam_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'TensorList'}, {'is_kwarg_only': 'False', 'name': 'grads', 'simple_type': 'TensorList'}, {'is_kwarg_only': 'False', 'name': 'exp_avgs', 'simple_type': 'TensorList'}, {'is_kwarg_only': 'False', 'name': 'exp_avg_sqs', 'simple_type': 'TensorList'}, {'is_kwarg_only': 'False', 'name': 'max_exp_avg_sqs', 'simple_type': 'TensorList'}, {'is_kwarg_only': 'False', 'name': 'state_steps', 'simple_type': 'TensorList'}, {'is_kwarg_only': 'True', 'name': 'lr', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'True', 'name': 'beta1', 'simple_type': 'double'}, {'is_kwarg_only': 'True', 'name': 'beta2', 'simple_type': 'double'}, {'is_kwarg_only': 'True', 'name': 'weight_decay', 'simple_type': 'double'}, {'is_kwarg_only': 'True', 'name': 'eps', 'simple_type': 'double'}, {'is_kwarg_only': 'True', 'name': 'amsgrad', 'simple_type': 'bool'}, {'is_kwarg_only': 'True', 'name': 'maximize', 'simple_type': 'bool'}], + torch._C._VariableFunctions._fused_adamw_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'TensorList'}, {'is_kwarg_only': 'False', 'name': 'grads', 'simple_type': 'TensorList'}, {'is_kwarg_only': 'False', 'name': 'exp_avgs', 'simple_type': 'TensorList'}, {'is_kwarg_only': 'False', 'name': 'exp_avg_sqs', 'simple_type': 'TensorList'}, {'is_kwarg_only': 'False', 'name': 'max_exp_avg_sqs', 'simple_type': 'TensorList'}, {'is_kwarg_only': 'False', 'name': 'state_steps', 'simple_type': 'TensorList'}, {'is_kwarg_only': 'True', 'name': 'lr', 'simple_type': 'double'}, {'is_kwarg_only': 'True', 'name': 'beta1', 'simple_type': 'double'}, {'is_kwarg_only': 'True', 'name': 'beta2', 'simple_type': 'double'}, {'is_kwarg_only': 'True', 'name': 'weight_decay', 'simple_type': 'double'}, {'is_kwarg_only': 'True', 'name': 'eps', 'simple_type': 'double'}, {'is_kwarg_only': 'True', 'name': 'amsgrad', 'simple_type': 'bool'}, {'is_kwarg_only': 'True', 'name': 'maximize', 'simple_type': 'bool'}], + torch._C._VariableFunctions._fused_adamw_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'TensorList'}, {'is_kwarg_only': 'False', 'name': 'grads', 'simple_type': 'TensorList'}, {'is_kwarg_only': 'False', 'name': 'exp_avgs', 'simple_type': 'TensorList'}, {'is_kwarg_only': 'False', 'name': 'exp_avg_sqs', 'simple_type': 'TensorList'}, {'is_kwarg_only': 'False', 'name': 'max_exp_avg_sqs', 'simple_type': 'TensorList'}, {'is_kwarg_only': 'False', 'name': 'state_steps', 'simple_type': 'TensorList'}, {'is_kwarg_only': 'True', 'name': 'lr', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'True', 'name': 'beta1', 'simple_type': 'double'}, {'is_kwarg_only': 'True', 'name': 'beta2', 'simple_type': 'double'}, {'is_kwarg_only': 'True', 'name': 'weight_decay', 'simple_type': 'double'}, {'is_kwarg_only': 'True', 'name': 'eps', 'simple_type': 'double'}, {'is_kwarg_only': 'True', 'name': 'amsgrad', 'simple_type': 'bool'}, {'is_kwarg_only': 'True', 'name': 'maximize', 'simple_type': 'bool'}], + torch._C._VariableFunctions._propagate_xla_data: [{'is_kwarg_only': 'False', 'name': 'input', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'output', 'simple_type': 'Tensor'}], + torch._C._nn.binary_cross_entropy: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, 
{'is_kwarg_only': 'False', 'name': 'target', 'simple_type': 'Tensor'}], + torch._C._nn.binary_cross_entropy: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'target', 'simple_type': 'Tensor'}], + torch._C._nn.linear: [{'is_kwarg_only': 'False', 'name': 'input', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'weight', 'simple_type': 'Tensor'}], + torch._C._nn.linear: [{'is_kwarg_only': 'False', 'name': 'input', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'weight', 'simple_type': 'Tensor'}], + torch._C._nn.mkldnn_linear: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'weight', 'simple_type': 'Tensor'}], + torch._C._nn.relu6: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._nn.relu6_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._nn.gelu: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._nn.gelu: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._nn.gelu_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._nn.silu: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._nn.silu: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._nn.silu_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._nn.mish: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._nn.mish: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._nn.mish_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._nn.one_hot: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._nn.mkldnn_reorder_conv2d_weight: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._nn.mkldnn_reorder_conv3d_weight: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._nn.cross_entropy_loss: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'target', 'simple_type': 'Tensor'}], + torch._C._nn.mse_loss: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'target', 'simple_type': 'Tensor'}], + torch._C._nn.mse_loss: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'target', 'simple_type': 'Tensor'}], + torch._C._nn.l1_loss: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'target', 'simple_type': 'Tensor'}], + torch._C._nn.multi_margin_loss: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'target', 'simple_type': 'Tensor'}], + torch._C._nn.multi_margin_loss: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'target', 'simple_type': 'Tensor'}], + torch._C._nn.multilabel_margin_loss: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'target', 'simple_type': 'Tensor'}], + torch._C._nn.multilabel_margin_loss: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'target', 'simple_type': 'Tensor'}], + torch._C._nn.nll_loss: [{'is_kwarg_only': 'False', 'name': 
'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'target', 'simple_type': 'Tensor'}], + torch._C._nn.nll_loss: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'target', 'simple_type': 'Tensor'}], + torch._C._nn.nll_loss_nd: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'target', 'simple_type': 'Tensor'}], + torch._C._nn.nll_loss2d: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'target', 'simple_type': 'Tensor'}], + torch._C._nn.nll_loss2d: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'target', 'simple_type': 'Tensor'}], + torch._C._nn.smooth_l1_loss: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'target', 'simple_type': 'Tensor'}], + torch._C._nn.smooth_l1_loss: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'target', 'simple_type': 'Tensor'}], + torch._C._nn.huber_loss: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'target', 'simple_type': 'Tensor'}], + torch._C._nn.huber_loss: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'target', 'simple_type': 'Tensor'}], + torch._C._nn.soft_margin_loss: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'target', 'simple_type': 'Tensor'}], + torch._C._nn.soft_margin_loss: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'target', 'simple_type': 'Tensor'}], + torch._C._nn.elu: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._nn.elu: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._nn.elu_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._nn.glu: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._nn.glu: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._nn.hardsigmoid: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._nn.hardsigmoid: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._nn.hardsigmoid_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._nn.hardtanh: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._nn.hardtanh: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._nn.hardtanh_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._nn.hardswish: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._nn.hardswish: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._nn.hardswish_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._nn.leaky_relu: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._nn.leaky_relu: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._nn.leaky_relu_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._nn.log_sigmoid: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + 
torch._C._nn.log_sigmoid: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._nn.rrelu_with_noise: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'noise', 'simple_type': 'Tensor'}], + torch._C._nn.rrelu_with_noise: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'noise', 'simple_type': 'Tensor'}], + torch._C._nn.rrelu_with_noise_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'noise', 'simple_type': 'Tensor'}], + torch._C._nn.softplus: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._nn.softplus: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._nn.softshrink: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._nn.softshrink: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._nn.adaptive_avg_pool2d: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'output_size', 'simple_type': 'SymIntArrayRef', 'size': 2}], + torch._C._nn.adaptive_avg_pool2d: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'output_size', 'simple_type': 'SymIntArrayRef', 'size': 2}], + torch._C._nn.adaptive_avg_pool3d: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'output_size', 'simple_type': 'SymIntArrayRef', 'size': 3}], + torch._C._nn.adaptive_avg_pool3d: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'output_size', 'simple_type': 'SymIntArrayRef', 'size': 3}], + torch._C._nn.adaptive_max_pool2d: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'output_size', 'simple_type': 'IntArrayRef', 'size': 2}], + torch._C._nn.adaptive_max_pool2d: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'output_size', 'simple_type': 'IntArrayRef', 'size': 2}], + torch._C._nn.adaptive_max_pool3d: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'output_size', 'simple_type': 'IntArrayRef', 'size': 3}], + torch._C._nn.adaptive_max_pool3d: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'output_size', 'simple_type': 'IntArrayRef', 'size': 3}], + torch._C._nn.avg_pool2d: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'kernel_size', 'simple_type': 'IntArrayRef', 'size': 2}], + torch._C._nn.avg_pool2d: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'kernel_size', 'simple_type': 'IntArrayRef', 'size': 2}], + torch._C._nn.avg_pool3d: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'kernel_size', 'simple_type': 'IntArrayRef', 'size': 3}], + torch._C._nn.avg_pool3d: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'kernel_size', 'simple_type': 'IntArrayRef', 'size': 3}], + torch._C._nn.fractional_max_pool2d: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'kernel_size', 'simple_type': 
'IntArrayRef', 'size': 2}, {'is_kwarg_only': 'False', 'name': 'output_size', 'simple_type': 'IntArrayRef', 'size': 2}, {'is_kwarg_only': 'False', 'name': 'random_samples', 'simple_type': 'Tensor'}], + torch._C._nn.fractional_max_pool2d: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'kernel_size', 'simple_type': 'IntArrayRef', 'size': 2}, {'is_kwarg_only': 'False', 'name': 'output_size', 'simple_type': 'IntArrayRef', 'size': 2}, {'is_kwarg_only': 'False', 'name': 'random_samples', 'simple_type': 'Tensor'}], + torch._C._nn.fractional_max_pool3d: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'kernel_size', 'simple_type': 'IntArrayRef', 'size': 3}, {'is_kwarg_only': 'False', 'name': 'output_size', 'simple_type': 'IntArrayRef', 'size': 3}, {'is_kwarg_only': 'False', 'name': 'random_samples', 'simple_type': 'Tensor'}], + torch._C._nn.fractional_max_pool3d: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'kernel_size', 'simple_type': 'IntArrayRef', 'size': 3}, {'is_kwarg_only': 'False', 'name': 'output_size', 'simple_type': 'IntArrayRef', 'size': 3}, {'is_kwarg_only': 'False', 'name': 'random_samples', 'simple_type': 'Tensor'}], + torch._C._nn.max_pool2d_with_indices: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'kernel_size', 'simple_type': 'IntArrayRef', 'size': 2}], + torch._C._nn.max_pool2d_with_indices: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'kernel_size', 'simple_type': 'IntArrayRef', 'size': 2}], + torch._C._nn.max_pool3d_with_indices: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'kernel_size', 'simple_type': 'IntArrayRef', 'size': 3}], + torch._C._nn.max_pool3d_with_indices: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'kernel_size', 'simple_type': 'IntArrayRef', 'size': 3}], + torch._C._nn.max_unpool2d: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'indices', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'output_size', 'simple_type': 'SymIntArrayRef', 'size': 2}], + torch._C._nn.max_unpool2d: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'indices', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'output_size', 'simple_type': 'SymIntArrayRef', 'size': 2}], + torch._C._nn.max_unpool3d: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'indices', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'output_size', 'simple_type': 'SymIntArrayRef', 'size': 3}, {'is_kwarg_only': 'False', 'name': 'stride', 'simple_type': 'IntArrayRef', 'size': 3}, {'is_kwarg_only': 'False', 'name': 'padding', 'simple_type': 'IntArrayRef', 'size': 3}], + torch._C._nn.max_unpool3d: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'indices', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'output_size', 'simple_type': 'SymIntArrayRef', 'size': 3}, {'is_kwarg_only': 'False', 'name': 'stride', 'simple_type': 'IntArrayRef', 'size': 3}, {'is_kwarg_only': 'False', 'name': 'padding', 'simple_type': 'IntArrayRef', 'size': 3}], + 
torch._C._nn.reflection_pad1d: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'padding', 'simple_type': 'SymIntArrayRef', 'size': 2}], + torch._C._nn.reflection_pad1d: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'padding', 'simple_type': 'SymIntArrayRef', 'size': 2}], + torch._C._nn.reflection_pad2d: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'padding', 'simple_type': 'SymIntArrayRef', 'size': 4}], + torch._C._nn.reflection_pad2d: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'padding', 'simple_type': 'SymIntArrayRef', 'size': 4}], + torch._C._nn.reflection_pad3d: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'padding', 'simple_type': 'SymIntArrayRef', 'size': 6}], + torch._C._nn.reflection_pad3d: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'padding', 'simple_type': 'SymIntArrayRef', 'size': 6}], + torch._C._nn.replication_pad1d: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'padding', 'simple_type': 'SymIntArrayRef', 'size': 2}], + torch._C._nn.replication_pad1d: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'padding', 'simple_type': 'SymIntArrayRef', 'size': 2}], + torch._C._nn.replication_pad2d: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'padding', 'simple_type': 'SymIntArrayRef', 'size': 4}], + torch._C._nn.replication_pad2d: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'padding', 'simple_type': 'SymIntArrayRef', 'size': 4}], + torch._C._nn.replication_pad3d: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'padding', 'simple_type': 'SymIntArrayRef', 'size': 6}], + torch._C._nn.replication_pad3d: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'padding', 'simple_type': 'SymIntArrayRef', 'size': 6}], + torch._C._nn._pad_circular: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'pad', 'simple_type': 'SymIntArrayRef'}], + torch._C._nn._pad_enum: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'pad', 'simple_type': 'SymIntArrayRef'}, {'is_kwarg_only': 'False', 'name': 'mode', 'simple_type': 'int64_t'}], + torch._C._nn.pad: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'pad', 'simple_type': 'SymIntArrayRef'}], + torch._C._nn.upsample_linear1d: [{'is_kwarg_only': 'False', 'name': 'input', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'output_size', 'simple_type': 'SymIntArrayRef?'}, {'is_kwarg_only': 'False', 'name': 'align_corners', 'simple_type': 'bool'}, {'is_kwarg_only': 'False', 'name': 'scale_factors', 'simple_type': 'ArrayRef?'}], + torch._C._nn.upsample_linear1d: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'output_size', 'simple_type': 'SymIntArrayRef', 'size': 1}, {'is_kwarg_only': 'False', 'name': 'align_corners', 'simple_type': 'bool'}], 
+ torch._C._nn.upsample_linear1d: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'output_size', 'simple_type': 'SymIntArrayRef', 'size': 1}, {'is_kwarg_only': 'False', 'name': 'align_corners', 'simple_type': 'bool'}], + torch._C._nn.upsample_bilinear2d: [{'is_kwarg_only': 'False', 'name': 'input', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'output_size', 'simple_type': 'SymIntArrayRef?'}, {'is_kwarg_only': 'False', 'name': 'align_corners', 'simple_type': 'bool'}, {'is_kwarg_only': 'False', 'name': 'scale_factors', 'simple_type': 'ArrayRef?'}], + torch._C._nn.upsample_bilinear2d: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'output_size', 'simple_type': 'SymIntArrayRef', 'size': 2}, {'is_kwarg_only': 'False', 'name': 'align_corners', 'simple_type': 'bool'}], + torch._C._nn.upsample_bilinear2d: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'output_size', 'simple_type': 'SymIntArrayRef', 'size': 2}, {'is_kwarg_only': 'False', 'name': 'align_corners', 'simple_type': 'bool'}], + torch._C._nn._upsample_bilinear2d_aa: [{'is_kwarg_only': 'False', 'name': 'input', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'output_size', 'simple_type': 'SymIntArrayRef?'}, {'is_kwarg_only': 'False', 'name': 'align_corners', 'simple_type': 'bool'}, {'is_kwarg_only': 'False', 'name': 'scale_factors', 'simple_type': 'ArrayRef?'}], + torch._C._nn._upsample_bilinear2d_aa: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'output_size', 'simple_type': 'SymIntArrayRef', 'size': 2}, {'is_kwarg_only': 'False', 'name': 'align_corners', 'simple_type': 'bool'}], + torch._C._nn._upsample_bilinear2d_aa: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'output_size', 'simple_type': 'SymIntArrayRef', 'size': 2}, {'is_kwarg_only': 'False', 'name': 'align_corners', 'simple_type': 'bool'}], + torch._C._nn.upsample_trilinear3d: [{'is_kwarg_only': 'False', 'name': 'input', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'output_size', 'simple_type': 'SymIntArrayRef?'}, {'is_kwarg_only': 'False', 'name': 'align_corners', 'simple_type': 'bool'}, {'is_kwarg_only': 'False', 'name': 'scale_factors', 'simple_type': 'ArrayRef?'}], + torch._C._nn.upsample_trilinear3d: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'output_size', 'simple_type': 'SymIntArrayRef', 'size': 3}, {'is_kwarg_only': 'False', 'name': 'align_corners', 'simple_type': 'bool'}], + torch._C._nn.upsample_trilinear3d: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'output_size', 'simple_type': 'SymIntArrayRef', 'size': 3}, {'is_kwarg_only': 'False', 'name': 'align_corners', 'simple_type': 'bool'}], + torch._C._nn.upsample_bicubic2d: [{'is_kwarg_only': 'False', 'name': 'input', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'output_size', 'simple_type': 'SymIntArrayRef?'}, {'is_kwarg_only': 'False', 'name': 'align_corners', 'simple_type': 'bool'}, {'is_kwarg_only': 'False', 'name': 'scale_factors', 'simple_type': 'ArrayRef?'}], + torch._C._nn.upsample_bicubic2d: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'output_size', 'simple_type': 'SymIntArrayRef', 'size': 
2}, {'is_kwarg_only': 'False', 'name': 'align_corners', 'simple_type': 'bool'}], + torch._C._nn.upsample_bicubic2d: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'output_size', 'simple_type': 'SymIntArrayRef', 'size': 2}, {'is_kwarg_only': 'False', 'name': 'align_corners', 'simple_type': 'bool'}], + torch._C._nn._upsample_bicubic2d_aa: [{'is_kwarg_only': 'False', 'name': 'input', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'output_size', 'simple_type': 'SymIntArrayRef?'}, {'is_kwarg_only': 'False', 'name': 'align_corners', 'simple_type': 'bool'}, {'is_kwarg_only': 'False', 'name': 'scale_factors', 'simple_type': 'ArrayRef?'}], + torch._C._nn._upsample_bicubic2d_aa: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'output_size', 'simple_type': 'SymIntArrayRef', 'size': 2}, {'is_kwarg_only': 'False', 'name': 'align_corners', 'simple_type': 'bool'}], + torch._C._nn._upsample_bicubic2d_aa: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'output_size', 'simple_type': 'SymIntArrayRef', 'size': 2}, {'is_kwarg_only': 'False', 'name': 'align_corners', 'simple_type': 'bool'}], + torch._C._nn.upsample_nearest1d: [{'is_kwarg_only': 'False', 'name': 'input', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'output_size', 'simple_type': 'SymIntArrayRef?'}, {'is_kwarg_only': 'False', 'name': 'scale_factors', 'simple_type': 'ArrayRef?'}], + torch._C._nn.upsample_nearest1d: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'output_size', 'simple_type': 'SymIntArrayRef', 'size': 1}], + torch._C._nn.upsample_nearest1d: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'output_size', 'simple_type': 'SymIntArrayRef', 'size': 1}], + torch._C._nn._upsample_nearest_exact1d: [{'is_kwarg_only': 'False', 'name': 'input', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'output_size', 'simple_type': 'SymIntArrayRef?'}, {'is_kwarg_only': 'False', 'name': 'scale_factors', 'simple_type': 'ArrayRef?'}], + torch._C._nn._upsample_nearest_exact1d: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'output_size', 'simple_type': 'SymIntArrayRef', 'size': 1}], + torch._C._nn._upsample_nearest_exact1d: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'output_size', 'simple_type': 'SymIntArrayRef', 'size': 1}], + torch._C._nn.upsample_nearest2d: [{'is_kwarg_only': 'False', 'name': 'input', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'output_size', 'simple_type': 'SymIntArrayRef?'}, {'is_kwarg_only': 'False', 'name': 'scale_factors', 'simple_type': 'ArrayRef?'}], + torch._C._nn.upsample_nearest2d: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'output_size', 'simple_type': 'SymIntArrayRef', 'size': 2}], + torch._C._nn.upsample_nearest2d: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'output_size', 'simple_type': 'SymIntArrayRef', 'size': 2}], + torch._C._nn._upsample_nearest_exact2d: [{'is_kwarg_only': 'False', 'name': 'input', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'output_size', 'simple_type': 'SymIntArrayRef?'}, {'is_kwarg_only': 'False', 'name': 
'scale_factors', 'simple_type': 'ArrayRef?'}], + torch._C._nn._upsample_nearest_exact2d: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'output_size', 'simple_type': 'SymIntArrayRef', 'size': 2}], + torch._C._nn._upsample_nearest_exact2d: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'output_size', 'simple_type': 'SymIntArrayRef', 'size': 2}], + torch._C._nn.upsample_nearest3d: [{'is_kwarg_only': 'False', 'name': 'input', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'output_size', 'simple_type': 'SymIntArrayRef?'}, {'is_kwarg_only': 'False', 'name': 'scale_factors', 'simple_type': 'ArrayRef?'}], + torch._C._nn.upsample_nearest3d: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'output_size', 'simple_type': 'SymIntArrayRef', 'size': 3}], + torch._C._nn.upsample_nearest3d: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'output_size', 'simple_type': 'SymIntArrayRef', 'size': 3}], + torch._C._nn._upsample_nearest_exact3d: [{'is_kwarg_only': 'False', 'name': 'input', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'output_size', 'simple_type': 'SymIntArrayRef?'}, {'is_kwarg_only': 'False', 'name': 'scale_factors', 'simple_type': 'ArrayRef?'}], + torch._C._nn._upsample_nearest_exact3d: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'output_size', 'simple_type': 'SymIntArrayRef', 'size': 3}], + torch._C._nn._upsample_nearest_exact3d: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'output_size', 'simple_type': 'SymIntArrayRef', 'size': 3}], + torch._C._nn.slow_conv_transpose2d: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'weight', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'kernel_size', 'simple_type': 'SymIntArrayRef', 'size': 2}], + torch._C._nn.slow_conv_transpose2d: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'weight', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'kernel_size', 'simple_type': 'SymIntArrayRef', 'size': 2}], + torch._C._nn.slow_conv_transpose3d: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'weight', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'kernel_size', 'simple_type': 'SymIntArrayRef', 'size': 3}], + torch._C._nn.slow_conv_transpose3d: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'weight', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'kernel_size', 'simple_type': 'SymIntArrayRef', 'size': 3}], + torch._C._nn.thnn_conv2d: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'weight', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'kernel_size', 'simple_type': 'SymIntArrayRef', 'size': 2}], + torch._C._nn.thnn_conv2d: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'weight', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'kernel_size', 'simple_type': 'SymIntArrayRef', 'size': 2}], + torch._C._nn._conv_depthwise2d: [{'is_kwarg_only': 'False', 'name': 'self', 
'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'weight', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'kernel_size', 'simple_type': 'SymIntArrayRef', 'size': 2}, {'is_kwarg_only': 'False', 'name': 'bias', 'simple_type': 'Tensor?'}, {'is_kwarg_only': 'False', 'name': 'stride', 'simple_type': 'SymIntArrayRef', 'size': 2}, {'is_kwarg_only': 'False', 'name': 'padding', 'simple_type': 'SymIntArrayRef', 'size': 2}, {'is_kwarg_only': 'False', 'name': 'dilation', 'simple_type': 'SymIntArrayRef', 'size': 2}], + torch._C._nn._conv_depthwise2d: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'weight', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'kernel_size', 'simple_type': 'SymIntArrayRef', 'size': 2}, {'is_kwarg_only': 'False', 'name': 'bias', 'simple_type': 'Tensor?'}, {'is_kwarg_only': 'False', 'name': 'stride', 'simple_type': 'SymIntArrayRef', 'size': 2}, {'is_kwarg_only': 'False', 'name': 'padding', 'simple_type': 'SymIntArrayRef', 'size': 2}, {'is_kwarg_only': 'False', 'name': 'dilation', 'simple_type': 'SymIntArrayRef', 'size': 2}], + torch._C._nn.conv_depthwise3d: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'weight', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'kernel_size', 'simple_type': 'SymIntArrayRef', 'size': 3}, {'is_kwarg_only': 'False', 'name': 'bias', 'simple_type': 'Tensor?'}, {'is_kwarg_only': 'False', 'name': 'stride', 'simple_type': 'SymIntArrayRef', 'size': 3}, {'is_kwarg_only': 'False', 'name': 'padding', 'simple_type': 'SymIntArrayRef', 'size': 3}, {'is_kwarg_only': 'False', 'name': 'dilation', 'simple_type': 'SymIntArrayRef', 'size': 3}], + torch._C._nn.slow_conv3d: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'weight', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'kernel_size', 'simple_type': 'SymIntArrayRef', 'size': 3}], + torch._C._nn.slow_conv3d: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'weight', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'kernel_size', 'simple_type': 'SymIntArrayRef', 'size': 3}], + torch._C._nn.slow_conv_dilated2d: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'weight', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'kernel_size', 'simple_type': 'SymIntArrayRef', 'size': 2}], + torch._C._nn.slow_conv_dilated3d: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'weight', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'kernel_size', 'simple_type': 'SymIntArrayRef', 'size': 3}], + torch._C._nn.col2im: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'output_size', 'simple_type': 'SymIntArrayRef', 'size': 2}, {'is_kwarg_only': 'False', 'name': 'kernel_size', 'simple_type': 'IntArrayRef', 'size': 2}, {'is_kwarg_only': 'False', 'name': 'dilation', 'simple_type': 'IntArrayRef', 'size': 2}, {'is_kwarg_only': 'False', 'name': 'padding', 'simple_type': 'IntArrayRef', 'size': 2}, {'is_kwarg_only': 'False', 'name': 'stride', 'simple_type': 'IntArrayRef', 'size': 2}], + torch._C._nn.col2im: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'output_size', 'simple_type': 
'SymIntArrayRef', 'size': 2}, {'is_kwarg_only': 'False', 'name': 'kernel_size', 'simple_type': 'IntArrayRef', 'size': 2}, {'is_kwarg_only': 'False', 'name': 'dilation', 'simple_type': 'IntArrayRef', 'size': 2}, {'is_kwarg_only': 'False', 'name': 'padding', 'simple_type': 'IntArrayRef', 'size': 2}, {'is_kwarg_only': 'False', 'name': 'stride', 'simple_type': 'IntArrayRef', 'size': 2}], + torch._C._nn.im2col: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'kernel_size', 'simple_type': 'IntArrayRef', 'size': 2}, {'is_kwarg_only': 'False', 'name': 'dilation', 'simple_type': 'IntArrayRef', 'size': 2}, {'is_kwarg_only': 'False', 'name': 'padding', 'simple_type': 'IntArrayRef', 'size': 2}, {'is_kwarg_only': 'False', 'name': 'stride', 'simple_type': 'IntArrayRef', 'size': 2}], + torch._C._nn.im2col: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'kernel_size', 'simple_type': 'IntArrayRef', 'size': 2}, {'is_kwarg_only': 'False', 'name': 'dilation', 'simple_type': 'IntArrayRef', 'size': 2}, {'is_kwarg_only': 'False', 'name': 'padding', 'simple_type': 'IntArrayRef', 'size': 2}, {'is_kwarg_only': 'False', 'name': 'stride', 'simple_type': 'IntArrayRef', 'size': 2}], + torch._C._nn._test_optional_intlist: [{'is_kwarg_only': 'False', 'name': 'values', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'addends', 'simple_type': 'IntArrayRef?'}], + torch._C._nn._test_optional_filled_intlist: [{'is_kwarg_only': 'False', 'name': 'values', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'addends', 'simple_type': 'IntArrayRef?', 'size': 2}], + torch._C._nn._test_optional_floatlist: [{'is_kwarg_only': 'False', 'name': 'values', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'addends', 'simple_type': 'ArrayRef?'}], + torch._C._nn._test_string_default: [{'is_kwarg_only': 'False', 'name': 'dummy', 'simple_type': 'Tensor'}], + torch._C._nn._test_ambiguous_defaults: [{'is_kwarg_only': 'False', 'name': 'dummy', 'simple_type': 'Tensor'}], + torch._C._nn._test_ambiguous_defaults: [{'is_kwarg_only': 'False', 'name': 'dummy', 'simple_type': 'Tensor'}], + torch._C._nn._test_warn_in_autograd: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._nn.pad_sequence: [{'is_kwarg_only': 'False', 'name': 'sequences', 'simple_type': 'TensorList'}], + torch._C._nn.flatten_dense_tensors: [{'is_kwarg_only': 'False', 'name': 'tensors', 'simple_type': 'TensorList'}], + torch._C._nn.unflatten_dense_tensors: [{'is_kwarg_only': 'False', 'name': 'flat', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'tensors', 'simple_type': 'TensorList'}], + torch._C._nn.scaled_dot_product_attention: [{'is_kwarg_only': 'False', 'name': 'query', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'key', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'value', 'simple_type': 'Tensor'}], + torch._C._linalg.linalg_diagonal: [{'is_kwarg_only': 'False', 'name': 'A', 'simple_type': 'Tensor'}], + torch._C._linalg.linalg_solve_triangular: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'B', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'True', 'name': 'upper', 'simple_type': 'bool'}], + torch._C._linalg.linalg_solve_triangular: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'B', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'True', 'name': 
'upper', 'simple_type': 'bool'}], + torch._C._linalg.linalg_vander: [{'is_kwarg_only': 'False', 'name': 'x', 'simple_type': 'Tensor'}], + torch._C._linalg.linalg_cholesky_ex: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._linalg.linalg_cholesky_ex: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._linalg.linalg_cholesky: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._linalg.linalg_cholesky: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._linalg.linalg_cross: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Tensor'}], + torch._C._linalg.linalg_cross: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Tensor'}], + torch._C._linalg.linalg_lu_factor: [{'is_kwarg_only': 'False', 'name': 'A', 'simple_type': 'Tensor'}], + torch._C._linalg.linalg_lu_factor: [{'is_kwarg_only': 'False', 'name': 'A', 'simple_type': 'Tensor'}], + torch._C._linalg.linalg_lu_factor_ex: [{'is_kwarg_only': 'False', 'name': 'A', 'simple_type': 'Tensor'}], + torch._C._linalg.linalg_lu_factor_ex: [{'is_kwarg_only': 'False', 'name': 'A', 'simple_type': 'Tensor'}], + torch._C._linalg.linalg_lu: [{'is_kwarg_only': 'False', 'name': 'A', 'simple_type': 'Tensor'}], + torch._C._linalg.linalg_lu: [{'is_kwarg_only': 'False', 'name': 'A', 'simple_type': 'Tensor'}], + torch._C._linalg.linalg_lu_solve: [{'is_kwarg_only': 'False', 'name': 'LU', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'pivots', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'B', 'simple_type': 'Tensor'}], + torch._C._linalg.linalg_lu_solve: [{'is_kwarg_only': 'False', 'name': 'LU', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'pivots', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'B', 'simple_type': 'Tensor'}], + torch._C._linalg.linalg_det: [{'is_kwarg_only': 'False', 'name': 'A', 'simple_type': 'Tensor'}], + torch._C._linalg.linalg_det: [{'is_kwarg_only': 'False', 'name': 'A', 'simple_type': 'Tensor'}], + torch._C._linalg.linalg_ldl_factor_ex: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._linalg.linalg_ldl_factor_ex: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._linalg.linalg_ldl_factor: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._linalg.linalg_ldl_factor: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._linalg.linalg_ldl_solve: [{'is_kwarg_only': 'False', 'name': 'LD', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'pivots', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'B', 'simple_type': 'Tensor'}], + torch._C._linalg.linalg_ldl_solve: [{'is_kwarg_only': 'False', 'name': 'LD', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'pivots', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'B', 'simple_type': 'Tensor'}], + torch._C._linalg.linalg_lstsq: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'b', 'simple_type': 'Tensor'}], + torch._C._linalg.linalg_lstsq: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'b', 'simple_type': 'Tensor'}], + torch._C._linalg.linalg_matmul: [{'is_kwarg_only': 'False', 
'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Tensor'}], + torch._C._linalg.linalg_matmul: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Tensor'}], + torch._C._linalg.linalg_vecdot: [{'is_kwarg_only': 'False', 'name': 'x', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'y', 'simple_type': 'Tensor'}], + torch._C._linalg.linalg_vecdot: [{'is_kwarg_only': 'False', 'name': 'x', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'y', 'simple_type': 'Tensor'}], + torch._C._linalg.linalg_matrix_exp: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._linalg.linalg_slogdet: [{'is_kwarg_only': 'False', 'name': 'A', 'simple_type': 'Tensor'}], + torch._C._linalg.linalg_slogdet: [{'is_kwarg_only': 'False', 'name': 'A', 'simple_type': 'Tensor'}], + torch._C._linalg.linalg_eig: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._linalg.linalg_eig: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._linalg.linalg_eigvals: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._linalg.linalg_eigvals: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._linalg.linalg_eigh: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._linalg.linalg_eigh: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._linalg.linalg_eigvalsh: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._linalg.linalg_eigvalsh: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._linalg.linalg_householder_product: [{'is_kwarg_only': 'False', 'name': 'input', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'tau', 'simple_type': 'Tensor'}], + torch._C._linalg.linalg_householder_product: [{'is_kwarg_only': 'False', 'name': 'input', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'tau', 'simple_type': 'Tensor'}], + torch._C._linalg.linalg_inv_ex: [{'is_kwarg_only': 'False', 'name': 'A', 'simple_type': 'Tensor'}], + torch._C._linalg.linalg_inv_ex: [{'is_kwarg_only': 'False', 'name': 'A', 'simple_type': 'Tensor'}], + torch._C._linalg.linalg_inv: [{'is_kwarg_only': 'False', 'name': 'A', 'simple_type': 'Tensor'}], + torch._C._linalg.linalg_inv: [{'is_kwarg_only': 'False', 'name': 'A', 'simple_type': 'Tensor'}], + torch._C._linalg.linalg_norm: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._linalg.linalg_norm: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'ord', 'simple_type': 'c10::string_view'}], + torch._C._linalg.linalg_norm: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._linalg.linalg_norm: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'ord', 'simple_type': 'c10::string_view'}], + torch._C._linalg.linalg_vector_norm: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._linalg.linalg_vector_norm: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._linalg.linalg_matrix_norm: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'ord', 'simple_type': 'Scalar'}], + 
torch._C._linalg.linalg_matrix_norm: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'ord', 'simple_type': 'Scalar'}], + torch._C._linalg.linalg_matrix_norm: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._linalg.linalg_matrix_norm: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._linalg.linalg_svd: [{'is_kwarg_only': 'False', 'name': 'A', 'simple_type': 'Tensor'}], + torch._C._linalg.linalg_svd: [{'is_kwarg_only': 'False', 'name': 'A', 'simple_type': 'Tensor'}], + torch._C._linalg.linalg_svdvals: [{'is_kwarg_only': 'False', 'name': 'A', 'simple_type': 'Tensor'}], + torch._C._linalg.linalg_svdvals: [{'is_kwarg_only': 'False', 'name': 'A', 'simple_type': 'Tensor'}], + torch._C._linalg.linalg_cond: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._linalg.linalg_cond: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._linalg.linalg_cond: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'p', 'simple_type': 'c10::string_view'}], + torch._C._linalg.linalg_cond: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'p', 'simple_type': 'c10::string_view'}], + torch._C._linalg.linalg_pinv: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._linalg.linalg_pinv: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._linalg.linalg_pinv: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._linalg.linalg_pinv: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._linalg.linalg_pinv: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'rcond', 'simple_type': 'double'}], + torch._C._linalg.linalg_pinv: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'rcond', 'simple_type': 'Tensor'}], + torch._C._linalg.linalg_pinv: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'rcond', 'simple_type': 'double'}], + torch._C._linalg.linalg_pinv: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'rcond', 'simple_type': 'Tensor'}], + torch._C._linalg.linalg_solve_ex: [{'is_kwarg_only': 'False', 'name': 'A', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'B', 'simple_type': 'Tensor'}], + torch._C._linalg.linalg_solve_ex: [{'is_kwarg_only': 'False', 'name': 'A', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'B', 'simple_type': 'Tensor'}], + torch._C._linalg.linalg_solve: [{'is_kwarg_only': 'False', 'name': 'A', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'B', 'simple_type': 'Tensor'}], + torch._C._linalg.linalg_solve: [{'is_kwarg_only': 'False', 'name': 'A', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'B', 'simple_type': 'Tensor'}], + torch._C._linalg.linalg_tensorinv: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._linalg.linalg_tensorinv: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._linalg.linalg_tensorsolve: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Tensor'}], + 
torch._C._linalg.linalg_tensorsolve: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Tensor'}], + torch._C._linalg.linalg_qr: [{'is_kwarg_only': 'False', 'name': 'A', 'simple_type': 'Tensor'}], + torch._C._linalg.linalg_qr: [{'is_kwarg_only': 'False', 'name': 'A', 'simple_type': 'Tensor'}], + torch._C._linalg.linalg_matrix_power: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'n', 'simple_type': 'int64_t'}], + torch._C._linalg.linalg_matrix_power: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'n', 'simple_type': 'int64_t'}], + torch._C._linalg.linalg_matrix_rank: [{'is_kwarg_only': 'False', 'name': 'input', 'simple_type': 'Tensor'}], + torch._C._linalg.linalg_matrix_rank: [{'is_kwarg_only': 'False', 'name': 'input', 'simple_type': 'Tensor'}], + torch._C._linalg.linalg_matrix_rank: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._linalg.linalg_matrix_rank: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._linalg.linalg_matrix_rank: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'tol', 'simple_type': 'double'}], + torch._C._linalg.linalg_matrix_rank: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'tol', 'simple_type': 'double'}], + torch._C._linalg.linalg_matrix_rank: [{'is_kwarg_only': 'False', 'name': 'input', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'tol', 'simple_type': 'Tensor'}], + torch._C._linalg.linalg_matrix_rank: [{'is_kwarg_only': 'False', 'name': 'input', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'tol', 'simple_type': 'Tensor'}], + torch._C._linalg.linalg_multi_dot: [{'is_kwarg_only': 'False', 'name': 'tensors', 'simple_type': 'TensorList'}], + torch._C._linalg.linalg_multi_dot: [{'is_kwarg_only': 'False', 'name': 'tensors', 'simple_type': 'TensorList'}], + torch._C._special.special_entr: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._special.special_entr: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._special.special_ndtri: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._special.special_ndtri: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._special.special_log_ndtr: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._special.special_log_ndtr: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._special.special_expm1: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._special.special_expm1: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._special.special_exp2: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._special.special_exp2: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._special.special_psi: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._special.special_psi: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._special.special_digamma: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._special.special_digamma: [{'is_kwarg_only': 
'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._special.special_gammaln: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._special.special_gammaln: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._special.special_erf: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._special.special_erf: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._special.special_erfc: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._special.special_erfc: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._special.special_erfcx: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._special.special_erfcx: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._special.special_erfinv: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._special.special_erfinv: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._special.special_ndtr: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._special.special_ndtr: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._special.special_xlog1py: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Tensor'}], + torch._C._special.special_xlog1py: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Scalar'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Tensor'}], + torch._C._special.special_xlog1py: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Scalar'}], + torch._C._special.special_xlog1py: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Tensor'}], + torch._C._special.special_xlog1py: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Scalar'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Tensor'}], + torch._C._special.special_xlog1py: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Scalar'}], + torch._C._special.special_xlogy: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Tensor'}], + torch._C._special.special_xlogy: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Scalar'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Tensor'}], + torch._C._special.special_xlogy: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Scalar'}], + torch._C._special.special_xlogy: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Tensor'}], + torch._C._special.special_xlogy: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Scalar'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Tensor'}], + torch._C._special.special_xlogy: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Scalar'}], + torch._C._special.special_zeta: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, 
{'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Tensor'}], + torch._C._special.special_zeta: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Scalar'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Tensor'}], + torch._C._special.special_zeta: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Scalar'}], + torch._C._special.special_zeta: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Tensor'}], + torch._C._special.special_zeta: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Scalar'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Tensor'}], + torch._C._special.special_zeta: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Scalar'}], + torch._C._special.special_i0: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._special.special_i0: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._special.special_i0e: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._special.special_i0e: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._special.special_i1: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._special.special_i1: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._special.special_i1e: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._special.special_i1e: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._special.special_logit: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._special.special_logit: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._special.special_polygamma: [{'is_kwarg_only': 'False', 'name': 'n', 'simple_type': 'int64_t'}, {'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._special.special_polygamma: [{'is_kwarg_only': 'False', 'name': 'n', 'simple_type': 'int64_t'}, {'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._special.special_logsumexp: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'dim', 'simple_type': 'IntArrayRef', 'size': 1}], + torch._C._special.special_logsumexp: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'dim', 'simple_type': 'IntArrayRef', 'size': 1}], + torch._C._special.special_expit: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._special.special_expit: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._special.special_sinc: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._special.special_sinc: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._special.special_round: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._special.special_round: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._special.special_log1p: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._special.special_log1p: [{'is_kwarg_only': 'False', 'name': 'self', 
'simple_type': 'Tensor'}], + torch._C._special.special_log_softmax: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'dim', 'simple_type': 'int64_t'}], + torch._C._special.special_gammainc: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Tensor'}], + torch._C._special.special_gammainc: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Tensor'}], + torch._C._special.special_gammaincc: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Tensor'}], + torch._C._special.special_gammaincc: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Tensor'}], + torch._C._special.special_multigammaln: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'p', 'simple_type': 'int64_t'}], + torch._C._special.special_multigammaln: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'p', 'simple_type': 'int64_t'}], + torch._C._special.special_softmax: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'dim', 'simple_type': 'int64_t'}], + torch._C._special.special_airy_ai: [{'is_kwarg_only': 'False', 'name': 'x', 'simple_type': 'Tensor'}], + torch._C._special.special_airy_ai: [{'is_kwarg_only': 'False', 'name': 'x', 'simple_type': 'Tensor'}], + torch._C._special.special_bessel_j0: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._special.special_bessel_j0: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._special.special_bessel_j1: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._special.special_bessel_j1: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._special.special_bessel_y0: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._special.special_bessel_y0: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._special.special_bessel_y1: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._special.special_bessel_y1: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._special.special_chebyshev_polynomial_t: [{'is_kwarg_only': 'False', 'name': 'x', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'n', 'simple_type': 'Tensor'}], + torch._C._special.special_chebyshev_polynomial_t: [{'is_kwarg_only': 'False', 'name': 'x', 'simple_type': 'Scalar'}, {'is_kwarg_only': 'False', 'name': 'n', 'simple_type': 'Tensor'}], + torch._C._special.special_chebyshev_polynomial_t: [{'is_kwarg_only': 'False', 'name': 'x', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'n', 'simple_type': 'Scalar'}], + torch._C._special.special_chebyshev_polynomial_t: [{'is_kwarg_only': 'False', 'name': 'x', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'n', 'simple_type': 'Tensor'}], + torch._C._special.special_chebyshev_polynomial_t: [{'is_kwarg_only': 'False', 'name': 'x', 'simple_type': 'Scalar'}, {'is_kwarg_only': 'False', 'name': 'n', 'simple_type': 'Tensor'}], + torch._C._special.special_chebyshev_polynomial_t: [{'is_kwarg_only': 
'False', 'name': 'x', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'n', 'simple_type': 'Scalar'}], + torch._C._special.special_chebyshev_polynomial_u: [{'is_kwarg_only': 'False', 'name': 'x', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'n', 'simple_type': 'Tensor'}], + torch._C._special.special_chebyshev_polynomial_u: [{'is_kwarg_only': 'False', 'name': 'x', 'simple_type': 'Scalar'}, {'is_kwarg_only': 'False', 'name': 'n', 'simple_type': 'Tensor'}], + torch._C._special.special_chebyshev_polynomial_u: [{'is_kwarg_only': 'False', 'name': 'x', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'n', 'simple_type': 'Scalar'}], + torch._C._special.special_chebyshev_polynomial_u: [{'is_kwarg_only': 'False', 'name': 'x', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'n', 'simple_type': 'Tensor'}], + torch._C._special.special_chebyshev_polynomial_u: [{'is_kwarg_only': 'False', 'name': 'x', 'simple_type': 'Scalar'}, {'is_kwarg_only': 'False', 'name': 'n', 'simple_type': 'Tensor'}], + torch._C._special.special_chebyshev_polynomial_u: [{'is_kwarg_only': 'False', 'name': 'x', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'n', 'simple_type': 'Scalar'}], + torch._C._special.special_chebyshev_polynomial_v: [{'is_kwarg_only': 'False', 'name': 'x', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'n', 'simple_type': 'Tensor'}], + torch._C._special.special_chebyshev_polynomial_v: [{'is_kwarg_only': 'False', 'name': 'x', 'simple_type': 'Scalar'}, {'is_kwarg_only': 'False', 'name': 'n', 'simple_type': 'Tensor'}], + torch._C._special.special_chebyshev_polynomial_v: [{'is_kwarg_only': 'False', 'name': 'x', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'n', 'simple_type': 'Scalar'}], + torch._C._special.special_chebyshev_polynomial_v: [{'is_kwarg_only': 'False', 'name': 'x', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'n', 'simple_type': 'Tensor'}], + torch._C._special.special_chebyshev_polynomial_v: [{'is_kwarg_only': 'False', 'name': 'x', 'simple_type': 'Scalar'}, {'is_kwarg_only': 'False', 'name': 'n', 'simple_type': 'Tensor'}], + torch._C._special.special_chebyshev_polynomial_v: [{'is_kwarg_only': 'False', 'name': 'x', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'n', 'simple_type': 'Scalar'}], + torch._C._special.special_chebyshev_polynomial_w: [{'is_kwarg_only': 'False', 'name': 'x', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'n', 'simple_type': 'Tensor'}], + torch._C._special.special_chebyshev_polynomial_w: [{'is_kwarg_only': 'False', 'name': 'x', 'simple_type': 'Scalar'}, {'is_kwarg_only': 'False', 'name': 'n', 'simple_type': 'Tensor'}], + torch._C._special.special_chebyshev_polynomial_w: [{'is_kwarg_only': 'False', 'name': 'x', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'n', 'simple_type': 'Scalar'}], + torch._C._special.special_chebyshev_polynomial_w: [{'is_kwarg_only': 'False', 'name': 'x', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'n', 'simple_type': 'Tensor'}], + torch._C._special.special_chebyshev_polynomial_w: [{'is_kwarg_only': 'False', 'name': 'x', 'simple_type': 'Scalar'}, {'is_kwarg_only': 'False', 'name': 'n', 'simple_type': 'Tensor'}], + torch._C._special.special_chebyshev_polynomial_w: [{'is_kwarg_only': 'False', 'name': 'x', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'n', 'simple_type': 'Scalar'}], + torch._C._special.special_hermite_polynomial_h: [{'is_kwarg_only': 'False', 'name': 'x', 
'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'n', 'simple_type': 'Tensor'}], + torch._C._special.special_hermite_polynomial_h: [{'is_kwarg_only': 'False', 'name': 'x', 'simple_type': 'Scalar'}, {'is_kwarg_only': 'False', 'name': 'n', 'simple_type': 'Tensor'}], + torch._C._special.special_hermite_polynomial_h: [{'is_kwarg_only': 'False', 'name': 'x', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'n', 'simple_type': 'Scalar'}], + torch._C._special.special_hermite_polynomial_h: [{'is_kwarg_only': 'False', 'name': 'x', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'n', 'simple_type': 'Tensor'}], + torch._C._special.special_hermite_polynomial_h: [{'is_kwarg_only': 'False', 'name': 'x', 'simple_type': 'Scalar'}, {'is_kwarg_only': 'False', 'name': 'n', 'simple_type': 'Tensor'}], + torch._C._special.special_hermite_polynomial_h: [{'is_kwarg_only': 'False', 'name': 'x', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'n', 'simple_type': 'Scalar'}], + torch._C._special.special_hermite_polynomial_he: [{'is_kwarg_only': 'False', 'name': 'x', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'n', 'simple_type': 'Tensor'}], + torch._C._special.special_hermite_polynomial_he: [{'is_kwarg_only': 'False', 'name': 'x', 'simple_type': 'Scalar'}, {'is_kwarg_only': 'False', 'name': 'n', 'simple_type': 'Tensor'}], + torch._C._special.special_hermite_polynomial_he: [{'is_kwarg_only': 'False', 'name': 'x', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'n', 'simple_type': 'Scalar'}], + torch._C._special.special_hermite_polynomial_he: [{'is_kwarg_only': 'False', 'name': 'x', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'n', 'simple_type': 'Tensor'}], + torch._C._special.special_hermite_polynomial_he: [{'is_kwarg_only': 'False', 'name': 'x', 'simple_type': 'Scalar'}, {'is_kwarg_only': 'False', 'name': 'n', 'simple_type': 'Tensor'}], + torch._C._special.special_hermite_polynomial_he: [{'is_kwarg_only': 'False', 'name': 'x', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'n', 'simple_type': 'Scalar'}], + torch._C._special.special_laguerre_polynomial_l: [{'is_kwarg_only': 'False', 'name': 'x', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'n', 'simple_type': 'Tensor'}], + torch._C._special.special_laguerre_polynomial_l: [{'is_kwarg_only': 'False', 'name': 'x', 'simple_type': 'Scalar'}, {'is_kwarg_only': 'False', 'name': 'n', 'simple_type': 'Tensor'}], + torch._C._special.special_laguerre_polynomial_l: [{'is_kwarg_only': 'False', 'name': 'x', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'n', 'simple_type': 'Scalar'}], + torch._C._special.special_laguerre_polynomial_l: [{'is_kwarg_only': 'False', 'name': 'x', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'n', 'simple_type': 'Tensor'}], + torch._C._special.special_laguerre_polynomial_l: [{'is_kwarg_only': 'False', 'name': 'x', 'simple_type': 'Scalar'}, {'is_kwarg_only': 'False', 'name': 'n', 'simple_type': 'Tensor'}], + torch._C._special.special_laguerre_polynomial_l: [{'is_kwarg_only': 'False', 'name': 'x', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'n', 'simple_type': 'Scalar'}], + torch._C._special.special_legendre_polynomial_p: [{'is_kwarg_only': 'False', 'name': 'x', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'n', 'simple_type': 'Tensor'}], + torch._C._special.special_legendre_polynomial_p: [{'is_kwarg_only': 'False', 'name': 'x', 'simple_type': 'Scalar'}, {'is_kwarg_only': 
'False', 'name': 'n', 'simple_type': 'Tensor'}], + torch._C._special.special_legendre_polynomial_p: [{'is_kwarg_only': 'False', 'name': 'x', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'n', 'simple_type': 'Scalar'}], + torch._C._special.special_legendre_polynomial_p: [{'is_kwarg_only': 'False', 'name': 'x', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'n', 'simple_type': 'Tensor'}], + torch._C._special.special_legendre_polynomial_p: [{'is_kwarg_only': 'False', 'name': 'x', 'simple_type': 'Scalar'}, {'is_kwarg_only': 'False', 'name': 'n', 'simple_type': 'Tensor'}], + torch._C._special.special_legendre_polynomial_p: [{'is_kwarg_only': 'False', 'name': 'x', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'n', 'simple_type': 'Scalar'}], + torch._C._special.special_modified_bessel_i0: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._special.special_modified_bessel_i0: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._special.special_modified_bessel_i1: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._special.special_modified_bessel_i1: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._special.special_modified_bessel_k0: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._special.special_modified_bessel_k0: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._special.special_modified_bessel_k1: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._special.special_modified_bessel_k1: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._special.special_scaled_modified_bessel_k0: [{'is_kwarg_only': 'False', 'name': 'x', 'simple_type': 'Tensor'}], + torch._C._special.special_scaled_modified_bessel_k0: [{'is_kwarg_only': 'False', 'name': 'x', 'simple_type': 'Tensor'}], + torch._C._special.special_scaled_modified_bessel_k1: [{'is_kwarg_only': 'False', 'name': 'x', 'simple_type': 'Tensor'}], + torch._C._special.special_scaled_modified_bessel_k1: [{'is_kwarg_only': 'False', 'name': 'x', 'simple_type': 'Tensor'}], + torch._C._special.special_shifted_chebyshev_polynomial_t: [{'is_kwarg_only': 'False', 'name': 'x', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'n', 'simple_type': 'Tensor'}], + torch._C._special.special_shifted_chebyshev_polynomial_t: [{'is_kwarg_only': 'False', 'name': 'x', 'simple_type': 'Scalar'}, {'is_kwarg_only': 'False', 'name': 'n', 'simple_type': 'Tensor'}], + torch._C._special.special_shifted_chebyshev_polynomial_t: [{'is_kwarg_only': 'False', 'name': 'x', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'n', 'simple_type': 'Scalar'}], + torch._C._special.special_shifted_chebyshev_polynomial_t: [{'is_kwarg_only': 'False', 'name': 'x', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'n', 'simple_type': 'Tensor'}], + torch._C._special.special_shifted_chebyshev_polynomial_t: [{'is_kwarg_only': 'False', 'name': 'x', 'simple_type': 'Scalar'}, {'is_kwarg_only': 'False', 'name': 'n', 'simple_type': 'Tensor'}], + torch._C._special.special_shifted_chebyshev_polynomial_t: [{'is_kwarg_only': 'False', 'name': 'x', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'n', 'simple_type': 'Scalar'}], + torch._C._special.special_shifted_chebyshev_polynomial_u: [{'is_kwarg_only': 'False', 'name': 'x', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 
'name': 'n', 'simple_type': 'Tensor'}], + torch._C._special.special_shifted_chebyshev_polynomial_u: [{'is_kwarg_only': 'False', 'name': 'x', 'simple_type': 'Scalar'}, {'is_kwarg_only': 'False', 'name': 'n', 'simple_type': 'Tensor'}], + torch._C._special.special_shifted_chebyshev_polynomial_u: [{'is_kwarg_only': 'False', 'name': 'x', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'n', 'simple_type': 'Scalar'}], + torch._C._special.special_shifted_chebyshev_polynomial_u: [{'is_kwarg_only': 'False', 'name': 'x', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'n', 'simple_type': 'Tensor'}], + torch._C._special.special_shifted_chebyshev_polynomial_u: [{'is_kwarg_only': 'False', 'name': 'x', 'simple_type': 'Scalar'}, {'is_kwarg_only': 'False', 'name': 'n', 'simple_type': 'Tensor'}], + torch._C._special.special_shifted_chebyshev_polynomial_u: [{'is_kwarg_only': 'False', 'name': 'x', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'n', 'simple_type': 'Scalar'}], + torch._C._special.special_shifted_chebyshev_polynomial_v: [{'is_kwarg_only': 'False', 'name': 'x', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'n', 'simple_type': 'Tensor'}], + torch._C._special.special_shifted_chebyshev_polynomial_v: [{'is_kwarg_only': 'False', 'name': 'x', 'simple_type': 'Scalar'}, {'is_kwarg_only': 'False', 'name': 'n', 'simple_type': 'Tensor'}], + torch._C._special.special_shifted_chebyshev_polynomial_v: [{'is_kwarg_only': 'False', 'name': 'x', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'n', 'simple_type': 'Scalar'}], + torch._C._special.special_shifted_chebyshev_polynomial_v: [{'is_kwarg_only': 'False', 'name': 'x', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'n', 'simple_type': 'Tensor'}], + torch._C._special.special_shifted_chebyshev_polynomial_v: [{'is_kwarg_only': 'False', 'name': 'x', 'simple_type': 'Scalar'}, {'is_kwarg_only': 'False', 'name': 'n', 'simple_type': 'Tensor'}], + torch._C._special.special_shifted_chebyshev_polynomial_v: [{'is_kwarg_only': 'False', 'name': 'x', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'n', 'simple_type': 'Scalar'}], + torch._C._special.special_shifted_chebyshev_polynomial_w: [{'is_kwarg_only': 'False', 'name': 'x', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'n', 'simple_type': 'Tensor'}], + torch._C._special.special_shifted_chebyshev_polynomial_w: [{'is_kwarg_only': 'False', 'name': 'x', 'simple_type': 'Scalar'}, {'is_kwarg_only': 'False', 'name': 'n', 'simple_type': 'Tensor'}], + torch._C._special.special_shifted_chebyshev_polynomial_w: [{'is_kwarg_only': 'False', 'name': 'x', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'n', 'simple_type': 'Scalar'}], + torch._C._special.special_shifted_chebyshev_polynomial_w: [{'is_kwarg_only': 'False', 'name': 'x', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'n', 'simple_type': 'Tensor'}], + torch._C._special.special_shifted_chebyshev_polynomial_w: [{'is_kwarg_only': 'False', 'name': 'x', 'simple_type': 'Scalar'}, {'is_kwarg_only': 'False', 'name': 'n', 'simple_type': 'Tensor'}], + torch._C._special.special_shifted_chebyshev_polynomial_w: [{'is_kwarg_only': 'False', 'name': 'x', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'n', 'simple_type': 'Scalar'}], + torch._C._special.special_spherical_bessel_j0: [{'is_kwarg_only': 'False', 'name': 'x', 'simple_type': 'Tensor'}], + torch._C._special.special_spherical_bessel_j0: [{'is_kwarg_only': 'False', 'name': 'x', 
'simple_type': 'Tensor'}], + torch._C._fft.fft_fft: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._fft.fft_fft: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._fft.fft_ifft: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._fft.fft_ifft: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._fft.fft_rfft: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._fft.fft_rfft: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._fft.fft_irfft: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._fft.fft_irfft: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._fft.fft_hfft: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._fft.fft_hfft: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._fft.fft_ihfft: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._fft.fft_ihfft: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._fft.fft_fft2: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._fft.fft_fft2: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._fft.fft_ifft2: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._fft.fft_ifft2: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._fft.fft_rfft2: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._fft.fft_rfft2: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._fft.fft_irfft2: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._fft.fft_irfft2: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._fft.fft_hfft2: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._fft.fft_hfft2: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._fft.fft_ihfft2: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._fft.fft_ihfft2: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._fft.fft_fftn: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._fft.fft_fftn: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._fft.fft_ifftn: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._fft.fft_ifftn: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._fft.fft_rfftn: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._fft.fft_rfftn: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._fft.fft_irfftn: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._fft.fft_irfftn: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._fft.fft_hfftn: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._fft.fft_hfftn: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._fft.fft_ihfftn: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._fft.fft_ihfftn: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + 
torch._C._fft.fft_fftfreq: [{'is_kwarg_only': 'False', 'name': 'n', 'simple_type': 'int64_t'}], + torch._C._fft.fft_fftfreq: [{'is_kwarg_only': 'False', 'name': 'n', 'simple_type': 'int64_t'}], + torch._C._fft.fft_rfftfreq: [{'is_kwarg_only': 'False', 'name': 'n', 'simple_type': 'int64_t'}], + torch._C._fft.fft_rfftfreq: [{'is_kwarg_only': 'False', 'name': 'n', 'simple_type': 'int64_t'}], + torch._C._fft.fft_fftshift: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch._C._fft.fft_ifftshift: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch.Tensor.retain_grad: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch.Tensor.rename_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'names', 'simple_type': 'DimnameList?'}], + torch.Tensor.rename: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'names', 'simple_type': 'DimnameList?'}], + torch.Tensor.align_to: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'names', 'simple_type': 'DimnameList'}], + torch.Tensor.align_to: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'order', 'simple_type': 'DimnameList'}, {'is_kwarg_only': 'False', 'name': 'ellipsis_idx', 'simple_type': 'int64_t'}], + torch.Tensor.align_as: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Tensor'}], + torch.Tensor.refine_names: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'names', 'simple_type': 'DimnameList'}], + torch.Tensor.abs: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch.Tensor.abs_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch.Tensor.absolute: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch.Tensor.absolute_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch.Tensor.angle: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch.Tensor.sgn: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch.Tensor.sgn_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch.Tensor.chalf: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch.Tensor._conj: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch.Tensor.conj: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch.Tensor._conj_physical: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch.Tensor.conj_physical: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch.Tensor.conj_physical_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch.Tensor.resolve_conj: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch.Tensor.resolve_neg: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch.Tensor._neg_view: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch.Tensor.acos: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch.Tensor.acos_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch.Tensor.arccos: [{'is_kwarg_only': 
'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch.Tensor.arccos_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch.Tensor.add: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Tensor'}], + torch.Tensor.add_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Tensor'}], + torch.Tensor.addmv: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'mat', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'vec', 'simple_type': 'Tensor'}], + torch.Tensor.addmv_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'mat', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'vec', 'simple_type': 'Tensor'}], + torch.Tensor.addr: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'vec1', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'vec2', 'simple_type': 'Tensor'}], + torch.Tensor.addr_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'vec1', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'vec2', 'simple_type': 'Tensor'}], + torch.Tensor._is_all_true: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch.Tensor._is_any_true: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch.Tensor.all: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'dim', 'simple_type': 'int64_t'}], + torch.Tensor.all: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch.Tensor.all: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'dim', 'simple_type': 'Dimname'}], + torch.Tensor.all: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch.Tensor.allclose: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Tensor'}], + torch.Tensor.any: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'dim', 'simple_type': 'int64_t'}], + torch.Tensor.any: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch.Tensor.any: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'dim', 'simple_type': 'Dimname'}], + torch.Tensor.any: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch.Tensor.argmax: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch.Tensor.argmin: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch.Tensor.acosh: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch.Tensor.acosh_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch.Tensor.arccosh: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch.Tensor.arccosh_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch.Tensor.asinh: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch.Tensor.asinh_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch.Tensor.arcsinh: 
[{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch.Tensor.arcsinh_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch.Tensor.atanh: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch.Tensor.atanh_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch.Tensor.arctanh: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch.Tensor.arctanh_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch.Tensor.as_strided: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'size', 'simple_type': 'SymIntArrayRef'}, {'is_kwarg_only': 'False', 'name': 'stride', 'simple_type': 'SymIntArrayRef'}], + torch.Tensor.as_strided_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'size', 'simple_type': 'SymIntArrayRef'}, {'is_kwarg_only': 'False', 'name': 'stride', 'simple_type': 'SymIntArrayRef'}], + torch.Tensor.asin: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch.Tensor.asin_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch.Tensor.arcsin: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch.Tensor.arcsin_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch.Tensor.atan: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch.Tensor.atan_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch.Tensor.arctan: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch.Tensor.arctan_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch.Tensor.baddbmm: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'batch1', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'batch2', 'simple_type': 'Tensor'}], + torch.Tensor.baddbmm_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'batch1', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'batch2', 'simple_type': 'Tensor'}], + torch.Tensor.bernoulli: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch.Tensor.bernoulli: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'p', 'simple_type': 'double'}], + torch.Tensor.bernoulli_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'p', 'simple_type': 'Tensor'}], + torch.Tensor.bernoulli_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch.Tensor.bincount: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch.Tensor.bitwise_not: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch.Tensor.bitwise_not_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch.Tensor.copysign: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Tensor'}], + torch.Tensor.copysign: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Scalar'}], + torch.Tensor.copysign_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 
'name': 'other', 'simple_type': 'Tensor'}], + torch.Tensor.copysign_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Scalar'}], + torch.Tensor.logical_not: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch.Tensor.logical_not_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch.Tensor.logical_xor: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Tensor'}], + torch.Tensor.logical_xor_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Tensor'}], + torch.Tensor.logical_and: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Tensor'}], + torch.Tensor.logical_and_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Tensor'}], + torch.Tensor.logical_or: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Tensor'}], + torch.Tensor.logical_or_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Tensor'}], + torch.Tensor.bmm: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'mat2', 'simple_type': 'Tensor'}], + torch.Tensor.broadcast_to: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'size', 'simple_type': 'SymIntArrayRef'}], + torch.Tensor.ceil: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch.Tensor.ceil_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch.Tensor.unsafe_chunk: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'chunks', 'simple_type': 'int64_t'}], + torch.Tensor.chunk: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'chunks', 'simple_type': 'int64_t'}], + torch.Tensor.tensor_split: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'sections', 'simple_type': 'SymInt'}], + torch.Tensor.tensor_split: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'indices', 'simple_type': 'SymIntArrayRef'}], + torch.Tensor.tensor_split: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'tensor_indices_or_sections', 'simple_type': 'Tensor'}], + torch.Tensor.clamp: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch.Tensor.clamp: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch.Tensor.clamp_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch.Tensor.clamp_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch.Tensor.clamp_max: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'max', 'simple_type': 'Scalar'}], + torch.Tensor.clamp_max: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'max', 'simple_type': 'Tensor'}], + 
torch.Tensor.clamp_max_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'max', 'simple_type': 'Scalar'}], + torch.Tensor.clamp_max_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'max', 'simple_type': 'Tensor'}], + torch.Tensor.clamp_min: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'min', 'simple_type': 'Scalar'}], + torch.Tensor.clamp_min: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'min', 'simple_type': 'Tensor'}], + torch.Tensor.clamp_min_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'min', 'simple_type': 'Scalar'}], + torch.Tensor.clamp_min_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'min', 'simple_type': 'Tensor'}], + torch.Tensor.clip: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch.Tensor.clip: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch.Tensor.clip_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch.Tensor.clip_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch.Tensor.cos: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch.Tensor.cos_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch.Tensor.cosh: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch.Tensor.cosh_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch.Tensor.count_nonzero: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'dim', 'simple_type': 'IntArrayRef'}], + torch.Tensor.count_nonzero: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch.Tensor.cov: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch.Tensor.corrcoef: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch.Tensor.cummax: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'dim', 'simple_type': 'int64_t'}], + torch.Tensor.cummax: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'dim', 'simple_type': 'Dimname'}], + torch.Tensor.cummin: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'dim', 'simple_type': 'int64_t'}], + torch.Tensor.cummin: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'dim', 'simple_type': 'Dimname'}], + torch.Tensor.cumprod: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'dim', 'simple_type': 'int64_t'}], + torch.Tensor.cumprod: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'dim', 'simple_type': 'Dimname'}], + torch.Tensor.cumprod_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'dim', 'simple_type': 'int64_t'}], + torch.Tensor.cumprod_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'dim', 'simple_type': 'Dimname'}], + torch.Tensor.cumsum: [{'is_kwarg_only': 'False', 
'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'dim', 'simple_type': 'int64_t'}], + torch.Tensor.cumsum: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'dim', 'simple_type': 'Dimname'}], + torch.Tensor.cumsum_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'dim', 'simple_type': 'int64_t'}], + torch.Tensor.cumsum_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'dim', 'simple_type': 'Dimname'}], + torch.Tensor.diag_embed: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch.Tensor.diagflat: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch.Tensor.diagonal: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch.Tensor.diagonal: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch.Tensor.fill_diagonal_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'fill_value', 'simple_type': 'Scalar'}], + torch.Tensor.diff: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch.Tensor.div: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Tensor'}], + torch.Tensor.div: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'True', 'name': 'rounding_mode', 'simple_type': 'c10::string_view?'}], + torch.Tensor.div: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Scalar'}, {'is_kwarg_only': 'True', 'name': 'rounding_mode', 'simple_type': 'c10::string_view?'}], + torch.Tensor.div_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Tensor'}], + torch.Tensor.div_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'True', 'name': 'rounding_mode', 'simple_type': 'c10::string_view?'}], + torch.Tensor.div_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Scalar'}, {'is_kwarg_only': 'True', 'name': 'rounding_mode', 'simple_type': 'c10::string_view?'}], + torch.Tensor.divide: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Tensor'}], + torch.Tensor.divide: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Scalar'}], + torch.Tensor.divide: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'True', 'name': 'rounding_mode', 'simple_type': 'c10::string_view?'}], + torch.Tensor.divide: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Scalar'}, {'is_kwarg_only': 'True', 'name': 'rounding_mode', 'simple_type': 'c10::string_view?'}], + torch.Tensor.divide_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 
'simple_type': 'Tensor'}], + torch.Tensor.divide_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Scalar'}], + torch.Tensor.divide_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'True', 'name': 'rounding_mode', 'simple_type': 'c10::string_view?'}], + torch.Tensor.divide_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Scalar'}, {'is_kwarg_only': 'True', 'name': 'rounding_mode', 'simple_type': 'c10::string_view?'}], + torch.Tensor.true_divide: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Tensor'}], + torch.Tensor.true_divide: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Scalar'}], + torch.Tensor.true_divide_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Tensor'}], + torch.Tensor.true_divide_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Scalar'}], + torch.Tensor.dot: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'tensor', 'simple_type': 'Tensor'}], + torch.Tensor.vdot: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Tensor'}], + torch.Tensor.new_empty: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'size', 'simple_type': 'SymIntArrayRef'}], + torch.Tensor.new_empty_strided: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'size', 'simple_type': 'SymIntArrayRef'}, {'is_kwarg_only': 'False', 'name': 'stride', 'simple_type': 'SymIntArrayRef'}], + torch.Tensor.new_full: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'size', 'simple_type': 'SymIntArrayRef'}, {'is_kwarg_only': 'False', 'name': 'fill_value', 'simple_type': 'Scalar'}], + torch.Tensor.new_zeros: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'size', 'simple_type': 'SymIntArrayRef'}], + torch.Tensor.new_ones: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'size', 'simple_type': 'SymIntArrayRef'}], + torch.Tensor.resize_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'size', 'simple_type': 'SymIntArrayRef'}], + torch.Tensor.erf: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch.Tensor.erf_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch.Tensor.erfc: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch.Tensor.erfc_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch.Tensor.exp: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch.Tensor.exp_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch.Tensor.exp2: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + 
torch.Tensor.exp2_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch.Tensor.expm1: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch.Tensor.expm1_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch.Tensor.expand: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'size', 'simple_type': 'SymIntArrayRef'}], + torch.Tensor.expand_as: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Tensor'}], + torch.Tensor.flatten: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch.Tensor.flatten: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'start_dim', 'simple_type': 'int64_t'}, {'is_kwarg_only': 'False', 'name': 'end_dim', 'simple_type': 'int64_t'}, {'is_kwarg_only': 'False', 'name': 'out_dim', 'simple_type': 'Dimname'}], + torch.Tensor.flatten: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'start_dim', 'simple_type': 'Dimname'}, {'is_kwarg_only': 'False', 'name': 'end_dim', 'simple_type': 'Dimname'}, {'is_kwarg_only': 'False', 'name': 'out_dim', 'simple_type': 'Dimname'}], + torch.Tensor.flatten: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'dims', 'simple_type': 'DimnameList'}, {'is_kwarg_only': 'False', 'name': 'out_dim', 'simple_type': 'Dimname'}], + torch.Tensor.unflatten: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'dim', 'simple_type': 'int64_t'}, {'is_kwarg_only': 'False', 'name': 'sizes', 'simple_type': 'SymIntArrayRef'}], + torch.Tensor.unflatten: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'dim', 'simple_type': 'Dimname'}, {'is_kwarg_only': 'False', 'name': 'sizes', 'simple_type': 'SymIntArrayRef'}, {'is_kwarg_only': 'False', 'name': 'names', 'simple_type': 'DimnameList'}], + torch.Tensor.fill_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'value', 'simple_type': 'Scalar'}], + torch.Tensor.fill_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'value', 'simple_type': 'Tensor'}], + torch.Tensor.floor: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch.Tensor.floor_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch.Tensor.floor_divide: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Tensor'}], + torch.Tensor.floor_divide: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Scalar'}], + torch.Tensor.floor_divide_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Tensor'}], + torch.Tensor.floor_divide_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Scalar'}], + torch.Tensor.frac: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch.Tensor.frac_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch.Tensor.gcd: 
[{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Tensor'}], + torch.Tensor.gcd_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Tensor'}], + torch.Tensor.lcm: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Tensor'}], + torch.Tensor.lcm_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Tensor'}], + torch.Tensor.index_copy_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'dim', 'simple_type': 'int64_t'}, {'is_kwarg_only': 'False', 'name': 'index', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'source', 'simple_type': 'Tensor'}], + torch.Tensor.index_copy_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'dim', 'simple_type': 'Dimname'}, {'is_kwarg_only': 'False', 'name': 'index', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'source', 'simple_type': 'Tensor'}], + torch.Tensor.index_copy: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'dim', 'simple_type': 'int64_t'}, {'is_kwarg_only': 'False', 'name': 'index', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'source', 'simple_type': 'Tensor'}], + torch.Tensor.index_copy: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'dim', 'simple_type': 'Dimname'}, {'is_kwarg_only': 'False', 'name': 'index', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'source', 'simple_type': 'Tensor'}], + torch.Tensor.index_put_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'indices', 'simple_type': 'c10::List<c10::optional<Tensor>>'}, {'is_kwarg_only': 'False', 'name': 'values', 'simple_type': 'Tensor'}], + torch.Tensor.index_put: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'indices', 'simple_type': 'c10::List<c10::optional<Tensor>>'}, {'is_kwarg_only': 'False', 'name': 'values', 'simple_type': 'Tensor'}], + torch.Tensor.isclose: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Tensor'}], + torch.Tensor.isnan: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch.Tensor.is_distributed: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch.Tensor.is_floating_point: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch.Tensor.is_complex: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch.Tensor.is_conj: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch.Tensor._is_zerotensor: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch.Tensor.is_neg: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch.Tensor.isreal: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch.Tensor.is_nonzero: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch.Tensor.is_same_size: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type':
'Tensor'}], + torch.Tensor.is_signed: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch.Tensor.is_inference: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch.Tensor.kron: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Tensor'}], + torch.Tensor.kthvalue: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'k', 'simple_type': 'int64_t'}], + torch.Tensor.kthvalue: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'k', 'simple_type': 'int64_t'}, {'is_kwarg_only': 'False', 'name': 'dim', 'simple_type': 'Dimname'}], + torch.Tensor.nan_to_num: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch.Tensor.nan_to_num_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch.Tensor.ldexp: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Tensor'}], + torch.Tensor.ldexp_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Tensor'}], + torch.Tensor.log: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch.Tensor.log_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch.Tensor.log10: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch.Tensor.log10_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch.Tensor.log1p: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch.Tensor.log1p_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch.Tensor.log2: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch.Tensor.log2_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch.Tensor.logaddexp: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Tensor'}], + torch.Tensor.logaddexp2: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Tensor'}], + torch.Tensor.xlogy: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Tensor'}], + torch.Tensor.xlogy: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Scalar'}], + torch.Tensor.xlogy_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Tensor'}], + torch.Tensor.xlogy_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Scalar'}], + torch.Tensor.log_softmax: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'dim', 'simple_type': 'int64_t'}], + torch.Tensor.log_softmax: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'dim', 'simple_type': 'Dimname'}], + torch.Tensor.logcumsumexp: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'dim', 'simple_type': 'int64_t'}], + torch.Tensor.logcumsumexp: 
[{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'dim', 'simple_type': 'Dimname'}], + torch.Tensor.logsumexp: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'dim', 'simple_type': 'IntArrayRef', 'size': 1}], + torch.Tensor.logsumexp: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'dim', 'simple_type': 'DimnameList', 'size': 1}], + torch.Tensor.matmul: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Tensor'}], + torch.Tensor.matrix_power: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'n', 'simple_type': 'int64_t'}], + torch.Tensor.matrix_exp: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch.Tensor.aminmax: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch.Tensor.max: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'dim', 'simple_type': 'int64_t'}], + torch.Tensor.max: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'dim', 'simple_type': 'Dimname'}], + torch.Tensor.max: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch.Tensor.max: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Tensor'}], + torch.Tensor.amax: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch.Tensor.mean: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch.Tensor.mean: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'dim', 'simple_type': 'IntArrayRef?', 'size': 1}], + torch.Tensor.mean: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'dim', 'simple_type': 'DimnameList', 'size': 1}], + torch.Tensor.nanmean: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch.Tensor.median: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch.Tensor.median: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'dim', 'simple_type': 'int64_t'}], + torch.Tensor.median: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'dim', 'simple_type': 'Dimname'}], + torch.Tensor.nanmedian: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch.Tensor.nanmedian: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'dim', 'simple_type': 'int64_t'}], + torch.Tensor.nanmedian: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'dim', 'simple_type': 'Dimname'}], + torch.Tensor.min: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'dim', 'simple_type': 'int64_t'}], + torch.Tensor.min: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'dim', 'simple_type': 'Dimname'}], + torch.Tensor.min: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch.Tensor.min: [{'is_kwarg_only': 'False', 'name': 'self', 
'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Tensor'}], + torch.Tensor.amin: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch.Tensor.mm: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'mat2', 'simple_type': 'Tensor'}], + torch.Tensor.mode: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch.Tensor.mode: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'dim', 'simple_type': 'Dimname'}], + torch.Tensor.mul: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Tensor'}], + torch.Tensor.mul_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Tensor'}], + torch.Tensor.multiply: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Tensor'}], + torch.Tensor.multiply: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Scalar'}], + torch.Tensor.multiply_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Tensor'}], + torch.Tensor.multiply_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Scalar'}], + torch.Tensor.mv: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'vec', 'simple_type': 'Tensor'}], + torch.Tensor.mvlgamma: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'p', 'simple_type': 'int64_t'}], + torch.Tensor.mvlgamma_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'p', 'simple_type': 'int64_t'}], + torch.Tensor.narrow_copy: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'dim', 'simple_type': 'int64_t'}, {'is_kwarg_only': 'False', 'name': 'start', 'simple_type': 'SymInt'}, {'is_kwarg_only': 'False', 'name': 'length', 'simple_type': 'SymInt'}], + torch.Tensor.narrow: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'dim', 'simple_type': 'int64_t'}, {'is_kwarg_only': 'False', 'name': 'start', 'simple_type': 'SymInt'}, {'is_kwarg_only': 'False', 'name': 'length', 'simple_type': 'SymInt'}], + torch.Tensor.narrow: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'dim', 'simple_type': 'int64_t'}, {'is_kwarg_only': 'False', 'name': 'start', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'length', 'simple_type': 'SymInt'}], + torch.Tensor.permute: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'dims', 'simple_type': 'IntArrayRef'}], + torch.Tensor.movedim: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'source', 'simple_type': 'IntArrayRef'}, {'is_kwarg_only': 'False', 'name': 'destination', 'simple_type': 'IntArrayRef'}], + torch.Tensor.movedim: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'source', 
'simple_type': 'int64_t'}, {'is_kwarg_only': 'False', 'name': 'destination', 'simple_type': 'int64_t'}], + torch.Tensor.moveaxis: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'source', 'simple_type': 'IntArrayRef'}, {'is_kwarg_only': 'False', 'name': 'destination', 'simple_type': 'IntArrayRef'}], + torch.Tensor.moveaxis: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'source', 'simple_type': 'int64_t'}, {'is_kwarg_only': 'False', 'name': 'destination', 'simple_type': 'int64_t'}], + torch.Tensor.adjoint: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch.Tensor.is_pinned: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch.Tensor.pin_memory: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch.Tensor.pinverse: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch.Tensor.rad2deg: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch.Tensor.rad2deg_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch.Tensor.deg2rad: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch.Tensor.deg2rad_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch.Tensor.ravel: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch.Tensor.reciprocal: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch.Tensor.reciprocal_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch.Tensor.neg: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch.Tensor.neg_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch.Tensor.negative: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch.Tensor.negative_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch.Tensor.repeat: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'repeats', 'simple_type': 'SymIntArrayRef'}], + torch.Tensor.repeat_interleave: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'repeats', 'simple_type': 'Tensor'}], + torch.Tensor.repeat_interleave: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'repeats', 'simple_type': 'SymInt'}], + torch.Tensor.reshape: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'shape', 'simple_type': 'SymIntArrayRef'}], + torch.Tensor.reshape_as: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Tensor'}], + torch.Tensor.round: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch.Tensor.round: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch.Tensor.round_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch.Tensor.round_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch.Tensor.relu: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch.Tensor.relu_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch.Tensor.prelu: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 
'Tensor'}, {'is_kwarg_only': 'False', 'name': 'weight', 'simple_type': 'Tensor'}], + torch.Tensor.hardshrink: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch.Tensor.rsqrt: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch.Tensor.rsqrt_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch.Tensor.select: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'dim', 'simple_type': 'Dimname'}, {'is_kwarg_only': 'False', 'name': 'index', 'simple_type': 'int64_t'}], + torch.Tensor.select: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'dim', 'simple_type': 'int64_t'}, {'is_kwarg_only': 'False', 'name': 'index', 'simple_type': 'SymInt'}], + torch.Tensor.sigmoid: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch.Tensor.sigmoid_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch.Tensor.logit: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch.Tensor.logit_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch.Tensor.sin: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch.Tensor.sin_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch.Tensor.sinc: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch.Tensor.sinc_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch.Tensor.sinh: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch.Tensor.sinh_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch.Tensor.detach: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch.Tensor.detach_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch.Tensor.slice_scatter: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'src', 'simple_type': 'Tensor'}], + torch.Tensor.select_scatter: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'src', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'dim', 'simple_type': 'int64_t'}, {'is_kwarg_only': 'False', 'name': 'index', 'simple_type': 'SymInt'}], + torch.Tensor.diagonal_scatter: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'src', 'simple_type': 'Tensor'}], + torch.Tensor.as_strided_scatter: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'src', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'size', 'simple_type': 'SymIntArrayRef'}, {'is_kwarg_only': 'False', 'name': 'stride', 'simple_type': 'SymIntArrayRef'}], + torch.Tensor.smm: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'mat2', 'simple_type': 'Tensor'}], + torch.Tensor.softmax: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'dim', 'simple_type': 'int64_t'}], + torch.Tensor.softmax: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'dim', 'simple_type': 'Dimname'}], + torch.Tensor.unsafe_split: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 
'name': 'split_size', 'simple_type': 'SymInt'}], + torch.Tensor.split: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'split_size', 'simple_type': 'SymInt'}], + torch.Tensor.split: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'split_size', 'simple_type': 'SymIntArrayRef'}], + torch.Tensor.unsafe_split_with_sizes: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'split_sizes', 'simple_type': 'SymIntArrayRef'}], + torch.Tensor.split_with_sizes: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'split_sizes', 'simple_type': 'SymIntArrayRef'}], + torch.Tensor.hsplit: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'sections', 'simple_type': 'int64_t'}], + torch.Tensor.hsplit: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'indices', 'simple_type': 'IntArrayRef'}], + torch.Tensor.vsplit: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'sections', 'simple_type': 'int64_t'}], + torch.Tensor.vsplit: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'indices', 'simple_type': 'IntArrayRef'}], + torch.Tensor.dsplit: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'sections', 'simple_type': 'int64_t'}], + torch.Tensor.dsplit: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'indices', 'simple_type': 'IntArrayRef'}], + torch.Tensor.squeeze: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch.Tensor.squeeze: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'dim', 'simple_type': 'int64_t'}], + torch.Tensor.squeeze: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'dim', 'simple_type': 'Dimname'}], + torch.Tensor.squeeze: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'dim', 'simple_type': 'IntArrayRef'}], + torch.Tensor.squeeze_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch.Tensor.squeeze_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'dim', 'simple_type': 'int64_t'}], + torch.Tensor.squeeze_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'dim', 'simple_type': 'IntArrayRef'}], + torch.Tensor.squeeze_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'dim', 'simple_type': 'Dimname'}], + torch.Tensor.sspaddmm: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'mat1', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'mat2', 'simple_type': 'Tensor'}], + torch.Tensor.stft: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'n_fft', 'simple_type': 'int64_t'}], + torch.Tensor.stft: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'n_fft', 'simple_type': 'int64_t'}], + 
torch.Tensor.istft: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'n_fft', 'simple_type': 'int64_t'}], + torch.Tensor.sum: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch.Tensor.sum: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'dim', 'simple_type': 'IntArrayRef?', 'size': 1}], + torch.Tensor.sum: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'dim', 'simple_type': 'DimnameList', 'size': 1}], + torch.Tensor.nansum: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch.Tensor.sum_to_size: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'size', 'simple_type': 'SymIntArrayRef'}], + torch.Tensor.sqrt: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch.Tensor.sqrt_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch.Tensor.square: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch.Tensor.square_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch.Tensor.std: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch.Tensor.std: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'dim', 'simple_type': 'IntArrayRef?', 'size': 1}], + torch.Tensor.std: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch.Tensor.std: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'dim', 'simple_type': 'DimnameList', 'size': 1}], + torch.Tensor.std: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'dim', 'simple_type': 'DimnameList', 'size': 1}], + torch.Tensor.prod: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch.Tensor.prod: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'dim', 'simple_type': 'int64_t'}], + torch.Tensor.prod: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'dim', 'simple_type': 'Dimname'}], + torch.Tensor.t: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch.Tensor.t_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch.Tensor.tan: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch.Tensor.tan_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch.Tensor.tanh: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch.Tensor.tanh_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch.Tensor.tile: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'dims', 'simple_type': 'SymIntArrayRef'}], + torch.Tensor.transpose: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'dim0', 'simple_type': 'int64_t'}, {'is_kwarg_only': 'False', 'name': 'dim1', 'simple_type': 'int64_t'}], + torch.Tensor.transpose: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'dim0', 'simple_type': 'Dimname'}, {'is_kwarg_only': 'False', 'name': 'dim1', 'simple_type': 
'Dimname'}], + torch.Tensor.transpose_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'dim0', 'simple_type': 'int64_t'}, {'is_kwarg_only': 'False', 'name': 'dim1', 'simple_type': 'int64_t'}], + torch.Tensor.flip: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'dims', 'simple_type': 'IntArrayRef'}], + torch.Tensor.fliplr: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch.Tensor.flipud: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch.Tensor.roll: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'shifts', 'simple_type': 'SymIntArrayRef', 'size': 1}], + torch.Tensor.rot90: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch.Tensor._nested_tensor_size: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch.Tensor._nested_tensor_strides: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch.Tensor._nested_tensor_storage_offsets: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch.Tensor.trunc: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch.Tensor.trunc_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch.Tensor.fix: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch.Tensor.fix_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch.Tensor.type_as: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Tensor'}], + torch.Tensor.unsqueeze: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'dim', 'simple_type': 'int64_t'}], + torch.Tensor.unsqueeze_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'dim', 'simple_type': 'int64_t'}], + torch.Tensor.var: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch.Tensor.var: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'dim', 'simple_type': 'IntArrayRef?', 'size': 1}], + torch.Tensor.var: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch.Tensor.var: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'dim', 'simple_type': 'DimnameList', 'size': 1}], + torch.Tensor.var: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'dim', 'simple_type': 'DimnameList', 'size': 1}], + torch.Tensor.view_as: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Tensor'}], + torch.Tensor.where: [{'is_kwarg_only': 'False', 'name': 'condition', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Tensor'}], + torch.Tensor.where: [{'is_kwarg_only': 'False', 'name': 'condition', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Scalar'}], + torch.Tensor.norm: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 
'name': 'p', 'simple_type': 'Scalar?'}, {'is_kwarg_only': 'True', 'name': 'dtype', 'simple_type': 'ScalarType'}], + torch.Tensor.norm: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch.Tensor.norm: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'p', 'simple_type': 'Scalar?'}, {'is_kwarg_only': 'False', 'name': 'dim', 'simple_type': 'IntArrayRef', 'size': 1}, {'is_kwarg_only': 'False', 'name': 'keepdim', 'simple_type': 'bool'}, {'is_kwarg_only': 'True', 'name': 'dtype', 'simple_type': 'ScalarType'}], + torch.Tensor.norm: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'p', 'simple_type': 'Scalar?'}, {'is_kwarg_only': 'False', 'name': 'dim', 'simple_type': 'IntArrayRef', 'size': 1}], + torch.Tensor.norm: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'p', 'simple_type': 'Scalar?'}, {'is_kwarg_only': 'False', 'name': 'dim', 'simple_type': 'DimnameList', 'size': 1}, {'is_kwarg_only': 'False', 'name': 'keepdim', 'simple_type': 'bool'}, {'is_kwarg_only': 'True', 'name': 'dtype', 'simple_type': 'ScalarType'}], + torch.Tensor.norm: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'p', 'simple_type': 'Scalar?'}, {'is_kwarg_only': 'False', 'name': 'dim', 'simple_type': 'DimnameList', 'size': 1}], + torch.Tensor.frexp: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch.Tensor.clone: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch.Tensor.positive: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch.Tensor.resize_as_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'the_template', 'simple_type': 'Tensor'}], + torch.Tensor.resize_as_sparse_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'the_template', 'simple_type': 'Tensor'}], + torch.Tensor.zero_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch.Tensor.sub: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Tensor'}], + torch.Tensor.sub_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Tensor'}], + torch.Tensor.subtract: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Tensor'}], + torch.Tensor.subtract: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Scalar'}], + torch.Tensor.subtract_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Tensor'}], + torch.Tensor.subtract_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Scalar'}], + torch.Tensor.heaviside: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'values', 'simple_type': 'Tensor'}], + torch.Tensor.heaviside_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'values', 'simple_type': 'Tensor'}], + torch.Tensor.addmm: 
[{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'mat1', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'mat2', 'simple_type': 'Tensor'}], + torch.Tensor.addmm_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'mat1', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'mat2', 'simple_type': 'Tensor'}], + torch.Tensor._addmm_activation: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'mat1', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'mat2', 'simple_type': 'Tensor'}], + torch.Tensor.sparse_resize_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'size', 'simple_type': 'IntArrayRef'}, {'is_kwarg_only': 'False', 'name': 'sparse_dim', 'simple_type': 'int64_t'}, {'is_kwarg_only': 'False', 'name': 'dense_dim', 'simple_type': 'int64_t'}], + torch.Tensor.sparse_resize_and_clear_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'size', 'simple_type': 'IntArrayRef'}, {'is_kwarg_only': 'False', 'name': 'sparse_dim', 'simple_type': 'int64_t'}, {'is_kwarg_only': 'False', 'name': 'dense_dim', 'simple_type': 'int64_t'}], + torch.Tensor.sparse_mask: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'mask', 'simple_type': 'Tensor'}], + torch.Tensor._sparse_mask_projection: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'mask', 'simple_type': 'Tensor'}], + torch.Tensor.to_dense: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch.Tensor._to_dense: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch.Tensor.sparse_dim: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch.Tensor._dimI: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch.Tensor.dense_dim: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch.Tensor._dimV: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch.Tensor._nnz: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch.Tensor.coalesce: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch.Tensor.is_coalesced: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch.Tensor._indices: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch.Tensor._values: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch.Tensor._coalesced_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'coalesced', 'simple_type': 'bool'}], + torch.Tensor.indices: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch.Tensor.values: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch.Tensor.crow_indices: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch.Tensor.col_indices: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch.Tensor.ccol_indices: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch.Tensor.row_indices: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch.Tensor.unbind: [{'is_kwarg_only': 
'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch.Tensor.unbind: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'dim', 'simple_type': 'Dimname'}], + torch.Tensor.to_sparse: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'sparse_dim', 'simple_type': 'int64_t'}], + torch.Tensor.to_sparse: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch.Tensor._to_sparse: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'sparse_dim', 'simple_type': 'int64_t'}], + torch.Tensor._to_sparse: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch.Tensor.to_sparse_csr: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch.Tensor._to_sparse_csr: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch.Tensor.to_sparse_csc: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch.Tensor._to_sparse_csc: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch.Tensor.to_sparse_bsr: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'blocksize', 'simple_type': 'IntArrayRef', 'size': 2}], + torch.Tensor._to_sparse_bsr: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'blocksize', 'simple_type': 'IntArrayRef', 'size': 2}], + torch.Tensor.to_sparse_bsc: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'blocksize', 'simple_type': 'IntArrayRef', 'size': 2}], + torch.Tensor._to_sparse_bsc: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'blocksize', 'simple_type': 'IntArrayRef', 'size': 2}], + torch.Tensor.to_mkldnn: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch.Tensor.dequantize: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch.Tensor.q_scale: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch.Tensor.q_zero_point: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch.Tensor.q_per_channel_scales: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch.Tensor.q_per_channel_zero_points: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch.Tensor.q_per_channel_axis: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch.Tensor.int_repr: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch.Tensor.qscheme: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch.Tensor._autocast_to_reduced_precision: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'cuda_enabled', 'simple_type': 'bool'}, {'is_kwarg_only': 'False', 'name': 'cpu_enabled', 'simple_type': 'bool'}, {'is_kwarg_only': 'False', 'name': 'cuda_dtype', 'simple_type': 'ScalarType'}, {'is_kwarg_only': 'False', 'name': 'cpu_dtype', 'simple_type': 'ScalarType'}], + torch.Tensor._autocast_to_full_precision: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'cuda_enabled', 'simple_type': 'bool'}, {'is_kwarg_only': 'False', 'name': 'cpu_enabled', 'simple_type': 'bool'}], + torch.Tensor.is_set_to: 
[{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'tensor', 'simple_type': 'Tensor'}], + torch.Tensor.masked_fill_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'mask', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'value', 'simple_type': 'Scalar'}], + torch.Tensor.masked_fill_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'mask', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'value', 'simple_type': 'Tensor'}], + torch.Tensor.masked_fill: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'mask', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'value', 'simple_type': 'Scalar'}], + torch.Tensor.masked_fill: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'mask', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'value', 'simple_type': 'Tensor'}], + torch.Tensor.masked_scatter_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'mask', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'source', 'simple_type': 'Tensor'}], + torch.Tensor.masked_scatter: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'mask', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'source', 'simple_type': 'Tensor'}], + torch.Tensor.view: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'size', 'simple_type': 'SymIntArrayRef'}], + torch.Tensor.view: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'dtype', 'simple_type': 'ScalarType'}], + torch.Tensor.put_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'index', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'source', 'simple_type': 'Tensor'}], + torch.Tensor.put: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'index', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'source', 'simple_type': 'Tensor'}], + torch.Tensor.index_add_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'dim', 'simple_type': 'int64_t'}, {'is_kwarg_only': 'False', 'name': 'index', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'source', 'simple_type': 'Tensor'}], + torch.Tensor.index_add: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'dim', 'simple_type': 'int64_t'}, {'is_kwarg_only': 'False', 'name': 'index', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'source', 'simple_type': 'Tensor'}], + torch.Tensor.index_add: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'dim', 'simple_type': 'Dimname'}, {'is_kwarg_only': 'False', 'name': 'index', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'source', 'simple_type': 'Tensor'}], + torch.Tensor.index_reduce_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'dim', 'simple_type': 'int64_t'}, {'is_kwarg_only': 'False', 'name': 'index', 'simple_type': 'Tensor'}, 
{'is_kwarg_only': 'False', 'name': 'source', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'reduce', 'simple_type': 'c10::string_view'}], + torch.Tensor.index_reduce: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'dim', 'simple_type': 'int64_t'}, {'is_kwarg_only': 'False', 'name': 'index', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'source', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'reduce', 'simple_type': 'c10::string_view'}], + torch.Tensor.index_fill_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'dim', 'simple_type': 'int64_t'}, {'is_kwarg_only': 'False', 'name': 'index', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'value', 'simple_type': 'Scalar'}], + torch.Tensor.index_fill_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'dim', 'simple_type': 'int64_t'}, {'is_kwarg_only': 'False', 'name': 'index', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'value', 'simple_type': 'Tensor'}], + torch.Tensor.index_fill_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'dim', 'simple_type': 'Dimname'}, {'is_kwarg_only': 'False', 'name': 'index', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'value', 'simple_type': 'Scalar'}], + torch.Tensor.index_fill_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'dim', 'simple_type': 'Dimname'}, {'is_kwarg_only': 'False', 'name': 'index', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'value', 'simple_type': 'Tensor'}], + torch.Tensor.index_fill: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'dim', 'simple_type': 'int64_t'}, {'is_kwarg_only': 'False', 'name': 'index', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'value', 'simple_type': 'Scalar'}], + torch.Tensor.index_fill: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'dim', 'simple_type': 'int64_t'}, {'is_kwarg_only': 'False', 'name': 'index', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'value', 'simple_type': 'Tensor'}], + torch.Tensor.index_fill: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'dim', 'simple_type': 'Dimname'}, {'is_kwarg_only': 'False', 'name': 'index', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'value', 'simple_type': 'Scalar'}], + torch.Tensor.index_fill: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'dim', 'simple_type': 'Dimname'}, {'is_kwarg_only': 'False', 'name': 'index', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'value', 'simple_type': 'Tensor'}], + torch.Tensor.scatter: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'dim', 'simple_type': 'int64_t'}, {'is_kwarg_only': 'False', 'name': 'index', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'src', 'simple_type': 'Tensor'}], + torch.Tensor.scatter: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'dim', 'simple_type': 'int64_t'}, {'is_kwarg_only': 'False', 'name': 'index', 'simple_type': 'Tensor'}, 
{'is_kwarg_only': 'False', 'name': 'value', 'simple_type': 'Scalar'}], + torch.Tensor.scatter: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'dim', 'simple_type': 'int64_t'}, {'is_kwarg_only': 'False', 'name': 'index', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'src', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'True', 'name': 'reduce', 'simple_type': 'c10::string_view'}], + torch.Tensor.scatter: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'dim', 'simple_type': 'int64_t'}, {'is_kwarg_only': 'False', 'name': 'index', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'value', 'simple_type': 'Scalar'}, {'is_kwarg_only': 'True', 'name': 'reduce', 'simple_type': 'c10::string_view'}], + torch.Tensor.scatter: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'dim', 'simple_type': 'Dimname'}, {'is_kwarg_only': 'False', 'name': 'index', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'src', 'simple_type': 'Tensor'}], + torch.Tensor.scatter: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'dim', 'simple_type': 'Dimname'}, {'is_kwarg_only': 'False', 'name': 'index', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'value', 'simple_type': 'Scalar'}], + torch.Tensor.scatter_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'dim', 'simple_type': 'int64_t'}, {'is_kwarg_only': 'False', 'name': 'index', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'src', 'simple_type': 'Tensor'}], + torch.Tensor.scatter_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'dim', 'simple_type': 'int64_t'}, {'is_kwarg_only': 'False', 'name': 'index', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'value', 'simple_type': 'Scalar'}], + torch.Tensor.scatter_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'dim', 'simple_type': 'int64_t'}, {'is_kwarg_only': 'False', 'name': 'index', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'src', 'simple_type': 'Tensor'}], + torch.Tensor.scatter_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'dim', 'simple_type': 'int64_t'}, {'is_kwarg_only': 'False', 'name': 'index', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'value', 'simple_type': 'Scalar'}], + torch.Tensor.scatter_add: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'dim', 'simple_type': 'int64_t'}, {'is_kwarg_only': 'False', 'name': 'index', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'src', 'simple_type': 'Tensor'}], + torch.Tensor.scatter_add: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'dim', 'simple_type': 'Dimname'}, {'is_kwarg_only': 'False', 'name': 'index', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'src', 'simple_type': 'Tensor'}], + torch.Tensor.scatter_add_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'dim', 'simple_type': 'int64_t'}, {'is_kwarg_only': 'False', 'name': 'index', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'src', 
'simple_type': 'Tensor'}], + torch.Tensor.scatter_reduce: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'dim', 'simple_type': 'int64_t'}, {'is_kwarg_only': 'False', 'name': 'index', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'src', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'reduce', 'simple_type': 'c10::string_view'}], + torch.Tensor.scatter_reduce_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'dim', 'simple_type': 'int64_t'}, {'is_kwarg_only': 'False', 'name': 'index', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'src', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'reduce', 'simple_type': 'c10::string_view'}], + torch.Tensor.eq_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Scalar'}], + torch.Tensor.eq_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Tensor'}], + torch.Tensor.bitwise_and: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Scalar'}], + torch.Tensor.bitwise_and: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Tensor'}], + torch.Tensor.bitwise_and_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Scalar'}], + torch.Tensor.bitwise_and_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Tensor'}], + torch.Tensor.__and__: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Scalar'}], + torch.Tensor.__and__: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Tensor'}], + torch.Tensor.__iand__: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Scalar'}], + torch.Tensor.__iand__: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Tensor'}], + torch.Tensor.bitwise_or: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Scalar'}], + torch.Tensor.bitwise_or: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Tensor'}], + torch.Tensor.bitwise_or_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Scalar'}], + torch.Tensor.bitwise_or_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Tensor'}], + torch.Tensor.__or__: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Scalar'}], + torch.Tensor.__or__: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Tensor'}], + torch.Tensor.__ior__: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 
'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Scalar'}], + torch.Tensor.__ior__: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Tensor'}], + torch.Tensor.bitwise_xor: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Scalar'}], + torch.Tensor.bitwise_xor: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Tensor'}], + torch.Tensor.bitwise_xor_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Scalar'}], + torch.Tensor.bitwise_xor_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Tensor'}], + torch.Tensor.__xor__: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Scalar'}], + torch.Tensor.__xor__: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Tensor'}], + torch.Tensor.__ixor__: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Scalar'}], + torch.Tensor.__ixor__: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Tensor'}], + torch.Tensor.__lshift__: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Scalar'}], + torch.Tensor.__lshift__: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Tensor'}], + torch.Tensor.__ilshift__: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Scalar'}], + torch.Tensor.__ilshift__: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Tensor'}], + torch.Tensor.bitwise_left_shift: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Tensor'}], + torch.Tensor.bitwise_left_shift: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Scalar'}], + torch.Tensor.bitwise_left_shift_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Tensor'}], + torch.Tensor.bitwise_left_shift_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Scalar'}], + torch.Tensor.__rshift__: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Scalar'}], + torch.Tensor.__rshift__: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Tensor'}], + torch.Tensor.__irshift__: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Scalar'}], + torch.Tensor.__irshift__: [{'is_kwarg_only': 'False', 'name': 'self', 
'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Tensor'}], + torch.Tensor.bitwise_right_shift: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Tensor'}], + torch.Tensor.bitwise_right_shift: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Scalar'}], + torch.Tensor.bitwise_right_shift_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Tensor'}], + torch.Tensor.bitwise_right_shift_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Scalar'}], + torch.Tensor.tril_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch.Tensor.triu_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch.Tensor.digamma_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch.Tensor.lerp_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'end', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'weight', 'simple_type': 'Scalar'}], + torch.Tensor.lerp_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'end', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'weight', 'simple_type': 'Tensor'}], + torch.Tensor.addbmm_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'batch1', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'batch2', 'simple_type': 'Tensor'}], + torch.Tensor.addbmm: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'batch1', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'batch2', 'simple_type': 'Tensor'}], + torch.Tensor.random_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'from', 'simple_type': 'int64_t'}, {'is_kwarg_only': 'False', 'name': 'to', 'simple_type': 'int64_t?'}], + torch.Tensor.random_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'to', 'simple_type': 'int64_t'}], + torch.Tensor.random_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch.Tensor.uniform_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch.Tensor.cauchy_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch.Tensor.log_normal_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch.Tensor.exponential_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch.Tensor.geometric_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'p', 'simple_type': 'double'}], + torch.Tensor.diag: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch.Tensor.cross: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Tensor'}], + torch.Tensor.triu: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch.Tensor.tril: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch.Tensor.trace: [{'is_kwarg_only': 
'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch.Tensor.ne: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Scalar'}], + torch.Tensor.ne: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Tensor'}], + torch.Tensor.ne_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Scalar'}], + torch.Tensor.ne_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Tensor'}], + torch.Tensor.not_equal: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Scalar'}], + torch.Tensor.not_equal: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Tensor'}], + torch.Tensor.not_equal_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Scalar'}], + torch.Tensor.not_equal_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Tensor'}], + torch.Tensor.eq: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Scalar'}], + torch.Tensor.eq: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Tensor'}], + torch.Tensor.ge: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Scalar'}], + torch.Tensor.ge: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Tensor'}], + torch.Tensor.ge_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Scalar'}], + torch.Tensor.ge_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Tensor'}], + torch.Tensor.greater_equal: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Scalar'}], + torch.Tensor.greater_equal: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Tensor'}], + torch.Tensor.greater_equal_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Scalar'}], + torch.Tensor.greater_equal_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Tensor'}], + torch.Tensor.le: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Scalar'}], + torch.Tensor.le: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Tensor'}], + torch.Tensor.le_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Scalar'}], + torch.Tensor.le_: [{'is_kwarg_only': 'False', 'name': 
'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Tensor'}], + torch.Tensor.less_equal: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Scalar'}], + torch.Tensor.less_equal: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Tensor'}], + torch.Tensor.less_equal_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Scalar'}], + torch.Tensor.less_equal_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Tensor'}], + torch.Tensor.gt: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Scalar'}], + torch.Tensor.gt: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Tensor'}], + torch.Tensor.gt_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Scalar'}], + torch.Tensor.gt_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Tensor'}], + torch.Tensor.greater: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Scalar'}], + torch.Tensor.greater: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Tensor'}], + torch.Tensor.greater_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Scalar'}], + torch.Tensor.greater_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Tensor'}], + torch.Tensor.lt: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Scalar'}], + torch.Tensor.lt: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Tensor'}], + torch.Tensor.lt_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Scalar'}], + torch.Tensor.lt_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Tensor'}], + torch.Tensor.less: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Scalar'}], + torch.Tensor.less: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Tensor'}], + torch.Tensor.less_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Scalar'}], + torch.Tensor.less_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Tensor'}], + torch.Tensor.take: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'index', 'simple_type': 'Tensor'}], + 
torch.Tensor.take_along_dim: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'indices', 'simple_type': 'Tensor'}], + torch.Tensor.index_select: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'dim', 'simple_type': 'int64_t'}, {'is_kwarg_only': 'False', 'name': 'index', 'simple_type': 'Tensor'}], + torch.Tensor.index_select: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'dim', 'simple_type': 'Dimname'}, {'is_kwarg_only': 'False', 'name': 'index', 'simple_type': 'Tensor'}], + torch.Tensor.masked_select: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'mask', 'simple_type': 'Tensor'}], + torch.Tensor.nonzero_static: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'True', 'name': 'size', 'simple_type': 'int64_t'}], + torch.Tensor.argwhere: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch.Tensor.gather: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'dim', 'simple_type': 'int64_t'}, {'is_kwarg_only': 'False', 'name': 'index', 'simple_type': 'Tensor'}], + torch.Tensor.gather: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'dim', 'simple_type': 'Dimname'}, {'is_kwarg_only': 'False', 'name': 'index', 'simple_type': 'Tensor'}], + torch.Tensor.addcmul: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'tensor1', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'tensor2', 'simple_type': 'Tensor'}], + torch.Tensor.addcmul_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'tensor1', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'tensor2', 'simple_type': 'Tensor'}], + torch.Tensor.addcdiv: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'tensor1', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'tensor2', 'simple_type': 'Tensor'}], + torch.Tensor.addcdiv_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'tensor1', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'tensor2', 'simple_type': 'Tensor'}], + torch.Tensor.triangular_solve: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'A', 'simple_type': 'Tensor'}], + torch.Tensor.svd: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch.Tensor.swapaxes: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'axis0', 'simple_type': 'int64_t'}, {'is_kwarg_only': 'False', 'name': 'axis1', 'simple_type': 'int64_t'}], + torch.Tensor.swapaxes_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'axis0', 'simple_type': 'int64_t'}, {'is_kwarg_only': 'False', 'name': 'axis1', 'simple_type': 'int64_t'}], + torch.Tensor.swapdims: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'dim0', 'simple_type': 'int64_t'}, {'is_kwarg_only': 'False', 'name': 'dim1', 'simple_type': 'int64_t'}], + torch.Tensor.swapdims_: [{'is_kwarg_only': 'False', 'name': 'self', 
'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'dim0', 'simple_type': 'int64_t'}, {'is_kwarg_only': 'False', 'name': 'dim1', 'simple_type': 'int64_t'}], + torch.Tensor.cholesky: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch.Tensor.cholesky_solve: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'input2', 'simple_type': 'Tensor'}], + torch.Tensor.cholesky_inverse: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch.Tensor.qr: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch.Tensor.geqrf: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch.Tensor.orgqr: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'input2', 'simple_type': 'Tensor'}], + torch.Tensor.ormqr: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'input2', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'input3', 'simple_type': 'Tensor'}], + torch.Tensor.lu_solve: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'LU_data', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'LU_pivots', 'simple_type': 'Tensor'}], + torch.Tensor.multinomial: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'num_samples', 'simple_type': 'int64_t'}], + torch.Tensor.lgamma_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch.Tensor.lgamma: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch.Tensor.digamma: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch.Tensor.polygamma: [{'is_kwarg_only': 'False', 'name': 'n', 'simple_type': 'int64_t'}, {'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch.Tensor.polygamma_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'n', 'simple_type': 'int64_t'}], + torch.Tensor.erfinv: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch.Tensor.erfinv_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch.Tensor.i0: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch.Tensor.i0_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch.Tensor.sign: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch.Tensor.sign_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch.Tensor.signbit: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch.Tensor.dist: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Tensor'}], + torch.Tensor.atan2_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Tensor'}], + torch.Tensor.atan2: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Tensor'}], + torch.Tensor.arctan2: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Tensor'}], + torch.Tensor.arctan2_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, 
{'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Tensor'}], + torch.Tensor.lerp: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'end', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'weight', 'simple_type': 'Scalar'}], + torch.Tensor.lerp: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'end', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'weight', 'simple_type': 'Tensor'}], + torch.Tensor.histc: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch.Tensor.histogram: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'bins', 'simple_type': 'Tensor'}], + torch.Tensor.histogram: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch.Tensor.fmod: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Scalar'}], + torch.Tensor.fmod: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Tensor'}], + torch.Tensor.fmod_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Scalar'}], + torch.Tensor.fmod_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Tensor'}], + torch.Tensor.hypot: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Tensor'}], + torch.Tensor.hypot_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Tensor'}], + torch.Tensor.igamma: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Tensor'}], + torch.Tensor.igamma_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Tensor'}], + torch.Tensor.igammac: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Tensor'}], + torch.Tensor.igammac_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Tensor'}], + torch.Tensor.nextafter: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Tensor'}], + torch.Tensor.nextafter_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Tensor'}], + torch.Tensor.remainder: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Scalar'}], + torch.Tensor.remainder: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Tensor'}], + torch.Tensor.remainder_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Scalar'}], + torch.Tensor.remainder_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Tensor'}], + torch.Tensor.fmin: 
[{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Tensor'}], + torch.Tensor.fmax: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Tensor'}], + torch.Tensor.maximum: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Tensor'}], + torch.Tensor.minimum: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Tensor'}], + torch.Tensor.quantile: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'q', 'simple_type': 'Tensor'}], + torch.Tensor.quantile: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'q', 'simple_type': 'double'}], + torch.Tensor.nanquantile: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'q', 'simple_type': 'Tensor'}], + torch.Tensor.nanquantile: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'q', 'simple_type': 'double'}], + torch.Tensor.sort: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch.Tensor.sort: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'True', 'name': 'stable', 'simple_type': 'bool?'}], + torch.Tensor.sort: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'dim', 'simple_type': 'Dimname'}], + torch.Tensor.sort: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'True', 'name': 'stable', 'simple_type': 'bool?'}, {'is_kwarg_only': 'True', 'name': 'dim', 'simple_type': 'Dimname'}], + torch.Tensor.msort: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch.Tensor.argsort: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch.Tensor.argsort: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'True', 'name': 'stable', 'simple_type': 'bool'}], + torch.Tensor.argsort: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'dim', 'simple_type': 'Dimname'}], + torch.Tensor.topk: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'k', 'simple_type': 'SymInt'}], + torch.Tensor.renorm: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'p', 'simple_type': 'Scalar'}, {'is_kwarg_only': 'False', 'name': 'dim', 'simple_type': 'int64_t'}, {'is_kwarg_only': 'False', 'name': 'maxnorm', 'simple_type': 'Scalar'}], + torch.Tensor.renorm_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'p', 'simple_type': 'Scalar'}, {'is_kwarg_only': 'False', 'name': 'dim', 'simple_type': 'int64_t'}, {'is_kwarg_only': 'False', 'name': 'maxnorm', 'simple_type': 'Scalar'}], + torch.Tensor.unfold: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'dimension', 'simple_type': 'int64_t'}, {'is_kwarg_only': 'False', 'name': 'size', 'simple_type': 'int64_t'}, {'is_kwarg_only': 'False', 'name': 'step', 'simple_type': 'int64_t'}], + torch.Tensor.equal: 
[{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Tensor'}], + torch.Tensor.pow: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'exponent', 'simple_type': 'Tensor'}], + torch.Tensor.pow: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'exponent', 'simple_type': 'Scalar'}], + torch.Tensor.pow_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'exponent', 'simple_type': 'Scalar'}], + torch.Tensor.pow_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'exponent', 'simple_type': 'Tensor'}], + torch.Tensor.float_power: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'exponent', 'simple_type': 'Tensor'}], + torch.Tensor.float_power: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'exponent', 'simple_type': 'Scalar'}], + torch.Tensor.float_power_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'exponent', 'simple_type': 'Scalar'}], + torch.Tensor.float_power_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'exponent', 'simple_type': 'Tensor'}], + torch.Tensor.normal_: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch.Tensor.isfinite: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch.Tensor.isinf: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch.Tensor.record_stream: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 's', 'simple_type': 'Stream'}], + torch.Tensor.isposinf: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch.Tensor.isneginf: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch.Tensor.det: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch.Tensor.slogdet: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch.Tensor.logdet: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch.Tensor.inverse: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}], + torch.Tensor.inner: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'other', 'simple_type': 'Tensor'}], + torch.Tensor.outer: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'vec2', 'simple_type': 'Tensor'}], + torch.Tensor.ger: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'vec2', 'simple_type': 'Tensor'}], + torch.Tensor.to_padded_tensor: [{'is_kwarg_only': 'False', 'name': 'self', 'simple_type': 'Tensor'}, {'is_kwarg_only': 'False', 'name': 'padding', 'simple_type': 'double'}], +} diff --git a/env-llmeval/lib/python3.10/site-packages/torch/testing/_internal/hypothesis_utils.py b/env-llmeval/lib/python3.10/site-packages/torch/testing/_internal/hypothesis_utils.py new file mode 100644 index 0000000000000000000000000000000000000000..0654a64b96b268b5c64cb95538551f5967b30395 --- /dev/null +++ 
b/env-llmeval/lib/python3.10/site-packages/torch/testing/_internal/hypothesis_utils.py @@ -0,0 +1,369 @@ +from collections import defaultdict +from collections.abc import Iterable +import numpy as np +import torch + +import hypothesis +from functools import reduce +from hypothesis import assume +from hypothesis import settings +from hypothesis import strategies as st +from hypothesis.extra import numpy as stnp +from hypothesis.strategies import SearchStrategy + +from torch.testing._internal.common_quantized import _calculate_dynamic_qparams, _calculate_dynamic_per_channel_qparams + +# Setup for the hypothesis tests. +# The tuples are (torch_quantized_dtype, zero_point_enforce), where the last +# element is enforced zero_point. If None, any zero_point point within the +# range of the data type is OK. + +# Tuple with all quantized data types. +_ALL_QINT_TYPES = ( + torch.quint8, + torch.qint8, + torch.qint32, +) + +# Enforced zero point for every quantized data type. +# If None, any zero_point point within the range of the data type is OK. +_ENFORCED_ZERO_POINT = defaultdict(lambda: None, { + torch.quint8: None, + torch.qint8: None, + torch.qint32: 0 +}) + +def _get_valid_min_max(qparams): + scale, zero_point, quantized_type = qparams + adjustment = 1 + torch.finfo(torch.float).eps + _long_type_info = torch.iinfo(torch.long) + long_min, long_max = _long_type_info.min / adjustment, _long_type_info.max / adjustment + # make sure intermediate results are within the range of long + min_value = max((long_min - zero_point) * scale, (long_min / scale + zero_point)) + max_value = min((long_max - zero_point) * scale, (long_max / scale + zero_point)) + return np.float32(min_value), np.float32(max_value) + +# This wrapper wraps around `st.floats` and checks the version of `hypothesis`, if +# it is too old, removes the `width` parameter (which was introduced) +# in 3.67.0 +def _floats_wrapper(*args, **kwargs): + if 'width' in kwargs and hypothesis.version.__version_info__ < (3, 67, 0): + # As long as nan, inf, min, max are not specified, reimplement the width + # parameter for older versions of hypothesis. + no_nan_and_inf = ( + (('allow_nan' in kwargs and not kwargs['allow_nan']) or + 'allow_nan' not in kwargs) and + (('allow_infinity' in kwargs and not kwargs['allow_infinity']) or + 'allow_infinity' not in kwargs)) + min_and_max_not_specified = ( + len(args) == 0 and + 'min_value' not in kwargs and + 'max_value' not in kwargs + ) + if no_nan_and_inf and min_and_max_not_specified: + if kwargs['width'] == 16: + kwargs['min_value'] = torch.finfo(torch.float16).min + kwargs['max_value'] = torch.finfo(torch.float16).max + elif kwargs['width'] == 32: + kwargs['min_value'] = torch.finfo(torch.float32).min + kwargs['max_value'] = torch.finfo(torch.float32).max + elif kwargs['width'] == 64: + kwargs['min_value'] = torch.finfo(torch.float64).min + kwargs['max_value'] = torch.finfo(torch.float64).max + kwargs.pop('width') + return st.floats(*args, **kwargs) + +def floats(*args, **kwargs): + if 'width' not in kwargs: + kwargs['width'] = 32 + return _floats_wrapper(*args, **kwargs) + +"""Hypothesis filter to avoid overflows with quantized tensors. + +Args: + tensor: Tensor of floats to filter + qparams: Quantization parameters as returned by the `qparams`. + +Returns: + True + +Raises: + hypothesis.UnsatisfiedAssumption + +Note: This filter is slow. Use it only when filtering of the test cases is + absolutely necessary! 
+""" +def assume_not_overflowing(tensor, qparams): + min_value, max_value = _get_valid_min_max(qparams) + assume(tensor.min() >= min_value) + assume(tensor.max() <= max_value) + return True + +"""Strategy for generating the quantization parameters. + +Args: + dtypes: quantized data types to sample from. + scale_min / scale_max: Min and max scales. If None, set to 1e-3 / 1e3. + zero_point_min / zero_point_max: Min and max for the zero point. If None, + set to the minimum and maximum of the quantized data type. + Note: The min and max are only valid if the zero_point is not enforced + by the data type itself. + +Generates: + scale: Sampled scale. + zero_point: Sampled zero point. + quantized_type: Sampled quantized type. +""" +@st.composite +def qparams(draw, dtypes=None, scale_min=None, scale_max=None, + zero_point_min=None, zero_point_max=None): + if dtypes is None: + dtypes = _ALL_QINT_TYPES + if not isinstance(dtypes, (list, tuple)): + dtypes = (dtypes,) + quantized_type = draw(st.sampled_from(dtypes)) + + _type_info = torch.iinfo(quantized_type) + qmin, qmax = _type_info.min, _type_info.max + + # TODO: Maybe embed the enforced zero_point in the `torch.iinfo`. + _zp_enforced = _ENFORCED_ZERO_POINT[quantized_type] + if _zp_enforced is not None: + zero_point = _zp_enforced + else: + _zp_min = qmin if zero_point_min is None else zero_point_min + _zp_max = qmax if zero_point_max is None else zero_point_max + zero_point = draw(st.integers(min_value=_zp_min, max_value=_zp_max)) + + if scale_min is None: + scale_min = torch.finfo(torch.float).eps + if scale_max is None: + scale_max = torch.finfo(torch.float).max + scale = draw(floats(min_value=scale_min, max_value=scale_max, width=32)) + + return scale, zero_point, quantized_type + +"""Strategy to create different shapes. +Args: + min_dims / max_dims: minimum and maximum rank. + min_side / max_side: minimum and maximum dimensions per rank. + +Generates: + Possible shapes for a tensor, constrained to the rank and dimensionality. + +Example: + # Generates 3D and 4D tensors. + @given(Q = qtensor(shapes=array_shapes(min_dims=3, max_dims=4)) + some_test(self, Q):... +""" +@st.composite +def array_shapes(draw, min_dims=1, max_dims=None, min_side=1, max_side=None, max_numel=None): + """Return a strategy for array shapes (tuples of int >= 1).""" + assert(min_dims < 32) + if max_dims is None: + max_dims = min(min_dims + 2, 32) + assert(max_dims < 32) + if max_side is None: + max_side = min_side + 5 + candidate = st.lists(st.integers(min_side, max_side), min_size=min_dims, max_size=max_dims) + if max_numel is not None: + candidate = candidate.filter(lambda x: reduce(int.__mul__, x, 1) <= max_numel) + return draw(candidate.map(tuple)) + + +"""Strategy for generating test cases for tensors. +The resulting tensor is in float32 format. + +Args: + shapes: Shapes under test for the tensor. Could be either a hypothesis + strategy, or an iterable of different shapes to sample from. + elements: Elements to generate from for the returned data type. + If None, the strategy resolves to float within range [-1e6, 1e6]. + qparams: Instance of the qparams strategy. This is used to filter the tensor + such that the overflow would not happen. + +Generates: + X: Tensor of type float32. Note that NaN and +/-inf is not included. + qparams: (If `qparams` arg is set) Quantization parameters for X. + The returned parameters are `(scale, zero_point, quantization_type)`. + (If `qparams` arg is None), returns None. 
+""" +@st.composite +def tensor(draw, shapes=None, elements=None, qparams=None, dtype=np.float32): + if isinstance(shapes, SearchStrategy): + _shape = draw(shapes) + else: + _shape = draw(st.sampled_from(shapes)) + if qparams is None: + if elements is None: + elements = floats(-1e6, 1e6, allow_nan=False, width=32) + X = draw(stnp.arrays(dtype=dtype, elements=elements, shape=_shape)) + assume(not (np.isnan(X).any() or np.isinf(X).any())) + return X, None + qparams = draw(qparams) + if elements is None: + min_value, max_value = _get_valid_min_max(qparams) + elements = floats(min_value, max_value, allow_infinity=False, + allow_nan=False, width=32) + X = draw(stnp.arrays(dtype=dtype, elements=elements, shape=_shape)) + # Recompute the scale and zero_points according to the X statistics. + scale, zp = _calculate_dynamic_qparams(X, qparams[2]) + enforced_zp = _ENFORCED_ZERO_POINT.get(qparams[2], None) + if enforced_zp is not None: + zp = enforced_zp + return X, (scale, zp, qparams[2]) + +@st.composite +def per_channel_tensor(draw, shapes=None, elements=None, qparams=None): + if isinstance(shapes, SearchStrategy): + _shape = draw(shapes) + else: + _shape = draw(st.sampled_from(shapes)) + if qparams is None: + if elements is None: + elements = floats(-1e6, 1e6, allow_nan=False, width=32) + X = draw(stnp.arrays(dtype=np.float32, elements=elements, shape=_shape)) + assume(not (np.isnan(X).any() or np.isinf(X).any())) + return X, None + qparams = draw(qparams) + if elements is None: + min_value, max_value = _get_valid_min_max(qparams) + elements = floats(min_value, max_value, allow_infinity=False, + allow_nan=False, width=32) + X = draw(stnp.arrays(dtype=np.float32, elements=elements, shape=_shape)) + # Recompute the scale and zero_points according to the X statistics. + scale, zp = _calculate_dynamic_per_channel_qparams(X, qparams[2]) + enforced_zp = _ENFORCED_ZERO_POINT.get(qparams[2], None) + if enforced_zp is not None: + zp = enforced_zp + # Permute to model quantization along an axis + axis = int(np.random.randint(0, X.ndim, 1)) + permute_axes = np.arange(X.ndim) + permute_axes[0] = axis + permute_axes[axis] = 0 + X = np.transpose(X, permute_axes) + + return X, (scale, zp, axis, qparams[2]) + +"""Strategy for generating test cases for tensors used in Conv. +The resulting tensors is in float32 format. + +Args: + spatial_dim: Spatial Dim for feature maps. If given as an iterable, randomly + picks one from the pool to make it the spatial dimension + batch_size_range: Range to generate `batch_size`. + Must be tuple of `(min, max)`. + input_channels_per_group_range: + Range to generate `input_channels_per_group`. + Must be tuple of `(min, max)`. + output_channels_per_group_range: + Range to generate `output_channels_per_group`. + Must be tuple of `(min, max)`. + feature_map_range: Range to generate feature map size for each spatial_dim. + Must be tuple of `(min, max)`. + kernel_range: Range to generate kernel size for each spatial_dim. Must be + tuple of `(min, max)`. + max_groups: Maximum number of groups to generate. + elements: Elements to generate from for the returned data type. + If None, the strategy resolves to float within range [-1e6, 1e6]. + qparams: Strategy for quantization parameters. for X, w, and b. + Could be either a single strategy (used for all) or a list of + three strategies for X, w, b. 
+Generates: + (X, W, b, g): Tensors of type `float32` of the following drawen shapes: + X: (`batch_size, input_channels, H, W`) + W: (`output_channels, input_channels_per_group) + kernel_shape + b: `(output_channels,)` + groups: Number of groups the input is divided into +Note: X, W, b are tuples of (Tensor, qparams), where qparams could be either + None or (scale, zero_point, quantized_type) + + +Example: + @given(tensor_conv( + spatial_dim=2, + batch_size_range=(1, 3), + input_channels_per_group_range=(1, 7), + output_channels_per_group_range=(1, 7), + feature_map_range=(6, 12), + kernel_range=(3, 5), + max_groups=4, + elements=st.floats(-1.0, 1.0), + qparams=qparams() + )) +""" +@st.composite +def tensor_conv( + draw, spatial_dim=2, batch_size_range=(1, 4), + input_channels_per_group_range=(3, 7), + output_channels_per_group_range=(3, 7), feature_map_range=(6, 12), + kernel_range=(3, 7), max_groups=1, can_be_transposed=False, + elements=None, qparams=None +): + + # Resolve the minibatch, in_channels, out_channels, iH/iW, iK/iW + batch_size = draw(st.integers(*batch_size_range)) + input_channels_per_group = draw( + st.integers(*input_channels_per_group_range)) + output_channels_per_group = draw( + st.integers(*output_channels_per_group_range)) + groups = draw(st.integers(1, max_groups)) + input_channels = input_channels_per_group * groups + output_channels = output_channels_per_group * groups + + if isinstance(spatial_dim, Iterable): + spatial_dim = draw(st.sampled_from(spatial_dim)) + + feature_map_shape = [] + for i in range(spatial_dim): + feature_map_shape.append(draw(st.integers(*feature_map_range))) + + kernels = [] + for i in range(spatial_dim): + kernels.append(draw(st.integers(*kernel_range))) + + tr = False + weight_shape = (output_channels, input_channels_per_group) + tuple(kernels) + bias_shape = output_channels + if can_be_transposed: + tr = draw(st.booleans()) + if tr: + weight_shape = (input_channels, output_channels_per_group) + tuple(kernels) + bias_shape = output_channels + + # Resolve the tensors + if qparams is not None: + if isinstance(qparams, (list, tuple)): + assert(len(qparams) == 3), "Need 3 qparams for X, w, b" + else: + qparams = [qparams] * 3 + + X = draw(tensor(shapes=( + (batch_size, input_channels) + tuple(feature_map_shape),), + elements=elements, qparams=qparams[0])) + W = draw(tensor(shapes=(weight_shape,), elements=elements, + qparams=qparams[1])) + b = draw(tensor(shapes=(bias_shape,), elements=elements, + qparams=qparams[2])) + + return X, W, b, groups, tr + +# We set the deadline in the currently loaded profile. +# Creating (and loading) a separate profile overrides any settings the user +# already specified. +hypothesis_version = hypothesis.version.__version_info__ +current_settings = settings._profiles[settings._current_profile].__dict__ +current_settings['deadline'] = None +if hypothesis_version >= (3, 16, 0) and hypothesis_version < (5, 0, 0): + current_settings['timeout'] = hypothesis.unlimited +def assert_deadline_disabled(): + if hypothesis_version < (3, 27, 0): + import warnings + warning_message = ( + "Your version of hypothesis is outdated. " + "To avoid `DeadlineExceeded` errors, please update. 
" + f"Current hypothesis version: {hypothesis.__version__}" + ) + warnings.warn(warning_message) + else: + assert settings().deadline is None diff --git a/env-llmeval/lib/python3.10/site-packages/torch/testing/_internal/inductor_utils.py b/env-llmeval/lib/python3.10/site-packages/torch/testing/_internal/inductor_utils.py new file mode 100644 index 0000000000000000000000000000000000000000..5404d3620fe2ea68e7439a575707c9a2a4edc63a --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torch/testing/_internal/inductor_utils.py @@ -0,0 +1,67 @@ +import torch +import re +import unittest +from subprocess import CalledProcessError + +from torch._inductor.codecache import CppCodeCache +from torch.utils._triton import has_triton +from torch.testing._internal.common_utils import ( + LazyVal, + IS_FBCODE, +) +from torch._dynamo.backends.registry import register_backend +from torch._inductor.compile_fx import compile_fx, count_bytes_inner +from torch.testing._internal.common_utils import TestCase + +def test_cpu(): + try: + CppCodeCache.load("") + return not IS_FBCODE + except ( + CalledProcessError, + OSError, + torch._inductor.exc.InvalidCxxCompiler, + torch._inductor.exc.CppCompileError, + ): + return False + +HAS_CPU = LazyVal(test_cpu) + +HAS_CUDA = has_triton() + +@register_backend +def count_bytes_inductor(gm, example_inputs): + return compile_fx(gm, example_inputs, inner_compile=count_bytes_inner) + +def _check_has_dynamic_shape( + self: TestCase, + code, +): + for_loop_found = False + has_dynamic = False + lines = code.split("\n") + for line in lines: + if "for(" in line: + for_loop_found = True + if re.search(r";.*ks.*;", line) is not None: + has_dynamic = True + break + self.assertTrue( + has_dynamic, msg=f"Failed to find dynamic for loop variable\n{code}" + ) + self.assertTrue(for_loop_found, f"Failed to find for loop\n{code}") + + +def skipCUDAIf(cond, msg): + if cond: + def decorate_fn(fn): + def inner(self, *args, **kwargs): + if self.device == "cuda": + raise unittest.SkipTest(msg) + return fn(self, *args, **kwargs) + return inner + else: + def decorate_fn(fn): + return fn + + return decorate_fn diff --git a/env-llmeval/lib/python3.10/site-packages/torch/testing/_internal/jit_metaprogramming_utils.py b/env-llmeval/lib/python3.10/site-packages/torch/testing/_internal/jit_metaprogramming_utils.py new file mode 100644 index 0000000000000000000000000000000000000000..863c2841c511afd32ef176740cd071eb6c447479 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torch/testing/_internal/jit_metaprogramming_utils.py @@ -0,0 +1,719 @@ +# Torch +from torch.jit.annotations import BroadcastingList2, BroadcastingList3 # noqa: F401 +import torch.nn.functional as F +import torch +import torch.cuda +import torch.jit +import torch.jit._logging +import torch.jit.frontend +from torch.testing._internal.common_nn import module_tests, new_module_tests +from torch.testing._internal.common_utils import is_iterable_of_tensors + +import collections +from copy import deepcopy +from typing import Any, Dict, List, Union +import math # noqa: F401 + +# Testing utils +from torch import inf + +assert torch.get_default_dtype() == torch.float32 + +L = 20 +M = 10 +S = 5 + + +def unpack_variables(args): + if isinstance(args, tuple): + return tuple(unpack_variables(elem) for elem in args) + else: + return args + +class dont_convert(tuple): + pass + +non_differentiable = collections.namedtuple('non_differentiable', ['tensor']) + +def create_input(call_args, requires_grad=True, non_contiguous=False, 
call_kwargs=None, dtype=torch.float, device=None): + if not isinstance(call_args, tuple): + call_args = (call_args,) + + def map_arg(arg): + def maybe_non_contig(tensor): + if not non_contiguous or tensor.numel() < 2: + return tensor.clone() + + return noncontiguous_like(tensor) + + def conjugate(tensor): + return tensor.conj() + + if isinstance(arg, (torch.Size, dont_convert)): + return arg + elif isinstance(arg, tuple) and len(arg) == 0: + var = conjugate(torch.randn((), dtype=dtype, device=device)) + var.requires_grad = requires_grad + return var + elif isinstance(arg, tuple) and not isinstance(arg[0], torch.Tensor): + return conjugate(maybe_non_contig(torch.randn(*arg, dtype=dtype, device=device))).requires_grad_(requires_grad) + # double check casting + elif isinstance(arg, non_differentiable): + if isinstance(arg.tensor, torch.Tensor): + return conjugate(maybe_non_contig(arg.tensor.to(device=device))) + return conjugate(maybe_non_contig(arg.tensor.to(device=device))) + elif isinstance(arg, torch.Tensor): + if arg.is_complex() != dtype.is_complex: + raise RuntimeError("User provided tensor is real for a test that runs with complex dtype, ", + "which is not supported for now") + # NOTE: We do clone() after detach() here because we need to be able to change size/storage of v afterwards + v = conjugate(maybe_non_contig(arg)).detach().to(device=device).clone() + v.requires_grad = requires_grad and (v.is_floating_point() or v.is_complex()) + return v + elif callable(arg): + return map_arg(arg(dtype=dtype, device=device)) + else: + return arg + args_out = tuple(map_arg(arg) for arg in call_args) + kwargs_out = {k: map_arg(v) for k, v in call_kwargs.items()} if call_kwargs else {} + return args_out, kwargs_out + +# NB: JIT script tests for all nn functional interfaces, script mode does +# not support in_place operations yet, so no inplace operation tests added. 
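# [Editor's note] Illustrative sketch only, not part of the patch above. It shows how
# create_input() (defined just above) expands a mixed argument spec into concrete test
# inputs: shape tuples become randn tensors with requires_grad set, plain tensors are
# turned into detached clones, and non_differentiable(...) wrappers pass through with
# requires_grad left False. Assumes the module added by this patch is importable at its
# usual path; the particular spec below is hypothetical.
import torch
from torch.testing._internal.jit_metaprogramming_utils import (
    S, create_input, non_differentiable)

args, kwargs = create_input(
    ((S, S),                               # shape tuple -> torch.randn(S, S), requires_grad=True
     torch.ones(3),                        # concrete tensor -> detached clone, requires_grad=True
     non_differentiable(torch.zeros(2))),  # wrapper -> passed through, requires_grad stays False
    requires_grad=True)
assert args[0].shape == (S, S) and args[0].requires_grad
assert args[1].requires_grad and not args[2].requires_grad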
+# removed all the deprecated functions +# +# ( +# method name, +# input size/constructing fn, +# args (tuple represents shape of a tensor arg), +# test variant name(will be used at test name suffix, +# 'inplace' skips grad tests), // optional +# (True, nonfusible_nodes, fusible_nodes) for autodiff // optional +# fn to determine if test should be skipped, // optional +# fn mapping output to part that should be gradcheck'ed, // optional +# kwargs for function, // optional +# ) +nn_functional_tests = [ + ('conv1d', (S, S, S), ((S, S, S),)), + ('conv2d', (S, S, S, S), ((S, S, S, S),)), + ('conv3d', (S, S, S, S, S), ((S, S, S, S, S),)), + ('conv_transpose1d', (S, S, S), ((S, S, S),)), + ('conv_transpose2d', (S, S, S, S), ((S, S, S, S),)), + ('conv_transpose3d', (S, S, S, S, S), ((S, S, S, S, S),)), + ('conv_tbc', (S, S, S), ((S, S, S), (S,), 2)), + ('avg_pool1d', (S, S, S), (3,)), + ('avg_pool2d', (S, S, S, S), (3,), '', (True,)), + ('avg_pool3d', (S, S, S, S, S), (3,)), + ('fractional_max_pool2d', (S, S, S, S), (3, [2, 3],)), + ('max_pool1d', (S, S, S), (2, 1)), + ('max_pool1d', (S, S, S), (2, 1, 1, 1, False, True), 'with_indices'), + ('max_pool2d', (S, S, S, S), (2, 1), '', (True, 'aten::max_pool2d_with_indices')), + ('max_pool2d', (S, S, S, S), (2, 1, 1, 1, False, True), 'with_indices', (True, 'aten::max_pool2d_with_indices')), + ('max_pool3d', (S, S, S, S, S), (2, 1)), + ('max_unpool1d', torch.tensor([[[2., 4]]]), (torch.tensor([[[1, 3]]]), 2, 2, 0)), + ('max_unpool2d', torch.tensor([[[[2., 4]]]]), (torch.tensor([[[[1, 3]]]]), 2, 2, 0)), + ('max_unpool3d', torch.tensor([[[[[2., 4]]]]]), (torch.tensor([[[[[1, 3]]]]]), 2, 2, 0)), + ('lp_pool1d', (S, S, S), (2., 3, 2,)), + ('lp_pool2d', (S, S, S, S), (2., 3, 2,)), + ('adaptive_max_pool1d', (S, S, S), (5,)), + ('adaptive_max_pool2d', (S, S, S, S), ([5, 7],)), + ('adaptive_max_pool3d', (S, S, S, S, S), ([3, 2, 2],)), + ('adaptive_avg_pool1d', (S, S, S), (5,), '', (True,)), + ('adaptive_avg_pool2d', (S, S, S, S), ([5, 7],), '', (True,)), + ('adaptive_avg_pool3d', (S, S, S, S, S), ([3, 2, 2],), '', (True,)), + ('dropout', (S, S, S), (0.5,), '', (True, 'aten::native_dropout')), + ('alpha_dropout', (S, S, S), (0.5,)), + ('dropout2d', (S, S, S), (0.5,)), + ('dropout2d', (S, S, S, S), (0.5,), 'batched'), + ('dropout3d', (S, S, S, S), (0.5,)), + ('dropout3d', (S, S, S, S, S), (0.5,), 'batched'), + ('feature_alpha_dropout', (S, S, S), (0.5,)), + ('threshold', (S, S, S), (0.1, 2.), '', (True,)), + ('threshold', (S, S, S), (0.1, 2., True), 'inplace'), + ('relu', (S, S, S), (), '', (True,)), + ('relu', (S, S, S), (), 'inplace'), + ('glu', (S - 1, S - 1, S - 1), (),), + ('hardtanh', (S, S, S), (-0.5, 0.5), '', (True,)), + ('hardtanh', (S, S, S), (-0.5, 0.5, True), 'inplace'), + ('relu6', (S, S, S), (), '', (True,)), + ('relu6', (S, S, S), (True), 'inplace'), + ('elu', (S, S, S), (0.9,),), + ('elu', (S, S, S), (0.9, True), 'inplace'), + ('selu', (S, S, S), (),), + ('selu', (S, S, S), (True), 'inplace'), + ('celu', (S, S, S), (0.9,),), + ('celu', (S, S, S), (0.9, True), 'inplace'), + ('leaky_relu', (S, S, S), (0.02,), '', (True,)), + ('leaky_relu', (S, S, S), (0.02,), 'inplace'), + ('rrelu', (S, S), (0.1, 0.3, False),), + ('rrelu', (S, S), (0.1, 0.3, False, True), 'inplace'), + ('hardshrink', (S, S, S), (0.4,), '', (True,)), + ('tanhshrink', (S, S, S), (),), + ('softsign', (S, S, S), (),), + ('softplus', (S, S, S), (), '', (True,)), + ('softmin', (S, S, S), (0,),), + ('softmax', (S, S, S), (0,), '', (True,)), + ('softmax', (S, S, S), (0, 3, torch.double), 
'with_all_args', (True,)), + ('tanh', (S, S, S), (), '', (True,)), + ('sigmoid', (S, S, S), (), '', (True,)), + ('silu', (S, S, S), (), '', (True,)), + ('log_softmax', (S, S, S), (0,), '', (True,)), + ('linear', (S, S), ((M, S),), '', (True, ['aten::linear'])), + ('linear', (S, S), ((M, S), (M,)), 'addmm', (True, ['aten::linear'])), + ('bilinear', (S, S, S), ((S, S, M), torch.zeros(M, S, M),),), + ('embedding', torch.tensor([[1, 2, 4, 5], [4, 3, 2, 5]]), (torch.rand(6, 3), ), '', (True,)), + ('embedding_bag', torch.tensor([1, 2, 4, 2]), (torch.rand(5, 3), torch.tensor([0, 4]),),), + ('batch_norm', (S, S), + (non_differentiable(torch.randn(S)), non_differentiable(torch.ones(S)), None, None, True, ), + 'training', (True, 'aten::_batch_norm_impl_index')), + ('batch_norm', (0, S, S, S), + (non_differentiable(torch.randn(S)), non_differentiable(torch.ones(S)), + non_differentiable(torch.randn(S)), non_differentiable(torch.ones(S)), True, ), + 'size_zero', (True, 'aten::_batch_norm_impl_index')), + ('batch_norm', (0, S, S, S), + (non_differentiable(torch.randn(S)), non_differentiable(torch.ones(S)), + non_differentiable(torch.randn(S)), non_differentiable(torch.ones(S)), True, ), + 'size_zero_inference', (True, 'aten::_batch_norm_impl_index')), + ('batch_norm', (S, S), + (non_differentiable(torch.randn(S)), non_differentiable(torch.ones(S)), + non_differentiable(torch.randn(S)), non_differentiable(torch.ones(S)), True, ), + 'with_weight_and_bias_training', (True, 'aten::_batch_norm_impl_index')), + ('batch_norm', (S, S), (non_differentiable(torch.randn(S)), non_differentiable(torch.ones(S)), + None, non_differentiable(torch.ones(S)), True, ), + 'with_only_bias_training', (True, 'aten::_batch_norm_impl_index')), + ('batch_norm', (S, S), (non_differentiable(torch.randn(S)), non_differentiable(torch.ones(S)), + non_differentiable(torch.randn(S)), None, True, ), + 'with_only_weight_training', (True, 'aten::_batch_norm_impl_index')), + ('batch_norm', (S, S), (non_differentiable(torch.randn(S)), non_differentiable(torch.ones(S)), + None, None, False, ), + 'inference', (True, 'aten::_batch_norm_impl_index')), + ('batch_norm', (S, S), (non_differentiable(torch.randn(S)), non_differentiable(torch.ones(S)), + non_differentiable(torch.randn(S)), non_differentiable(torch.ones(S)), False, ), + 'with_weight_and_bias_inference', (True, 'aten::_batch_norm_impl_index')), + ('batch_norm', (S, S), (non_differentiable(torch.randn(S)), non_differentiable(torch.ones(S)), + None, non_differentiable(torch.ones(S)), False, ), + 'with_only_bias_inference', (True, 'aten::_batch_norm_impl_index')), + ('batch_norm', (S, S), (non_differentiable(torch.randn(S)), non_differentiable(torch.ones(S)), + non_differentiable(torch.randn(S)), None, False, ), + 'with_only_weight_inference', (True, 'aten::_batch_norm_impl_index')), + ('instance_norm', (S, S, S), (non_differentiable(torch.zeros(S)), non_differentiable(torch.ones(S))),), + ('layer_norm', (S, S, S, S), ([5],), '', + (False, ['aten::contiguous', 'aten::_batch_norm_impl_index'])), + ('layer_norm', (S, S, S, S), ([5], non_differentiable(torch.rand(S)),), 'with_only_weight', + (False, ['aten::contiguous', 'aten::_batch_norm_impl_index'])), + ('layer_norm', (S, S, S, S), ([5], None, non_differentiable(torch.rand(S)),), 'with_only_bias', + (False, ['aten::contiguous', 'aten::_batch_norm_impl_index'])), + ('layer_norm', (S, S, S, S), ([5], non_differentiable(torch.rand(S)), + non_differentiable(torch.rand(S))), 'with_weight_and_bias', + (False, ['aten::contiguous', 
'aten::_batch_norm_impl_index', 'aten::addcmul'])), + ('group_norm', (S, S, S), (1, torch.rand(5),),), + ('local_response_norm', (S, S, S), (2, ),), + ('nll_loss', F.log_softmax(torch.randn(3, 5), dim=0), (torch.tensor([1, 0, 4]),), '',), + ('poisson_nll_loss', torch.rand(S, 2), (torch.rand(S, 2),),), + ('poisson_nll_loss', torch.rand(S, 2), (torch.rand(S, 2), True, True), 'full'), + ('kl_div', F.log_softmax(torch.randn(S, 10), 1), (F.softmax(torch.randn(S, 10), 1),),), + ('cross_entropy', (3, S), (torch.randint(S, (3,), dtype=torch.int64),),), + ('binary_cross_entropy_with_logits', (3,), (torch.empty(3).random_(2), ),), + ('smooth_l1_loss', (3, S), (non_differentiable(torch.rand(3, S)),),), + ('huber_loss', (3, S), (non_differentiable(torch.rand(3, S)),),), + ('l1_loss', (3, S), (non_differentiable(torch.rand(3, S)),),), + ('mse_loss', (3, S), (non_differentiable(torch.rand(3, S)),),), + ('smooth_l1_loss', (3, S), ((torch.rand(3, S)),), 'with_grad'), + ('huber_loss', (3, S), ((torch.rand(3, S)),), 'with_grad'), + ('l1_loss', (3, S), ((torch.rand(3, S)),), 'with_grad'), + ('mse_loss', (3, S), ((torch.rand(3, S)),), 'with_grad'), + ('margin_ranking_loss', (S,), ((S,), (S,)),), + ('hinge_embedding_loss', (3, S), (non_differentiable(torch.rand(3, S)),),), + ('soft_margin_loss', (3, S), (non_differentiable(torch.rand(3, S)),),), + ('multilabel_soft_margin_loss', (3, S), (non_differentiable(torch.rand(3, S)),),), + ('cosine_embedding_loss', (S, S), ((S, S), non_differentiable(torch.rand(S,))),), + ('pixel_shuffle', (1, 9, 4, 4), (3,),), + ('pixel_unshuffle', (1, 1, 12, 12), (3,),), + ('affine_grid', (S, 2, 3), (torch.Size([S, 1, 7, 7]),),), + ('pad', (3, 3, 4, 2), ([1, 1],),), + ('pairwise_distance', (S, S), ((S, S),),), + ('pdist', (S, S), (),), + ('cosine_similarity', (S, S), ((S, S),),), + ('triplet_margin_loss', (S, S), ((S, S), (S, S)),), + ('normalize', (S, S, S), (),), + ('unfold', (S, S, S, S), ([2, 3]),), + ('fold', (1, 3 * 2 * 2, 12), ([4, 5], [2, 2]),), + ('grid_sample', (S, S, S, S), (non_differentiable(torch.rand(S, S, S, 2)),),), + ('gumbel_softmax', (S, S), (2.,), '', (True, ['aten::softmax', 'aten::add', 'aten::div'], ['aten::neg'])), + ('gumbel_softmax', (S, S), (2., True,), 'hard', (True, ['aten::softmax', 'aten::add', 'aten::div'], ['aten::neg'])), + ('multilabel_margin_loss', torch.tensor([[0.2, -0.2, 0.07]]), (torch.tensor([[0, 0, 1]]),),), + ('multi_margin_loss', (S, S), (non_differentiable(torch.randint(S, (S, ), dtype=torch.int64)), + 1, 1., non_differentiable(torch.randn(S))),), + ('binary_cross_entropy', torch.randn(3, 2).sigmoid(), (non_differentiable(torch.rand(3, 2)), + non_differentiable(torch.randn(3, 2))),), + ('binary_cross_entropy', torch.randn(3, 2).sigmoid(), + (non_differentiable(torch.rand(3, 2)), + non_differentiable(torch.randn(3, 2)), None, None, 'mean'), 'size_average'), + ('ctc_loss', torch.rand(S, S, S).log_softmax(2).detach().requires_grad_(), + (torch.randint(1, S, (S, S), dtype=torch.long), torch.full((S,), S, dtype=torch.long), + torch.randint(1, S, (S,), dtype=torch.long))), + ('upsample', torch.randn(S, S, M, M), (None, 2.), 'with_scale'), + ('upsample', torch.randn(S, S, M, M), (4,), 'with_size'), + ('interpolate', torch.zeros(3, 3).view(1, 1, 3, 3), (2,), 'nearest_4d'), + ('interpolate', torch.randn(S, S, M, M), (None, 2.), 'nearest_4d_with_scale'), + ('interpolate', torch.randn(S, S, M, M), (4,), 'nearest_4d_with_size'), + ('interpolate', torch.zeros(3, 3).view(1, 1, 3, 3), (2,), 'area_4d'), + ('interpolate', torch.randn(S, S, M, M), (None, 
2.), 'area_4d_with_scale'), + ('interpolate', torch.randn(S, S, M, M), (4,), 'area_4d_with_size'), + ('interpolate', torch.zeros(3, 3).view(1, 1, 3, 3), (2,), 'bilinear_4d'), + ('interpolate', torch.randn(S, S, M, M), (None, 2.), 'bilinear_4d_with_scale'), + ('interpolate', torch.randn(S, S, M, M), (4,), 'bilinear_4d_with_size'), + ('interpolate', torch.zeros(3, 3).view(1, 1, 3, 3), (2,), 'bicubic_4d'), + ('interpolate', torch.randn(S, S, M, M), (None, 2.), 'bicubic_4d_with_scale'), + ('interpolate', torch.randn(S, S, M, M), (4,), 'bicubic_4d_with_size'), + ('interpolate', torch.zeros(3, 3).view(1, 3, 3), (2,), 'nearest_3d'), + ('interpolate', torch.randn(S, M, M), (None, 2.), 'nearest_3d_with_scale'), + ('interpolate', torch.randn(S, M, M), (4,), 'nearest_3d_with_size'), + ('interpolate', torch.zeros(3, 3).view(1, 3, 3), (2,), 'area_3d'), + ('interpolate', torch.randn(S, M, M), (None, 2.), 'area_3d_with_scale'), + ('interpolate', torch.randn(S, M, M), (4,), 'area_3d_with_size'), + ('interpolate', torch.zeros(3, 3).view(1, 3, 3), (2,), 'linear_3d'), + ('interpolate', torch.randn(S, M, M), (None, 2.), 'linear_3d_with_scale'), + ('interpolate', torch.randn(S, M, M), (4,), 'linear_3d_with_size'), + ('interpolate', torch.randn(S, M, M, M, M), (None, 2.), 'nearest_5d_with_scale'), + ('interpolate', torch.randn(S, M, M, M, M), (4,), 'nearest_5d_with_size'), + ('interpolate', torch.zeros(3, 3, 3).view(1, 1, 3, 3, 3), (2,), 'area_5d'), + ('interpolate', torch.randn(S, M, M, M, M), (None, 2.), 'area_5d_with_scale'), + ('interpolate', torch.randn(S, M, M, M, M), (4,), 'area_5d_with_size'), + ('interpolate', torch.zeros(3, 3, 3).view(1, 1, 3, 3, 3), (2,), 'trilinear_5d'), + ('interpolate', torch.randn(S, M, M, M, M), (None, 2.), 'trilinear_5d_with_scale'), + ('interpolate', torch.randn(S, M, M, M, M), (4,), 'trilinear_5d_with_size'), + ('interpolate', torch.zeros(3, 3).view(1, 1, 3, 3), (2, None, 'nearest', None, False), + 'nearest_4d_not_recompute_scale_factor'), + ('interpolate', torch.randn(S, S, M, M), (4, None, 'nearest', None, False), + 'nearest_4d_with_size_not_recompute_scale_factor'), + ('interpolate', torch.randn(S, S, M, M), (None, 2., 'bilinear', None, False), + 'bilinear_4d_with_scale_not_recompute_scale_factor'), + ('interpolate', torch.randn(S, S, M, M), (4, None, 'bilinear', None, False), + 'bilinear_4d_with_size_not_recompute_scale_factor'), + ('interpolate', torch.randn(S, S, M, M), (None, 2., 'bicubic', None, False), + 'bicubic_4d_with_scale_not_recompute_scale_factor'), + ('interpolate', torch.randn(S, S, M, M), (4, None, 'bicubic', None, False), + 'bicubic_4d_with_size_not_recompute_scale_factor'), + ('interpolate', torch.randn(S, M, M), (None, 2., 'nearest', None, False), + 'nearest_3d_with_scale_not_recompute_scale_factor'), + ('interpolate', torch.randn(S, M, M), (4, None, 'nearest', None, False), + 'nearest_3d_with_size_not_recompute_scale_factor'), + ('interpolate', torch.randn(S, M, M), (None, 2., 'linear', None, False), + 'linear_3d_with_scale_not_recompute_scale_factor'), + ('interpolate', torch.randn(S, M, M), (4, None, 'linear', None, False), + 'linear_3d_with_size_not_recompute_scale_factor'), + ('interpolate', torch.randn(S, M, M, M, M), (None, 2., 'nearest', None, False), + 'nearest_5d_with_scale_not_recompute_scale_factor'), + ('interpolate', torch.randn(S, M, M, M, M), (4, None, 'nearest', None, False), + 'nearest_5d_with_size_not_recompute_scale_factor'), + ('interpolate', torch.randn(S, M, M, M, M), (None, 2., 'trilinear', None, False), + 
'trilinear_5d_with_scale_not_recompute_scale_factor'), + ('interpolate', torch.randn(S, M, M, M, M), (4, None, 'trilinear', None, False), + 'trilinear_5d_with_size_not_recompute_scale_factor'), +] + +script_template = ''' +def the_method({}): + return {} +''' + +def value_to_literal(value): + if isinstance(value, str): + # Quotes string and escapes special characters + return ascii(value) + if isinstance(value, torch.Tensor): + return 'torch.' + str(value) + else: + return str(value) + +def get_call(method_name, func_type, args, kwargs): + kwargs_str = ', '.join([k + '=' + value_to_literal(v) for k, v in kwargs.items()]) + self_arg = args[0] + if(func_type == 'method'): + args = args[1:] + + argument_str = ', '.join(args) + argument_str += ', ' if len(args) and len(kwargs) else '' + argument_str += kwargs_str + + if func_type == 'functional' or func_type == 'function': + call = f'torch.{method_name}({argument_str})' + elif func_type == 'method': + call = f'{self_arg}.{method_name}({argument_str})' + elif func_type == 'nn_functional': + call = f'torch.nn.functional.{method_name}({argument_str})' + else: + raise TypeError('Unsupported function type') + + return call + +def get_constant(x): + if x == inf: + return 'math.inf' + if x == -inf: + return '-math.inf' + return x + +def get_script_args(args): + formals: List[str] = [] + tensors: List[Union[torch.Tensor, List[torch.Tensor]]] = [] + actuals: List[str] = [] + for arg in args: + if isinstance(arg, torch.Tensor): + name = f'i{len(formals)}' + formals.append(name) + actuals.append(name) + tensors.append(arg) + elif is_iterable_of_tensors(arg): + name = f'i{len(formals)}' + formals.append(name + ': List[torch.Tensor]') + actuals.append(name) + tensors.append(list(arg)) + elif isinstance(arg, str): + actuals.append(f"'{arg}'") + else: + actuals.append(str(get_constant(arg))) + return (formals, tensors, actuals) + +# create a script function from (name, func_type, output_process_fn), +# and returns the compiled function and example inputs +def gen_script_fn_and_args(method_name, func_type, *args, **kwargs): + formals, tensors, actuals = get_script_args(args) + call = get_call(method_name, func_type, actuals, kwargs) + script = script_template.format(', '.join(formals), call) + CU = torch.jit.CompilationUnit(script) + return CU.the_method, tensors + +# create a script function from (name, func_type), +# returns a function takes in (args, kwargs) and runs the compiled function +def create_script_fn(self, method_name, func_type): + # function returns tuple containing original output and + # filtered output to be used in checking gradients + def script_fn(*args, **kwargs): + fn, tensors = gen_script_fn_and_args(method_name, func_type, *args, **kwargs) + self.assertExportImport(fn.graph, tensors) + output = fn(*tensors) + # skip type annotate function attributes for now, see: https://github.com/python/mypy/issues/2087 + script_fn.last_graph = fn.graph_for(*tensors) # type: ignore[attr-defined] + return output + return script_fn + +class SplitInputs: + all_tensors: List[Any] + tensor_args: List[Any] + nontensor_args: List[Any] + arg_types: List[str] + tensor_kwargs: Dict[str, Any] + kwarg_order: List[str] + nontensor_kwargs: Dict[str, Any] + kwarg_types: Dict[str, Any] + + @staticmethod + def _is_tensor_input(arg): + return isinstance(arg, torch.Tensor) or is_iterable_of_tensors(arg) + + def __init__(self, args, kwargs): + self.arg_types = ['t' if self._is_tensor_input(arg) else 's' for arg in args] + self.kwarg_types = {k: 't' if 
self._is_tensor_input(v) else 's' for k, v in kwargs.items()} + self.tensor_args = [arg for arg in args if self._is_tensor_input(arg)] + self.nontensor_args = [arg for arg in args if not self._is_tensor_input(arg)] + self.tensor_kwargs = {k: v for k, v in kwargs.items() if self._is_tensor_input(v)} + self.nontensor_kwargs = {k: v for k, v in kwargs.items() if not self._is_tensor_input(v)} + self.all_tensors = [*self.tensor_args, *[v for k, v in self.tensor_kwargs.items()]] + self.kwarg_order = [k for k, v in kwargs.items()] + + def nontensors_match(self, other: 'SplitInputs'): + if self.arg_types != other.arg_types: + return False + if self.kwarg_types != other.kwarg_types: + return False + if self.kwarg_order != other.kwarg_order: + return False + if self.nontensor_args != other.nontensor_args: + return False + if self.nontensor_kwargs != other.nontensor_kwargs: + return False + return True + +# make a new function where all non-tensor arguments in 'args' have been partially +# applied, and all tensor arguments remain. +# used to trace functions when some arguments are not tensors +def partial_apply_nontensors(fn, args, kwargs): + inputs = SplitInputs(args, kwargs) + + def new_fn(*tensors_): + tensors = iter(tensors_) + full_args = [args[i] if s == 's' else next(tensors) for i, s in enumerate(inputs.arg_types)] + full_kwargs = {k: kwargs[k] if s == 's' else next(tensors) for k, s in inputs.kwarg_types.items()} + return fn(*full_args, **full_kwargs) + + return new_fn, inputs + +# create a trace function from input fn +def create_traced_fn(self, fn, cache_traced_fn=False): + def traced_fn(*inputs, **kwargs): + # `check_trace` is set to False because check_trace is run with @no_grad + # Also, `check_against_reference` already does all the checks + # against python function + fn_tensors, split_inputs = partial_apply_nontensors(fn, inputs, kwargs) + if not cache_traced_fn or not hasattr(traced_fn, 'traced'): + traced = torch.jit.trace(fn_tensors, split_inputs.all_tensors, check_trace=False) + self.assertExportImport(traced.graph, split_inputs.all_tensors) + output = traced(*split_inputs.all_tensors) + if cache_traced_fn: + traced_fn.traced = traced + traced_fn.split_inputs = split_inputs + else: + # Guard to check that nontensor inputs are the same as during tracing + self.assertTrue(traced_fn.split_inputs.nontensors_match(split_inputs)) + output = traced_fn.traced(*split_inputs.all_tensors) + traced = traced_fn.traced + # skip type annotate function attributes for now, see: https://github.com/python/mypy/issues/2087 + traced_fn.last_graph = traced.graph_for(*split_inputs.all_tensors) # type: ignore[attr-defined] + traced_fn.graph = traced.graph # type: ignore[attr-defined] + return output + return traced_fn + +# known to be failing in script +EXCLUDE_SCRIPT = { + 'test_norm_fro_default', + 'test_norm_fro_cpu', + 'test_norm_nuc', + 'test_norm_fro', + 'test_norm_nuc_batched', + + # aten op has additional cudnn argument + 'test_nn_unfold', + + # flaky test - TODO fix + 'test_nn_ctc_loss', + + # unknown builtin op + 'test_nn_fold', + + # jit doesn't support sparse tensors. 
+ 'test_to_sparse', + 'test_to_sparse_dim', +} + +# generates a script function and set of example inputs +# from a specified test in the format of nn_functional_tests +def get_nn_functional_compiled_fn_and_inputs(name, self_size, args, variant_name='', *extra_args): + test_name = 'test_nn_' + name + + if variant_name != '': + test_name = test_name + '_' + variant_name + + no_grad = variant_name == 'inplace' + + self_variable = create_input((self_size,))[0][0] + kwargs = None + + # need to record this because methods can change the size (e.g. unsqueeze) + args_variable, kwargs_variable = create_input(args) + + self_tensor = deepcopy(self_variable.data) + args_tensor = deepcopy(unpack_variables(args_variable)) + + f_args_variable = (self_variable,) + args_variable + f_args_tensor = (self_tensor,) + args_tensor + with torch._jit_internal._disable_emit_hooks(): + script_fn, inputs = gen_script_fn_and_args(name, "nn_functional", *f_args_variable) + return script_fn, inputs + + +# additional modules test +# TODO: delete this list once we make all nn_tests work +additional_module_tests = [ + { + 'module_name': 'Bilinear', + 'constructor_args': (S, S, M), + 'input_size': (S, S), + 'extra_args': ((S, S),) + }, + { + 'module_name': 'RNNCell', + 'constructor_args': (S, S), + 'input_size': (S, S), + }, + { + 'module_name': 'LSTMCell', + 'constructor_args': (S, S), + 'input_size': (S, S), + }, + { + 'module_name': 'GRUCell', + 'constructor_args': (S, S), + 'input_size': (S, S), + }, + { + 'module_name': 'MultiheadAttention', + 'constructor_args': (128, 8), + 'input_size': (10, 8, 128), + 'extra_args': (torch.randn(10, 8, 128), torch.randn(10, 8, 128)), + 'slowTest': True + }, + { + 'module_name': 'Transformer', + 'constructor_args': (1, 1, 1, 1, 2), + 'input_size': (3, 1, 1), + 'extra_args': (torch.randn(1, 1, 1),), + 'slowTest': True + } +] + +EXCLUDE_SCRIPT_MODULES = { + 'test_nn_AdaptiveAvgPool2d_tuple_none', + 'test_nn_AdaptiveAvgPool3d_tuple_none', + 'test_nn_AdaptiveMaxPool2d_tuple_none', + 'test_nn_AdaptiveMaxPool3d_tuple_none', + + # Doesn't use future division, so this is not supported + 'test_nn_CrossMapLRN2d', + # Derivative for aten::_scaled_dot_product_flash_attention_backward is not implemented + 'test_nn_TransformerDecoderLayer_gelu_activation', + 'test_nn_TransformerDecoderLayer_relu_activation', + 'test_nn_TransformerEncoderLayer_gelu_activation', + 'test_nn_TransformerEncoderLayer_relu_activation', + 'test_nn_Transformer_multilayer_coder', +} + +script_method_template = ''' +def forward({}): + return {} +''' + +def create_script_module(self, nn_module, constructor_args, *args, **kwargs): + def script_module(*args, **kwargs): + formals, tensors, actuals = get_script_args(args) + + method_args = ', '.join(['self'] + actuals) + call_args_str = ', '.join(actuals) + call = f"self.submodule({call_args_str})" + script = script_method_template.format(method_args, call) + + submodule_constants = [] + if kwargs.get('is_constant'): + submodule_constants = ['submodule'] + + # Create module to use the script method + class TheModule(torch.jit.ScriptModule): + __constants__ = submodule_constants + + def __init__(self): + super().__init__() + self.submodule = nn_module(*constructor_args) + + def make_module(script): + module = TheModule() + # check __repr__ + str(module) + module.define(script) + return module + + module = make_module(script) + if self: + self.assertExportImportModule(module, tensors) + module(*args) + # skip type annotate function attributes for now, see: 
https://github.com/python/mypy/issues/2087 + create_script_module.last_graph = module.graph # type: ignore[attr-defined] + return module + return script_module + +def check_alias_annotation(method_name, args, kwargs, *, aten_name, func_type='method'): + formals, tensors, actuals = get_script_args(args) + call = get_call(method_name, func_type, actuals, kwargs) + script = script_template.format(', '.join(formals), call) + CU = torch.jit.CompilationUnit(script) + # to clean up IR + torch._C._jit_pass_inline(CU.the_method.graph) + torch._C._jit_pass_constant_propagation(CU.the_method.graph) + torch._C._jit_check_alias_annotation(CU.the_method.graph, tuple(tensors), aten_name) + +def get_nn_module_name_from_kwargs(**kwargs): + if 'module_name' in kwargs: + return kwargs['module_name'] + elif 'fullname' in kwargs: + return kwargs['fullname'] + elif 'constructor' in kwargs: + return kwargs['constructor'].__name__ + +def get_nn_mod_test_name(**kwargs): + if 'fullname' in kwargs: + test_name = kwargs['fullname'] + else: + test_name = get_nn_module_name_from_kwargs(**kwargs) + if 'desc' in kwargs: + test_name = f"{test_name}_{kwargs['desc']}" + return f'test_nn_{test_name}' + +def get_nn_module_class_from_kwargs(**kwargs): + name = get_nn_module_name_from_kwargs(**kwargs) + index = name.find("_") + if index == -1: + return name + else: + return name[0:name.find("_")] + +def try_get_nn_module_compiled_mod_and_inputs(*args, **kwargs): + name = get_nn_module_name_from_kwargs(**kwargs) + + if 'desc' in kwargs and 'eval' in kwargs['desc']: + # eval() is not supported, so skip these tests + return + + test_name = name + if 'desc' in kwargs: + test_name = f"{test_name}_{kwargs['desc']}" + test_name = get_nn_mod_test_name(**kwargs) + + if test_name in EXCLUDE_SCRIPT_MODULES: + return + if 'constructor' in kwargs: + nn_module = kwargs['constructor'] + else: + nn_module = getattr(torch.nn, name) + + if "FunctionalModule" in str(nn_module): + return + + if 'constructor_args_fn' in kwargs: + constructor_args = kwargs['constructor_args_fn']() + else: + constructor_args = kwargs.get('constructor_args', ()) + + # Set up inputs from tuple of sizes or constructor fn + input_dtype = torch.double + if 'input_fn' in kwargs: + input = kwargs['input_fn']() + if isinstance(input, torch.Tensor): + input = (input,) + + if all(tensor.is_complex() for tensor in input): + input_dtype = torch.cdouble + else: + input = (kwargs['input_size'],) + + # Extra parameters to forward() + if 'extra_args' in kwargs: + input = input + kwargs['extra_args'] + + if 'target_size' in kwargs: + input = input + (kwargs['target_size'],) + elif 'target_fn' in kwargs: + if torch.is_tensor(input): + input = (input,) + input = input + (kwargs['target_fn'](),) + + args_variable, kwargs_variable = create_input(input, dtype=input_dtype) + f_args_variable = deepcopy(unpack_variables(args_variable)) + out_var = deepcopy(f_args_variable) + + args, mod = f_args_variable, create_script_module(None, nn_module, constructor_args, *f_args_variable)(*f_args_variable) + + return mod, out_var + + +def get_all_nn_module_tests(): + return module_tests + new_module_tests + additional_module_tests diff --git a/env-llmeval/lib/python3.10/site-packages/torch/testing/_internal/jit_utils.py b/env-llmeval/lib/python3.10/site-packages/torch/testing/_internal/jit_utils.py new file mode 100644 index 0000000000000000000000000000000000000000..0c7da662dad4b4154b3b5265240962d7ead89c1a --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torch/testing/_internal/jit_utils.py 
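# [Editor's note] Illustrative sketch only, not part of the patch. It demonstrates how the
# test-spec dictionaries in additional_module_tests (defined in jit_metaprogramming_utils
# above) are mapped to test names by get_nn_mod_test_name and how the module "class" is
# recovered by get_nn_module_class_from_kwargs; the spec values used here are hypothetical.
from torch.testing._internal.jit_metaprogramming_utils import (
    get_nn_mod_test_name, get_nn_module_class_from_kwargs)

spec = {'module_name': 'Bilinear', 'constructor_args': (5, 5, 10),
        'input_size': (5, 5), 'desc': 'basic'}
assert get_nn_mod_test_name(**spec) == 'test_nn_Bilinear_basic'
# the "class" is everything before the first underscore of the resolved name
assert get_nn_module_class_from_kwargs(fullname='Conv2d_dilated') == 'Conv2d'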
@@ -0,0 +1,891 @@ +# Torch +from torch.autograd import Variable +from torch.autograd.function import _nested_map +from torch.jit.annotations import BroadcastingList2, BroadcastingList3 # noqa: F401 + +from torch.onnx import OperatorExportTypes +import torch +import torch.cuda +import torch.jit +import torch.jit._logging +import torch.jit.frontend +import torch.jit.quantized +import zipfile +import functools + +# Testing utils +from torch.testing import FileCheck +from torch.testing._internal.common_utils import IS_WINDOWS, \ + freeze_rng_state, enable_profiling_mode_for_profiling_tests, ProfilingMode, TEST_BAILOUTS, \ + is_iterable_of_tensors +from torch.testing._internal.common_jit import JitCommonTestCase +from torch.testing._internal.common_utils import enable_profiling_mode # noqa: F401 + +# Standard library +from contextlib import contextmanager +from functools import reduce +from io import StringIO +from collections import defaultdict + +import importlib.util +import inspect +import io +import math +import os +import pickle +import sys +import tempfile +import textwrap +from importlib.abc import Loader +from typing import Any, Dict, List, Tuple, Union + +RUN_CUDA = torch.cuda.is_available() +RUN_CUDA_MULTI_GPU = RUN_CUDA and torch.cuda.device_count() > 1 +RUN_CUDA_HALF = RUN_CUDA +# HIP supports half, no version check necessary +if torch.cuda.is_available() and not torch.version.hip: + CUDA_VERSION = torch._C._cuda_getCompiledVersion() + for d in range(torch.cuda.device_count()): + major = torch.cuda.get_device_capability(d)[0] + if (major < 6): + RUN_CUDA_HALF = False + +def execWrapper(code, glob, loc): + exec(code, glob, loc) + +def do_input_map(fn, input): + return _nested_map(lambda t: isinstance(t, torch.Tensor), fn)(input) + +def clear_class_registry(): + torch._C._jit_clear_class_registry() + torch.jit._recursive.concrete_type_store = torch.jit._recursive.ConcreteTypeStore() + torch.jit._state._clear_class_state() + +def get_execution_plan(graph_executor_state): + execution_plans = list(graph_executor_state.execution_plans.values()) + num_plans = len(execution_plans) + if num_plans != 1: + raise RuntimeError('This test assumes this GraphExecutor should ' + f'only have one execution plan, got: {num_plans}') + return execution_plans[0] + +class _AssertRaisesRegexWithHighlightContext: + """ + A context manager that is useful for checking that error messages highlight + the correct part of the source code. 
+ """ + + def __init__(self, test_case, exception, regex, highlight): + self.test_case = test_case + self.exception_type = exception + self.regex = regex + self.highlight = highlight + + def __enter__(self): + return self + + def __exit__(self, type, value, traceback): + with self.test_case.assertRaisesRegex(self.exception_type, self.regex): + if type: + raise value + + if self.highlight: + FileCheck().check_source_highlighted(self.highlight).run(str(value)) + + return True + +FUSION_GROUP = "prim::TensorExprGroup" + +class JitTestCase(JitCommonTestCase): + _do_cuda_memory_leak_check = True + _restored_warnings = False + + class capture_stdout(list): + """ + Replace sys.stdout with a temporary StringIO + """ + def __enter__(self): + self.sys_stdout = sys.stdout + self.stringio = StringIO() + sys.stdout = self.stringio + return self + + def __exit__(self, *args): + self.append(str(self.stringio.getvalue())) + del self.stringio + sys.stdout = self.sys_stdout + + class capture_stderr(list): + """ + Replace sys.stderr with a temporary StringIO + """ + def __enter__(self): + self.sys_stderr = sys.stderr + self.stringio = StringIO() + sys.stderr = self.stringio + return self + + def __exit__(self, *args): + self.append(str(self.stringio.getvalue())) + del self.stringio + sys.stderr = self.sys_stderr + + def setHooks(self): + torch._C._jit_set_emit_hooks(self.emitModuleHook, self.emitFunctionHook) + + def clearHooks(self): + torch._C._jit_set_emit_hooks(None, None) + + def setUp(self): + super().setUp() + # unittest overrides all warning filters and forces all of them to show up + # after we install our own to silence those coming from inside PyTorch. + # This will ensure that our filter still takes precedence. + if not JitTestCase._restored_warnings: + torch.jit.TracerWarning.ignore_lib_warnings() + JitTestCase._restored_warnings = True + self.setHooks() + + def tearDown(self): + super().tearDown() + # needs to be cleared because python might be unloaded before + # the callback gets destructed + self.clearHooks() + clear_class_registry() + + def assertAllFused(self, graph, except_for=()): + + # note this helper collects nodes on 'fast path' only + # i.e. 
the true blocks of specialized checks + def get_nodes_and_parents_recursively(block, kind, acc): + for node in block.nodes(): + if node.kind() == kind: + acc[block].append(node) + elif node.kind() == 'prim::DifferentiableGraph': + get_nodes_and_parents_recursively(node.g('Subgraph'), kind, acc) + elif node.kind() == 'prim::If' and (node.inputs().__next__().node().kind() == 'aten::all' or + node.inputs().__next__().node().kind() == 'prim::TypeCheck' or + node.inputs().__next__().node().kind() == 'prim::RequiresGradCheck'): + get_nodes_and_parents_recursively(node.blocks().__next__(), kind, acc) + else: + for inner_block in node.blocks(): + get_nodes_and_parents_recursively(inner_block, kind, acc) + + allowed_nodes = {'prim::Constant', FUSION_GROUP, 'prim::BailoutTemplate', + 'prim::TupleConstruct', 'prim::If', 'prim::TypeCheck', 'prim::RequiresGradCheck'} | set(except_for) + + fusion_groups : Dict[torch._C.Block, List[torch._C.Node]] = defaultdict(list) + get_nodes_and_parents_recursively(graph, FUSION_GROUP, fusion_groups) + self.assertTrue(len(fusion_groups) == 1, f'got {graph}') + (graph, fusion_nodes) = next(iter(fusion_groups.items())) + # the block contains one FUSION_GROUP and the rest of nodes are `allowed_nodes` + self.assertTrue(len(fusion_nodes) == 1, f'got {graph}') + self.assertTrue(all(node.kind() in allowed_nodes for node in graph.nodes()), + f'got {graph}') + + def _isHookExceptionOk(self, e): + se = str(e) + allowed = ("Could not export Python function", + "closures are not exportable") + for a in allowed: + if a in se: + return True + return False + + def _compared_saved_loaded(self, m): + def extract_files(buffer): + # crack open the zip format to get at the main module code + archive = zipfile.ZipFile(buffer) + # check that we have no duplicate names + self.assertEqual(len(set(archive.namelist())), len(archive.namelist())) + files = list(filter(lambda x: x.startswith('archive/code/'), archive.namelist())) + # unwrap all the code files into strings + code_files_str = filter(lambda x: x.endswith('.py'), files) + code_files_stream = (archive.open(f) for f in code_files_str) + code_files = ("".join([line.decode() for line in file]) for file in code_files_stream) + + # unpickled all the debug files + debug_files_str = filter(lambda f: f.endswith('.debug_pkl'), files) + debug_files_stream = (archive.open(f) for f in debug_files_str) + debug_files = (pickle.load(f) for f in debug_files_stream) + return code_files, debug_files + + # disable the hook while we parse code, otherwise we will re-enter the hook + with torch._jit_internal._disable_emit_hooks(): + try: + # short-circuit if this is an empty function or module + if len(m.code) == 0: + return + if isinstance(m, torch._C.ScriptModule): + if len(m._method_names()) == 0: + return + + # save the module to a buffer + buffer = io.BytesIO() + torch.jit.save(m, buffer) + # copy the data in the buffer so we can restore it later. This + # is because py2 and py3 have different semantics with zipfile + # and it's easier to just work with a fresh copy each time. 
+ buffer_copy = buffer.getvalue() + + code_files, debug_files = extract_files(buffer) + + except RuntimeError as e: + if not self._isHookExceptionOk(e): + raise + else: + return + + # import the model again (from a the copy we made of the original) + buffer2 = io.BytesIO(buffer_copy) + imported = torch.jit.load(buffer2) + + # save it again + saved_module_buffer_2 = io.BytesIO() + torch.jit.save(imported, saved_module_buffer_2) + + saved_module_buffer_2.seek(0) + code_files_2, debug_files_2 = extract_files(saved_module_buffer_2) + + for a, b in zip(code_files, code_files_2): + self.assertMultiLineEqual(a, b) + + if isinstance(m, torch._C.ScriptModule): + self.assertTrue(torch._C._ivalue_tags_match(m, imported._c)) + + + def emitFunctionHook(self, func): + # func has invalid names for export, skip the jitter check + if func.name == "" or "aten::" in func.name: + return + self._compared_saved_loaded(func) + + def emitModuleHook(self, module): + self._compared_saved_loaded(module) + + + def getExportImportCopyWithPacking(self, m, also_test_file=True, map_location=None): + buffer = io.BytesIO() + m.apply(lambda s: s._pack() if s._c._has_method('_pack') else None) + torch.jit.save(m, buffer) + m.apply(lambda s: s._unpack() if s._c._has_method('_unpack') else None) + buffer.seek(0) + imported = torch.jit.load(buffer, map_location=map_location) + imported.apply(lambda s: s._unpack() if s._c._has_method('_unpack') else None) + + if not also_test_file: + return imported + + # Ideally we would like to not have to manually delete the file, but NamedTemporaryFile + # opens the file, and it cannot be opened multiple times in Windows. To support Windows, + # close the file after creation and try to remove it manually + f = tempfile.NamedTemporaryFile(delete=False) + try: + f.close() + imported.save(f.name) + result = torch.jit.load(f.name, map_location=map_location) + finally: + os.unlink(f.name) + + result.apply(lambda s: s._unpack() if s._c._has_method('_unpack') else None) + return result + + def assertGraphContains(self, graph, kind, consider_subgraphs=False): + + if consider_subgraphs: + strgraph = str(graph) + count = strgraph.count(kind) - strgraph.count(f'with {kind}') + self.assertTrue(count > 0) + return + + def nodes(block): + out = [] + for node in block.nodes(): + if node.kind() == kind: + out.append(node) + for block in node.blocks(): + out += nodes(block) + return out + + out_nodes = nodes(graph) + self.assertTrue(len(out_nodes) > 0) + + def assertGraphContainsExactly(self, graph, kind, num_kind_nodes, consider_subgraphs=False): + def perform_assert(graph, kind, actual, expected, consider_subgraphs): + if actual == expected: + return + subgraph = 'including' if consider_subgraphs else 'excluding' + raise AssertionError( + f'{graph}\nError: graph contains {actual} {kind} nodes ({subgraph} subgraphs) but expected {expected}') + + if consider_subgraphs: + strgraph = str(graph) + count = strgraph.count(kind) - strgraph.count(f'with {kind}') + perform_assert(graph, kind, count, num_kind_nodes, + consider_subgraphs) + return + + def nodes(block): + out = [] + for node in block.nodes(): + if node.kind() == kind: + out.append(node) + for block in node.blocks(): + out += nodes(block) + return out + + out_nodes = nodes(graph) + perform_assert(graph, kind, len(out_nodes), num_kind_nodes, + consider_subgraphs) + + def assertExpectedONNXGraph(self, g, *args, **kwargs): + g = torch.onnx._optimize_trace(g, operator_export_type=OperatorExportTypes.ONNX) + self.assertExpectedGraph(g, *args, **kwargs) + + 
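# [Editor's note] Illustrative sketch only, not part of the patch. It shows the intended use
# of the graph assertions defined above from a JitTestCase subclass; the test class and test
# method names here are hypothetical.
import torch
from torch.testing._internal.jit_utils import JitTestCase

class ExampleGraphChecks(JitTestCase):
    def test_scripted_add_has_one_add_node(self):
        @torch.jit.script
        def f(x, y):
            return x + y
        # the scripted graph of `x + y` is expected to contain exactly one aten::add node
        self.assertGraphContains(f.graph, 'aten::add')
        self.assertGraphContainsExactly(f.graph, 'aten::add', 1)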
def assertExpectedGraph(self, trace, *args, **kwargs): + if isinstance(trace, torch._C.Graph): + graph = trace + else: + graph = trace.graph() + + torch._C._jit_pass_lint(graph) + torch._C._jit_pass_dce(graph) + torch._C._jit_pass_lint(graph) + graph = torch._C._jit_pass_canonicalize(graph) + torch._C._jit_pass_lint(graph) + self.assertExpected(str(graph), *args, **kwargs) + + def run_pass(self, name, trace): + if isinstance(trace, torch._C.Graph): + graph = trace + set_graph = False + else: + set_graph = True + graph = trace.graph() + + torch._C._jit_pass_lint(graph) + result = getattr(torch._C, '_jit_pass_' + name)(graph) + if result is not None and not isinstance(result, bool): + graph = result + torch._C._jit_pass_lint(graph) + + if set_graph: + trace.set_graph(graph) + return graph + + def get_frame_vars(self, frames_up): + frame = inspect.currentframe() + if not frame: + raise RuntimeError("failed to inspect frame") + i = 0 + while i < frames_up + 1: + frame = frame.f_back + if not frame: + raise RuntimeError("failed to get frame") + i += 1 + defined_vars: Dict[str, Any] = {} + defined_vars.update(frame.f_locals) + defined_vars.update(frame.f_globals) + return defined_vars + + def assertRaisesRegexWithHighlight(self, exception, regex, highlight): + return _AssertRaisesRegexWithHighlightContext(self, exception, regex, highlight) + + def checkScriptRaisesRegex(self, script, inputs, exception, regex, + name=None, outputs=None, capture_output=False, + frames_up=1, profiling=ProfilingMode.PROFILING): + """ + Checks that a given function will throw the correct exception, + when executed with normal python, the string frontend, and the + AST frontend. Logic taken from `checkScript` (see comments there + for details) + """ + with enable_profiling_mode_for_profiling_tests(): + # Normal Python + with self.assertRaisesRegex(exception, regex): + if isinstance(script, str): + frame = self.get_frame_vars(frames_up) + the_locals: Dict[str, Any] = {} + execWrapper(script, glob=frame, loc=the_locals) + frame.update(the_locals) + + python_fn = frame[name] + else: + python_fn = script + + python_fn(*inputs) + + # String frontend + with self.assertRaisesRegex(exception, regex): + if isinstance(script, str): + cu = torch.jit.CompilationUnit(script, _frames_up=frames_up) + string_frontend = getattr(cu, name) + else: + source = textwrap.dedent(inspect.getsource(script)) + cu = torch.jit.CompilationUnit(source, _frames_up=frames_up) + string_frontend = getattr(cu, script.__name__) + + string_frontend(*inputs) + + # Python AST frontend + if not isinstance(script, str): + with self.assertRaisesRegex(exception, regex): + ge = torch.jit.script(python_fn) + ge(*inputs) + + def checkBailouts(self, model, inputs, expected): + state = model.get_debug_state() + plan = get_execution_plan(state) + num_bailouts = plan.code.num_bailouts() + for i in range(0, num_bailouts): + plan.code.request_bailout(i) + bailout_outputs = model(*inputs) + self.assertEqual(bailout_outputs, expected) + + def checkScript(self, + script, + inputs, + name='func', + optimize=True, + inputs_requires_grad=False, + capture_output=False, + frames_up=1, + profiling=ProfilingMode.PROFILING, + atol=None, + rtol=None): + """ + Checks that a given script generates the same output as the Python + version using the given inputs. 
+ """ + with torch.jit.optimized_execution(optimize): + with enable_profiling_mode_for_profiling_tests(): + extra_profile_runs = any(isinstance(x, torch.Tensor) and x.requires_grad for x in inputs) + if isinstance(script, str): + # Compile the string to a Script function + # with enable_profiling_mode(): + cu = torch.jit.CompilationUnit(script, _frames_up=frames_up) + + # Execute the Python function so we can run it later and get its + # outputs + + frame = self.get_frame_vars(frames_up) + the_locals: Dict[str, Any] = {} + execWrapper(script, glob=frame, loc=the_locals) + frame.update(the_locals) + + python_fn = frame[name] + scripted_fn = getattr(cu, name) + else: + + # Check the string frontend first + source = textwrap.dedent(inspect.getsource(script)) + self.checkScript( + source, + inputs, + script.__name__, + optimize=optimize, + inputs_requires_grad=inputs_requires_grad, + capture_output=capture_output, + profiling=profiling, + frames_up=2) + + # Continue checking the Python frontend + scripted_fn = torch.jit.script(script, _frames_up=1) + python_fn = script + + if inputs_requires_grad: + recording_inputs = do_input_map(lambda t: t.detach().requires_grad_(), inputs) + else: + recording_inputs = inputs + + if capture_output: + with self.capture_stdout() as script_stdout: + script_outputs = scripted_fn(*recording_inputs) + with self.capture_stdout() as opt_script_stdout: + opt_script_outputs = scripted_fn(*recording_inputs) + with self.capture_stdout() as _python_stdout: + python_outputs = python_fn(*inputs) + if not IS_WINDOWS: + self.assertExpected(script_stdout[0], subname='stdout') + self.assertEqual(python_outputs, opt_script_outputs, atol=atol, rtol=rtol) + else: + # profiling run + script_outputs = scripted_fn(*recording_inputs) + if inputs_requires_grad or extra_profile_runs: + opt_script_outputs = scripted_fn(*recording_inputs) + # optimized run + opt_script_outputs = scripted_fn(*recording_inputs) + if TEST_BAILOUTS: + self.checkBailouts(scripted_fn, inputs, opt_script_outputs) + python_outputs = python_fn(*inputs) + self.assertEqual(python_outputs, script_outputs, atol=atol, rtol=rtol) + self.assertEqual(script_outputs, opt_script_outputs, atol=atol, rtol=rtol) + return scripted_fn + + def checkTrace(self, func, reference_tensors, input_tensors=None, + drop=None, allow_unused=False, verbose=False, + inputs_require_grads=True, check_tolerance=1e-5, export_import=True, + _force_outplace=False, grad_atol=None, grad_rtol=None): + + # TODO: check gradients for parameters, not just inputs + def allSum(vs): + # drop allows us to remove some values from ever being used + # to test unused outputs + if drop is not None: + vs = vs[:-drop] + # we don't want all the grad for all the outputs to be the same + # so we multiply each by a constant + return sum(math.log(i + 2) * v.sum() for i, v in enumerate(vs) if v is not None) + if input_tensors is None: + input_tensors = reference_tensors + + def flatten_inputs(inputs): + def input_reduce(input, fn, acc): + if isinstance(input, torch.Tensor): + fn(input, acc) + elif isinstance(input, dict): + reduce(lambda acc, key: input_reduce(input[key], fn, acc), input, acc) + else: + reduce(lambda acc, val: input_reduce(val, fn, acc), input, acc) + return acc + return tuple(input_reduce(recording_inputs, lambda t, acc: acc.append(t), [])) + + nograd_inputs = reference_tensors + if inputs_require_grads: + recording_inputs = do_input_map(lambda t: t.clone().requires_grad_(), reference_tensors) + flattened_recording_inputs = 
flatten_inputs(recording_inputs) + else: + recording_inputs = reference_tensors + + # `check_trace` is set to False because check_trace is run with @no_grad + # Also, `checkTrace` already does all the checks + # against python function + ge = torch.jit.trace(func, input_tensors, check_tolerance=check_tolerance, + _force_outplace=_force_outplace, check_trace=False) + + if export_import: + ge = self.getExportImportCopy(ge) + + if verbose: + print(ge.graph) + + # test no gradients case + outputs = func(*nograd_inputs) + outputs_ge = ge(*nograd_inputs) + self.assertEqual(outputs, outputs_ge) + + # test gradients case + outputs = func(*recording_inputs) + if inputs_require_grads: + grads = torch.autograd.grad(allSum(outputs), flattened_recording_inputs, + allow_unused=allow_unused) + + outputs_ge = ge(*recording_inputs) + if inputs_require_grads: + grads_ge = torch.autograd.grad(allSum(outputs_ge), flattened_recording_inputs, + allow_unused=allow_unused) + self.assertEqual(outputs, outputs_ge) + if inputs_require_grads: + self.assertEqual(grads, grads_ge, atol=grad_atol, rtol=grad_rtol) + + # test the grad grad case + outputs = func(*recording_inputs) + l1 = allSum(outputs) + if inputs_require_grads: + grads = torch.autograd.grad(l1, flattened_recording_inputs, create_graph=True, + allow_unused=allow_unused) + if inputs_require_grads: + l2 = (allSum(grads) * l1) + grads2 = torch.autograd.grad(l2, flattened_recording_inputs, allow_unused=allow_unused) + + if inputs_require_grads: + recording_inputs = do_input_map(lambda t: Variable(t, requires_grad=True), reference_tensors) + flattened_recording_inputs = flatten_inputs(recording_inputs) + + outputs_ge = ge(*recording_inputs) + l1_ge = allSum(outputs_ge) + if inputs_require_grads: + grads_ge = torch.autograd.grad( + l1_ge, flattened_recording_inputs, create_graph=True, allow_unused=allow_unused) + + if inputs_require_grads: + l2_ge = (allSum(grads_ge) * l1_ge) + grads2_ge = torch.autograd.grad(l2_ge, flattened_recording_inputs, allow_unused=allow_unused) + + self.assertEqual(outputs, outputs_ge) + if inputs_require_grads: + self.assertEqual(grads, grads_ge, atol=grad_atol, rtol=grad_rtol) + for g2, g2_ge in zip(grads2, grads2_ge): + if g2 is None and g2_ge is None: + continue + self.assertEqual(g2, g2_ge, atol=8e-4, rtol=8e-4) + + return ge + + def checkModule(self, nn_module, args): + """ + Check that a nn.Module's results in Script mode match eager and that it + can be exported + """ + sm = torch.jit.script(nn_module) + + with freeze_rng_state(): + eager_out = nn_module(*args) + + with freeze_rng_state(): + script_out = sm(*args) + + self.assertEqual(eager_out, script_out) + self.assertExportImportModule(sm, args) + + return sm + +class NoTracerWarnContextManager: + def __enter__(self): + self.prev = torch._C._jit_get_tracer_state_warn() + torch._C._jit_set_tracer_state_warn(False) + + def __exit__(self, *args): + torch._C._jit_set_tracer_state_warn(self.prev) + +@contextmanager +def inline_everything_mode(should_inline): + old = torch._C._jit_get_inline_everything_mode() + torch._C._jit_set_inline_everything_mode(should_inline) + try: + yield + finally: + torch._C._jit_set_inline_everything_mode(old) + +@contextmanager +def set_fusion_group_inlining(inlining): + old = torch._C._debug_get_fusion_group_inlining() + torch._C._debug_set_fusion_group_inlining(inlining) + try: + yield + finally: + torch._C._debug_set_fusion_group_inlining(old) + +# note: not re-entrant, use unnested only +@contextmanager +def 
disable_autodiff_subgraph_inlining(enabled=True): + torch._C._debug_set_autodiff_subgraph_inlining(not enabled) + try: + yield + finally: + torch._C._debug_set_autodiff_subgraph_inlining(True) + +def _inline_everything(fn): + @functools.wraps(fn) + def wrapper(*args, **kwargs): + with inline_everything_mode(True): + fn(*args, **kwargs) + return wrapper + +# this exists for forward compatibility reasons temporarily. +# TODO(suo) remove +def _tmp_donotuse_dont_inline_everything(fn): + @functools.wraps(fn) + def wrapper(*args, **kwargs): + with inline_everything_mode(False): + fn(*args, **kwargs) + return wrapper + +# make it easy to quicky define/trace a function for these tests +def _trace(*args, **kwargs): + def wrapper(func): + return torch.jit.trace(func, args, **kwargs) + return wrapper + + +def enable_cpu_fuser(fn): + def wrapper(*args, **kwargs): + torch._C._jit_override_can_fuse_on_cpu_legacy(True) + torch._C._jit_override_can_fuse_on_cpu(True) + torch._C._jit_set_te_must_use_llvm_cpu(False) + try: + fn(*args, **kwargs) + finally: + torch._C._jit_override_can_fuse_on_cpu_legacy(False) + torch._C._jit_override_can_fuse_on_cpu(False) + torch._C._jit_set_te_must_use_llvm_cpu(True) + return wrapper + + +def enable_cpu_fuser_if(cond): + if cond: + return enable_cpu_fuser + else: + def noop_fuser(fn): + def wrapper(*args, **kwargs): + return fn(*args, **kwargs) + return wrapper + return noop_fuser + +def get_forward(c): + return c._get_method('forward') + +def get_forward_graph(c): + return c._get_method('forward').graph + +def get_module_method(m, module, method): + return m._c.getattr(module)._get_method(method) + +def attrs_with_prefix(module, prefix): + return [x for x, _ in module._modules._c.items() + if x.startswith(prefix)] + +def warmup_backward(f, *args): + profiling_count = 3 + results = [] + for i in range(profiling_count): + if len(args) > 0: + r = torch.autograd.grad(f, *args) + results.append(r) + else: + f.backward(retain_graph=True) + + return results + +# TODO: Remove me once https://bugs.python.org/issue42666 is resolved +def make_global(*args): + for arg in args: + setattr(sys.modules[arg.__module__], arg.__name__, arg) + +# Helper function to eval Python3 code without causing a syntax error for +# this file under py2 +def _get_py3_code(code, fn_name): + with tempfile.TemporaryDirectory() as tmp_dir: + script_path = os.path.join(tmp_dir, 'script.py') + with open(script_path, 'w') as f: + f.write(code) + spec = importlib.util.spec_from_file_location(fn_name, script_path) + module = importlib.util.module_from_spec(spec) + loader = spec.loader + assert isinstance(loader, Loader) # Assert type to meet MyPy requirement + loader.exec_module(module) + fn = getattr(module, fn_name) + return fn + +class TensorExprTestOptions: + def __init__(self): + self.old_profiling_executor = torch._C._jit_set_profiling_executor(True) + self.old_profiling_mode = torch._C._get_graph_executor_optimize(True) + + self.old_cpu_fuser_state = torch._C._jit_can_fuse_on_cpu() + self.old_gpu_fuser_state = torch._C._jit_can_fuse_on_gpu() + torch._C._jit_override_can_fuse_on_cpu(True) + torch._C._jit_override_can_fuse_on_gpu(True) + self.texpr_fuser_state = torch._C._jit_texpr_fuser_enabled() + torch._C._jit_set_texpr_fuser_enabled(True) + self.old_fusion_inlining = torch._C._debug_get_fusion_group_inlining() + torch._C._debug_set_fusion_group_inlining(False) + self.old_te_must_use_llvm_cpu = torch._C._jit_get_te_must_use_llvm_cpu() + torch._C._jit_set_te_must_use_llvm_cpu(False) + + def restore(self): + 
torch._C._jit_set_profiling_executor(self.old_profiling_executor) + torch._C._get_graph_executor_optimize(self.old_profiling_mode) + + torch._C._jit_set_texpr_fuser_enabled(self.texpr_fuser_state) + torch._C._jit_override_can_fuse_on_gpu(self.old_gpu_fuser_state) + torch._C._jit_override_can_fuse_on_cpu(self.old_cpu_fuser_state) + torch._C._debug_set_fusion_group_inlining(self.old_fusion_inlining) + torch._C._jit_set_te_must_use_llvm_cpu(self.old_te_must_use_llvm_cpu) + +def clone_inputs(args): + inputs: List[Union[torch.Tensor, List[torch.Tensor]]] = [] + + for arg in args: + if isinstance(arg, torch.Tensor): + inputs.append(arg.detach().clone()) + elif is_iterable_of_tensors(arg): + inputs.append([t.detach().clone() for t in arg]) + else: + inputs.append(arg) + + return inputs + +def get_traced_sample_variant_pairs(device, dtype, op): + # tuples of (variant, sample) + outputs: List[Tuple[Any, Any]] = [] + + samples = op.sample_inputs(device, dtype) + + # Acquires variants to test + func = op.get_op() + method = op.get_method() + variants = { + # TODO: inplace tests currently fail, fix and add inplace variant + 'function': func, 'method': method, + } + + # TODO: find better way to standardize on op registration itself.. + has_fake_function = op.name in ["resize_", 'resize_as_'] + + if has_fake_function: + variants = {'method': getattr(torch.Tensor, op.name)} + + # In eager mode, these ops can take (Tensor, bool) args; but in + # JIT they can only take (Tensor, Scalar), and bool is not a + # scalar in the JIT type system. So to test these in JIT, the bool + # is converted to an int for the test. + ops_with_unsupported_bool_args = [ + { + "name": "div_floor_rounding", + "arg_idx": [0], + }, + { + "name": "div_no_rounding_mode", + "arg_idx": [0], + }, + { + "name": "div_trunc_rounding", + "arg_idx": [0], + }, + { + "name": "index_fill", + "arg_idx": [2], + }, + { + "name": "full_like", + "arg_idx": [0], + }, + { + "name": "mul", + "arg_idx": [0], + }, + { + "name": "new_full", + "arg_idx": [1], + }, + ] + + # doesn't support tracing + if has_fake_function: + return outputs + + for sample in samples: + for variant in variants.values(): + if variant is None: + continue + + if is_lambda(variant): + continue + + matching_ops = filter(lambda x: op.formatted_name == x["name"], ops_with_unsupported_bool_args) + for op_data in matching_ops: + for idx in op_data["arg_idx"]: + args = list(sample.args) + if len(sample.args) > idx and isinstance(sample.args[idx], bool): + args[idx] = int(args[idx]) + sample.args = tuple(args) + + outputs.append((variant, sample)) + + return outputs + +# types.LambdaType gave false positives +def is_lambda(lamb): + LAMBDA = lambda: 0 # noqa: E731 + return isinstance(lamb, type(LAMBDA)) and lamb.__name__ == LAMBDA.__name__ diff --git a/env-llmeval/lib/python3.10/site-packages/torch/testing/_internal/logging_utils.py b/env-llmeval/lib/python3.10/site-packages/torch/testing/_internal/logging_utils.py new file mode 100644 index 0000000000000000000000000000000000000000..e1d3f0669b1d69f64d1d521dba05cf8e062f7738 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torch/testing/_internal/logging_utils.py @@ -0,0 +1,179 @@ +import torch._dynamo.test_case +import unittest.mock +import os +import contextlib +import torch._logging +import torch._logging._internal +from torch._dynamo.utils import LazyString +import logging + +@contextlib.contextmanager +def preserve_log_state(): + prev_state = torch._logging._internal._get_log_state() + 
torch._logging._internal._set_log_state(torch._logging._internal.LogState()) + try: + yield + finally: + torch._logging._internal._set_log_state(prev_state) + torch._logging._internal._init_logs() + +def log_settings(settings): + exit_stack = contextlib.ExitStack() + settings_patch = unittest.mock.patch.dict(os.environ, {"TORCH_LOGS": settings}) + exit_stack.enter_context(preserve_log_state()) + exit_stack.enter_context(settings_patch) + torch._logging._internal._init_logs() + return exit_stack + +def log_api(**kwargs): + exit_stack = contextlib.ExitStack() + exit_stack.enter_context(preserve_log_state()) + torch._logging.set_logs(**kwargs) + return exit_stack + + +def kwargs_to_settings(**kwargs): + INT_TO_VERBOSITY = {10: "+", 20: "", 40: "-"} + + settings = [] + + def append_setting(name, level): + if isinstance(name, str) and isinstance(level, int) and level in INT_TO_VERBOSITY: + settings.append(INT_TO_VERBOSITY[level] + name) + return + else: + raise ValueError("Invalid value for setting") + + for name, val in kwargs.items(): + if isinstance(val, bool): + settings.append(name) + elif isinstance(val, int): + append_setting(name, val) + elif isinstance(val, dict) and name == "modules": + for module_qname, level in val.items(): + append_setting(module_qname, level) + else: + raise ValueError("Invalid value for setting") + + return ",".join(settings) + + +# Note on testing strategy: +# This class does two things: +# 1. Runs two versions of a test: +# 1a. patches the env var log settings to some specific value +# 1b. calls torch._logging.set_logs(..) +# 2. patches the emit method of each setup handler to gather records +# that are emitted to each console stream +# 3. passes a ref to the gathered records to each test case for checking +# +# The goal of this testing in general is to ensure that given some settings env var +# that the logs are setup correctly and capturing the correct records. 
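+#
+# A minimal usage sketch (a hypothetical test case, not part of this module),
+# assuming a compile-triggering body; it shows how a test decorated with
+# make_logging_test receives the gathered records for both the env-var run
+# and the torch._logging.set_logs(...) run:
+#
+#     class ExampleLoggingTests(LoggingTestCase):
+#         @make_logging_test(dynamo=logging.DEBUG)
+#         def test_dynamo_debug_records(self, records):
+#             @torch.compile(backend="eager")
+#             def f(x):
+#                 return x + 1
+#
+#             f(torch.ones(2))
+#             self.assertGreater(len(records), 0)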
+def make_logging_test(**kwargs): + def wrapper(fn): + def test_fn(self): + + torch._dynamo.reset() + records = [] + # run with env var + if len(kwargs) == 0: + with self._handler_watcher(records): + fn(self, records) + else: + with log_settings(kwargs_to_settings(**kwargs)), self._handler_watcher(records): + fn(self, records) + + # run with API + torch._dynamo.reset() + records.clear() + with log_api(**kwargs), self._handler_watcher(records): + fn(self, records) + + + return test_fn + + return wrapper + +def make_settings_test(settings): + def wrapper(fn): + def test_fn(self): + torch._dynamo.reset() + records = [] + # run with env var + with log_settings(settings), self._handler_watcher(records): + fn(self, records) + + return test_fn + + return wrapper + +class LoggingTestCase(torch._dynamo.test_case.TestCase): + @classmethod + def setUpClass(cls): + super().setUpClass() + cls._exit_stack.enter_context( + unittest.mock.patch.dict(os.environ, {"___LOG_TESTING": ""}) + ) + cls._exit_stack.enter_context( + unittest.mock.patch("torch._dynamo.config.suppress_errors", True) + ) + cls._exit_stack.enter_context( + unittest.mock.patch("torch._dynamo.config.verbose", False) + ) + + @classmethod + def tearDownClass(cls): + cls._exit_stack.close() + torch._logging._internal.log_state.clear() + torch._logging._init_logs() + + def getRecord(self, records, m): + record = None + for r in records: + # NB: not r.msg because it looks like 3.11 changed how they + # structure log records + if m in r.getMessage(): + self.assertIsNone( + record, + msg=LazyString( + lambda: f"multiple matching records: {record} and {r} among {records}" + ), + ) + record = r + if record is None: + self.fail(f"did not find record with {m} among {records}") + return record + + # This patches the emit method of each handler to gather records + # as they are emitted + def _handler_watcher(self, record_list): + exit_stack = contextlib.ExitStack() + + def emit_post_hook(record): + nonlocal record_list + record_list.append(record) + + # registered logs are the only ones with handlers, so patch those + for log_qname in torch._logging._internal.log_registry.get_log_qnames(): + logger = logging.getLogger(log_qname) + num_handlers = len(logger.handlers) + self.assertLessEqual( + num_handlers, + 2, + "All pt2 loggers should only have at most two handlers (debug artifacts and messages above debug level).", + ) + + self.assertGreater(num_handlers, 0, "All pt2 loggers should have more than zero handlers") + + for handler in logger.handlers: + old_emit = handler.emit + + def new_emit(record): + old_emit(record) + emit_post_hook(record) + + exit_stack.enter_context( + unittest.mock.patch.object(handler, "emit", new_emit) + ) + + return exit_stack diff --git a/env-llmeval/lib/python3.10/site-packages/torch/testing/_internal/optests/__init__.py b/env-llmeval/lib/python3.10/site-packages/torch/testing/_internal/optests/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..350094d85fcc5ffe56059940506ed3fc17bb2179 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torch/testing/_internal/optests/__init__.py @@ -0,0 +1,5 @@ +from .make_fx import make_fx_check +from .aot_autograd import aot_autograd_check, _test_aot_autograd_forwards_backwards_helper +from .fake_tensor import fake_check +from .autograd_registration import autograd_registration_check +from .generate_tests import generate_opcheck_tests, opcheck, OpCheckError, dontGenerateOpCheckTests, is_inside_opcheck_mode diff --git 
a/env-llmeval/lib/python3.10/site-packages/torch/testing/_internal/optests/__pycache__/__init__.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/torch/testing/_internal/optests/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..4e867adf873e5b299232c8a327ee3987643b3d29 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/torch/testing/_internal/optests/__pycache__/__init__.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/torch/testing/_internal/optests/__pycache__/generate_tests.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/torch/testing/_internal/optests/__pycache__/generate_tests.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..22793a2c14778a3e2af32e444195f51b7795724a Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/torch/testing/_internal/optests/__pycache__/generate_tests.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/torch/testing/_internal/optests/aot_autograd.py b/env-llmeval/lib/python3.10/site-packages/torch/testing/_internal/optests/aot_autograd.py new file mode 100644 index 0000000000000000000000000000000000000000..4036f7a44f1b193056c68c29fad35e5555fe8238 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torch/testing/_internal/optests/aot_autograd.py @@ -0,0 +1,144 @@ +import torch +import torch.utils._pytree as pytree +from torch.testing._internal.common_methods_invocations import wrapper_set_seed +from functorch.compile import compiled_function, min_cut_rematerialization_partition, nop +from .make_fx import randomize +import re + + +class assert_raises_regex: + def __init__(self, exception_cls, regex): + self.exception_cls = exception_cls + self.regex = regex + + def __enter__(self): + pass + + def __exit__(self, exc_type, exc_val, traceback): + if exc_type == self.exception_cls: + msg = str(exc_val) + if not re.search(self.regex, msg): + raise AssertionError( + f"Expected exception to match regex. regex: {self.regex}, exception: {msg}") + return True # Squashes the exception + if exc_type is not None: + raise AssertionError( + f"Expected {self.exception_cls} to be raised, instead got exception {exc_type}") + raise AssertionError("Expected exception to be raised but none was") + + +def aot_autograd_check( + func, + args, + kwargs, + dynamic, + assert_raises_regex_fn=assert_raises_regex, + assert_equals_fn=torch.testing._comparison.assert_close, + check_gradients=True, + try_check_data_specialization=False): + """Compares func(*args, **kwargs) in eager-mode to under AOTAutograd. + + Compares outputs and (if check_gradients=True) gradients produced by + AOTAutograd against eager-mode PyTorch. + + We assume that func(*args, **kwargs) succeeds in eager-mode PyTorch. 
+ + """ + flat_args, args_spec = pytree.tree_flatten((args, kwargs)) + args_is_tensor = [isinstance(arg, torch.Tensor) for arg in flat_args] + args = [arg for arg in flat_args if isinstance(arg, torch.Tensor)] + + # We construct a new function that only accepts Tensors as inputs + def func_no_tensors(args): + reconstructed_flat_args = [] + args = iter(args) + for v in flat_args: + if isinstance(v, torch.Tensor): + reconstructed_flat_args.append(next(args)) + else: + reconstructed_flat_args.append(v) + + c_args, c_kwargs = pytree.tree_unflatten(reconstructed_flat_args, args_spec) + return func(*c_args, **c_kwargs) + + compiled_f = compiled_function( + func_no_tensors, nop, nop, dynamic=dynamic, partition_fn=min_cut_rematerialization_partition) + + out = wrapper_set_seed(func_no_tensors, args) + if check_gradients == "auto": + any_tensor_requires_grad = pytree.tree_any_only(torch.Tensor, lambda x: x.requires_grad, args) + any_output_requires_grad = pytree.tree_any_only(torch.Tensor, lambda x: x.requires_grad, out) + check_gradients = any_tensor_requires_grad and any_output_requires_grad + if not check_gradients: + compiled_out = wrapper_set_seed(compiled_f, args) + assert_equals_fn(compiled_out, out, msg=outputs_msg) + return + _test_aot_autograd_forwards_backwards_helper( + func_no_tensors, compiled_f, args, assert_raises_regex_fn, assert_equals_fn, + try_check_data_specialization) + +outputs_msg = ( + "Outputs of the operator are different in eager-mode PyTorch vs " + "AOTAutograd. This means the operator will have incorrect output " + "underneath torch.compile. This could be because the operator's " + "implementation not traceable or that there is a bug in AOTAutograd." +) + + +def _test_aot_autograd_forwards_backwards_helper( + f, compiled_f, args, assert_raises_regex_fn, assert_equals_fn, + try_check_data_specialization): + # Verify grads are equal between compiled and non-compiled versions of f. + + def call_forwards_backwards(f, args): + flat_args = pytree.arg_tree_leaves(*args) + diff_args = [arg for arg in flat_args if isinstance(arg, torch.Tensor) and + arg.requires_grad] + out = wrapper_set_seed(f, args) + flat_out = pytree.tree_leaves(out) + + sm = 0 + for i in flat_out: + if isinstance(i, torch.Tensor): + # We need to call .abs() because it is possible that the output of the + # operator is a complex Tensor and autograd will yell at autograd.grad + # on a complex Tensor unless we manually provide the grad_output flag. + sm += i.sum().abs() + assert isinstance(sm, torch.Tensor) + return out, torch.autograd.grad(sm, diff_args, allow_unused=True) + + def check(args, ignore_failure=False): + try: + orig_out, orig_grad = call_forwards_backwards(f, args) + except Exception: + if ignore_failure: + return + raise + + # See https://github.com/pytorch/pytorch/pull/98960#issuecomment-1505962215 + if all(x is None for x in orig_grad): + with assert_raises_regex_fn(RuntimeError, 'does not require grad and does not have a grad_fn'): + call_forwards_backwards(compiled_f, args) + return + + msg = ( + "Gradients of the operator are different in eager-mode PyTorch vs " + "AOTAutograd. This means the operator will have incorrect gradients " + "underneath torch.compile. This could be because the operator's " + "backward is incorrectly registered or not traceable or that there " + "is a bug in AOTAutograd." 
+ ) + + compiled_out, compiled_grad = call_forwards_backwards(compiled_f, args) + assert_equals_fn(compiled_out, orig_out, msg=outputs_msg) + assert_equals_fn(compiled_grad, orig_grad, msg=msg) + + check(args, ignore_failure=False) + + # Randomize the data and run the traced graph with it, to catch bugs + # where we may have baked in Tensor data into the trace. + # This is not guaranteed to succeed, because `f` might have preconditions + # on the values of the inputs, so we just ignore if this test fails. + if try_check_data_specialization: + args = randomize(args) + check(args, ignore_failure=True) diff --git a/env-llmeval/lib/python3.10/site-packages/torch/testing/_internal/optests/autograd_registration.py b/env-llmeval/lib/python3.10/site-packages/torch/testing/_internal/optests/autograd_registration.py new file mode 100644 index 0000000000000000000000000000000000000000..800e15aa1ecd11b9a81503c58ea40dac9e808a57 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torch/testing/_internal/optests/autograd_registration.py @@ -0,0 +1,130 @@ +import contextlib + +import torch +import torch.utils._pytree as pytree + + +@contextlib.contextmanager +def set_autograd_fallback_mode(mode): + prev = torch._C._get_autograd_fallback_mode() + try: + torch._C._set_autograd_fallback_mode(mode) + yield + finally: + torch._C._set_autograd_fallback_mode(prev) + + +def autograd_registration_check(op, args, kwargs): + """Check if autograd was registered correctly (for the operator). + + Operators should have "autograd support" registered directly to an + autograd dispatch key. + An incorrect registration may lead to unexpected silent incorrectness. + Note that this check won't catch all problems but will catch + the most common ones. + + Example usage: + >>> x = torch.randn(3, requires_grad=True) + >>> autograd_registration_check(torch.ops.aten.sin.default, (x,), {}) + + Here are some best practices if you do find your autograd is + registered incorrectly: + - If the operator is composite (i.e. consists of other PyTorch ops) + and you wish the operator to decompose and get autograd support + that way, then please register the implementation to + DispatchKey::CompositeImplicitAutograd + - If you're adding an autograd formula for the operator, the correct + thing to do is to register an autograd.Function to + DispatchKey::Autograd (preferred) or one of the + DispatchKey::Autograd keys. It is NOT OK to register + an autograd.Function to a backend (e.g. CPU/CUDA) key. + - If your operator is non-differentiable, then you should register + an implementation to the Autograd key that uses + AutoDispatchBelowAutograd and re-invokes the operator. + + """ + assert isinstance(op, torch._ops.OpOverload) + # Implementation details + # ----------------------------------------------- + # If an operator doesn't have an autograd kernel at an autograd key, + # and the operator does not return inputs as-is, then all of + # the outputs should have requires_grad=False before we apply + # special behaviors of our default autograd fallback. + # (The default autograd fallback may set requires_grad=True on output + # tensors in certain modes so that when they are backpropped through, + # they raise an error). 
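+    #
+    # Put concretely: with the fallback set to "nothing" (see below), the
+    # Autograd key is effectively a no-op for this operator, so any output
+    # with requires_grad=True must have been produced by autograd tracking
+    # happening *inside* the kernel (e.g. a kernel written in terms of
+    # regular, differentiable PyTorch ops).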
+ # + # Our strategy for detecting if an operator doesn't have an autograd + # kernel at the autograd key is: + # - set the autograd fallback mode to "nothing" (so it does not change + # the required-gradness of outputs) + # - run the operator + # - Check if any outputs of the operator (that are not inputs) require + # grad. This would only happen if the user calls regular PyTorch + # operations in their backend key (this op should instead be + # CompositeImplicitAutograd or not an op) or if the user invokes + # an autograd.Function in the backend key. + # + # Note that it's already likely a bug if the operator directly returns + # an input as output (because custom ops don't have a good way of + # constructing true in-place or out variants), but we defer that + # responsibility to a different test (schema_check). + + flat_args = pytree.arg_tree_leaves(*args, **kwargs) + all_tensors = [arg for arg in flat_args if isinstance(arg, torch.Tensor)] + if not any(t.requires_grad for t in all_tensors): + raise RuntimeError( + "autograd_registration_check: no inputs have requires_grad=True so " + "we are unable to actually perform this test. Please pass inputs " + "that do require grad." + ) + + # Determine which AutogradBACKEND key to check + all_device_types = {arg.device.type for arg in all_tensors} + if not all_device_types.issubset(["cpu", "cuda"]): + # Don't want to support other keys yet + raise NotImplementedError( + f"autograd_registration_check: NYI devices other than CPU/CUDA, got {all_device_types}" + ) + if "cuda" in all_device_types: + key = "AutogradCUDA" + elif "cpu" in all_device_types: + key = "AutogradCPU" + + if torch._C._dispatch_has_kernel_for_dispatch_key(op.name(), key): + return + if torch._C._dispatch_has_kernel_for_dispatch_key(op.name(), "Autograd"): + return + if torch._C._dispatch_has_kernel_for_dispatch_key( + op.name(), "CompositeImplicitAutograd" + ): + return + + # At this point, we know the operator doesn't have a kernel registered to an + # autograd key. Let's proceed with our test. + with set_autograd_fallback_mode("nothing"): + all_outs = op(*args, **kwargs) + + inp_ids = {id(arg) for arg in flat_args} + + def not_an_input_and_requires_grad(tensor): + if not tensor.requires_grad: + return False + if id(tensor) in inp_ids: + return False + return True + + if not pytree.tree_any_only(torch.Tensor, not_an_input_and_requires_grad, all_outs): + return + + raise AssertionError( + f"{op.name()}: at least one output of this operator has requires_grad=True " + f"but the operator does not have an autograd kernel defined at an autograd " + f"key (e.g. DispatchKey::Autograd). This could mean that you have " + f"incorrectly registered an autograd kernel to a non-Autograd DispatchKey, " + f"which may lead to silently incorrect results. If your operator consists " + f"of regular PyTorch operations, consider not using an operator at all " + f"or registering your operator as CompositeImplicitAutograd. If you have " + f"an autograd.Function registered to a backend (CPU/CUDA) key, the correct " + f"location for it is the Autograd key." 
+ ) diff --git a/env-llmeval/lib/python3.10/site-packages/torch/testing/_internal/optests/fake_tensor.py b/env-llmeval/lib/python3.10/site-packages/torch/testing/_internal/optests/fake_tensor.py new file mode 100644 index 0000000000000000000000000000000000000000..55f0df2682b6e3f03bf32eac512cea6547493e5f --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torch/testing/_internal/optests/fake_tensor.py @@ -0,0 +1,10 @@ +import torch._subclasses + + +def is_builtin(op): + return op.namespace in ('aten', 'prims', 'prim') + + +def fake_check(op, args, kwargs): + with torch._subclasses.CrossRefFakeMode(ignore_op_fn=is_builtin): + op(*args, **kwargs) diff --git a/env-llmeval/lib/python3.10/site-packages/torch/testing/_internal/optests/generate_tests.py b/env-llmeval/lib/python3.10/site-packages/torch/testing/_internal/optests/generate_tests.py new file mode 100644 index 0000000000000000000000000000000000000000..de92b5b7768b851abb80f163ca7ad2aa5a7b6000 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torch/testing/_internal/optests/generate_tests.py @@ -0,0 +1,851 @@ +import datetime +import difflib +import functools +import inspect +import json +import os +import re +import tempfile +import threading +import unittest +from typing import Any, Callable, Dict, List, Optional, Tuple, Union + +import torch + +import torch._dynamo + +import torch.utils._pytree as pytree +from torch._dynamo.utils import clone_input +from torch._subclasses.schema_check_mode import SchemaCheckMode +from torch._utils_internal import get_file_path_2 +from torch.overrides import TorchFunctionMode +from torch.testing._internal.optests import ( + aot_autograd_check, + autograd_registration_check, + fake_check, +) + + +def dontGenerateOpCheckTests(reason: str): + def inner(fun): + fun._torch_dont_generate_opcheck_tests = True + return fun + + return inner + + +def is_abstract(tensor: torch.Tensor) -> bool: + if tensor.is_meta: + return True + if torch._subclasses.fake_tensor.is_fake(tensor): + return True + return False + + +def safe_schema_check( + op: torch._ops.OpOverload, + args: Tuple[Any, ...], + kwargs: Dict[str, Any], + *, + copy_inputs: bool = True, +) -> Any: + if copy_inputs: + args, kwargs = deepcopy_tensors((args, kwargs)) + if pytree.tree_any_only(torch.Tensor, is_abstract, (args, kwargs)): + return None + with SchemaCheckMode(): + result = op(*args, **kwargs) + return result + + +def safe_autograd_registration_check( + op: torch._ops.OpOverload, + args: Tuple[Any, ...], + kwargs: Dict[str, Any], + *, + copy_inputs: bool = True, +) -> None: + if pytree.tree_any_only(torch.Tensor, is_abstract, (args, kwargs)): + return + if copy_inputs: + args, kwargs = deepcopy_tensors((args, kwargs)) + # Don't perform autograd_registration_check if none of the inputs require grad. 
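+    # (autograd_registration_check itself raises a RuntimeError when no input
+    # requires grad, so the guard below simply skips the check in that case.)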
+ if not pytree.tree_any_only( + torch.Tensor, lambda x: x.requires_grad, (args, kwargs) + ): + return + return autograd_registration_check(op, args, kwargs) + + +def safe_fake_check( + op: torch._ops.OpOverload, + args: Tuple[Any, ...], + kwargs: Dict[str, Any], + *, + copy_inputs: bool = True, +) -> None: + if pytree.tree_any_only(torch.Tensor, is_abstract, (args, kwargs)): + return None + if copy_inputs: + args, kwargs = deepcopy_tensors((args, kwargs)) + return fake_check(op, args, kwargs) + + +def safe_aot_autograd_check( + op: torch._ops.OpOverload, + args: Tuple[Any, ...], + kwargs: Dict[str, Any], + dynamic: bool, + *, + copy_inputs: bool = True, +) -> Any: + # NB: copy_inputs does nothing for aot_autograd_check: it always needs to copy + # inputs. + if pytree.tree_any_only(torch.Tensor, is_abstract, (args, kwargs)): + return None + + def func(*args, **kwargs): + args, kwargs = pytree.tree_map_only(torch.Tensor, torch.clone, (args, kwargs)) + return op(*args, **kwargs) + + # aot_autograd_check runs func(*args, **kwargs) multiple times + # and assumes `func` does not modify its inputs. + return aot_autograd_check(func, args, kwargs, dynamic, check_gradients="auto") + + +def deepcopy_tensors(inputs: Any) -> Any: + return pytree.tree_map_only(torch.Tensor, clone_input, inputs) + + +# Test util requirements +# - The test util must have signature (op: OpOverload, args, kwargs) +# - The test util must NOT mutate args, kwargs. +# - The test utils in this list must not be prefixes of each other. For example, +# having both "test_schema" and "test_schema_is_functional" is NOT OK. +# - The order of items in this dict matters (for opcheck), we'll run them +# in order. +ALL_TEST_UTILS = { + "test_schema": safe_schema_check, + "test_autograd_registration": safe_autograd_registration_check, + "test_faketensor": safe_fake_check, + "test_aot_dispatch_static": functools.partial( + safe_aot_autograd_check, + dynamic=False, + ), + "test_aot_dispatch_dynamic": functools.partial( + safe_aot_autograd_check, + dynamic=True, + ), +} + +GDOC = "https://docs.google.com/document/d/1Pj5HRZvdOq3xpFpbEjUZp2hBovhy7Wnxw14m6lF2154/edit" + +DEFAULT_TEST_UTILS = [ + "test_schema", + "test_autograd_registration", + "test_faketensor", + "test_aot_dispatch_static", + "test_aot_dispatch_dynamic", +] + + +def generate_opcheck_tests( + testcase: Any, + namespaces: List[str], + failures_dict_path: Optional[str] = None, + additional_decorators: Dict[str, Callable] = None, + test_utils: List[str] = DEFAULT_TEST_UTILS, +) -> None: + """Given an existing TestCase, use the existing tests to generate + additional validation tests for custom operators. + + For {all existing tests in the TestCase} x {all test utils}, + we will generate one new test. The new test runs a TorchFunctionMode + that intercepts ``op(*args, **kwargs)`` calls and invokes + ``test_util(op, *args, **kwargs)``, where ``op`` is an operator. + + The test_util that we support are in ALL_TEST_UTILS. They are: + - test_schema: This runs SchemaCheckMode. + - test_autograd_registration: This runs autograd_registration_check. + - test_faketensor: This runs CrossRefFakeMode. + - test_aot_dispatch_static: This runs aot_autograd_check, which: + checks that the outputs (and gradients, if they are computable) + are the same under eager-mode PyTorch and using AOTAutograd. + - test_aot_dispatch_dynamic: Same as aot_dispatch_static, but + runs AOTAutograd using dynamic shapes instead of static shapes. + + The generated test will have name ``{test_util}__{original_name}``. 
+ For example, if there is a method named ``test_cumsum``, then + we will generate a ``test_schema__test_cumsum``, + ``test_faketensor__test_cumsum``, etc. + + For more details, see https://docs.google.com/document/d/1Pj5HRZvdOq3xpFpbEjUZp2hBovhy7Wnxw14m6lF2154/edit + + Args: + testcase: The testcase we will modify and generate additional tests for. + namespaces: We will only intercept calls to custom operators with these + namespaces. + failures_dict_path: See ``validate_failures_dict_structure`` for more details + test_utils: a list of test_utils to generate. Example: ["test_schema", "test_faketensor"] + """ + if additional_decorators is None: + additional_decorators = {} + test_methods = [ + m + for m in dir(testcase) + if m.startswith("test_") and callable(getattr(testcase, m)) + ] + if failures_dict_path is None: + # The default failures_dict_path is failures_dict.json in + # the same directory as the test file. + prev_frame = inspect.currentframe().f_back + filename = inspect.getframeinfo(prev_frame)[0] + failures_dict_path = get_file_path_2( + os.path.dirname(filename), "failures_dict.json" + ) + failures_dict = FailuresDict.load( + failures_dict_path, create_file=should_update_failures_dict() + ) + validate_failures_dict_structure(failures_dict, test_utils, testcase) + validate_failures_dict_formatting(failures_dict_path) + + def construct_method(attr, prefix, tester): + method = getattr(testcase, attr) + if getattr(method, "_torch_dont_generate_opcheck_tests", False): + return + new_method_name = prefix + "__" + attr + + @functools.wraps(method) + def new_method(*args, **kwargs): + with OpCheckMode( + namespaces, + prefix, + tester, + failures_dict, + f"{testcase.__name__}.{new_method_name}", + failures_dict_path, + ): + result = method(*args, **kwargs) + return result + + if pytestmark := new_method.__dict__.get("pytestmark"): + import pytest + + # check if we need to simplify the parametrize marks + # NB: you need to add this mark to your pytest.ini + opcheck_only_one = False + for mark in pytestmark: + if isinstance(mark, pytest.Mark) and mark.name == "opcheck_only_one": + opcheck_only_one = True + + if opcheck_only_one: + new_pytestmark = [] + for mark in pytestmark: + if isinstance(mark, pytest.Mark) and mark.name == "parametrize": + argnames, argvalues = mark.args + assert not mark.kwargs, "NYI" + # Special case for device, we want to run on all + # devices + if argnames != "device": + new_pytestmark.append( + pytest.mark.parametrize( + argnames, (next(iter(argvalues)),) + ) + ) + continue + new_pytestmark.append(mark) + new_method.__dict__["pytestmark"] = new_pytestmark + + if new_method_name in additional_decorators: + for dec in additional_decorators[new_method_name]: + new_method = dec(new_method) + + if hasattr(testcase, new_method_name): + raise RuntimeError( + f"Tried to autogenerate {new_method_name} but {testcase} already " + f"has method named {new_method_name}. Please rename the original " + f"method on the TestCase." 
+ ) + setattr(testcase, new_method_name, new_method) + + test_utils = {name: ALL_TEST_UTILS[name] for name in test_utils} + for attr in test_methods: + for prefix, tester in test_utils.items(): + construct_method(attr, prefix, tester) + + generate_tag_tests(testcase, failures_dict, additional_decorators) + + +def generate_tag_tests(testcase, failures_dict, additional_decorators): + def generate_test(qualname, definitely_not_pt2_compliant, xfailed_tests): + def inner(self): + try: + op = torch._library.utils.lookup_op(qualname) + except AttributeError as e: + # Operator not importable in this test file + raise unittest.SkipTest(f"Can't import operator {qualname}") from e + op_marked_as_compliant = torch.Tag.pt2_compliant_tag in op.tags + if not op_marked_as_compliant: + return + if not definitely_not_pt2_compliant: + return + raise AssertionError( + f"op '{qualname}' was tagged with torch.Tag.pt2_compliant_tag " + f"but it failed some of the generated opcheck tests " + f"({xfailed_tests}). This may lead to silent correctness issues, " + f"please fix this." + ) + + return inner + + for qualname, test_dict in failures_dict.data.items(): + xfailed_tests = [ + test + for test, status_dict in test_dict.items() + # We're about to delete the following test after Ed's PR + # to specialize on C++ .size() calls + if "test_aot_dispatch_static" not in test + and status_dict["status"] == "xfail" + ] + definitely_not_pt2_compliant = len(xfailed_tests) > 0 + generated = generate_test(qualname, definitely_not_pt2_compliant, xfailed_tests) + + # Could result in collisions, but unlikely. We'll raise if we see one below. + mangled_qualname = qualname.replace("::", "_").replace(".", "_") + test_name = "test_pt2_compliant_tag_" + mangled_qualname + + # You can skip this test via the additional_decorators argument + # in generate_opcheck_tests + if test_name in additional_decorators: + for decorator in additional_decorators[test_name]: + generated = decorator(generated) + + if hasattr(testcase, test_name): + raise RuntimeError( + f"Tried to generate a test named {test_name}, but it exists " + f"already. This could be because of a name collision (where " + f"we generated two tests with the same name), or where we " + f"generated a test with the same name as an existing test." + ) + setattr(testcase, test_name, generated) + + +TEST_OPTIONS = ("xfail", "skip", "xsuccess") + + +def validate_failures_dict_formatting(failures_dict_path: str) -> None: + with open(failures_dict_path) as fp: + actual = fp.read() + failures_dict = FailuresDict.load(failures_dict_path) + expected = failures_dict._save(to_str=True) + if actual == expected: + return + if should_update_failures_dict(): + failures_dict = FailuresDict.load(failures_dict_path) + failures_dict.save() + return + expected = expected.splitlines(1) + actual = actual.splitlines(1) + diff = difflib.unified_diff(actual, expected) + diff = "".join(diff) + raise RuntimeError( + f"\n{diff}\n\nExpected the failures dict to be formatted " + f"a certain way. Please see the above diff; you can correct " + f"this either manually or by re-running the test with " + f"PYTORCH_OPCHECK_ACCEPT=1" + ) + + +def validate_failures_dict_structure( + failure_dict: "FailuresDict", test_utils: List[str], testcase: Any +) -> None: + """Validates the failures dict. + + The failure dict looks something like the following. + It maps operator name (qualname) to a list of autogenerated tests. 
+ Each autogenerated test may have a check for the operator (if the operator is + called by the test); the dictionary specifies if we should skip the check, + or if we expect some check to fail. + + { + "fbgemm::split_lengths": { + "test_schema__test_split_lengths": { + "comment": "you can put whatever you want into the comment section", + "status": "xfail", + } + "test_schema__test_split_lengths_empty": { + "comment": "", + "status": "skip", + }, + }, + "fbgemm::gather_lengths": { + "test_schema__test_gather_lengths": { + "comment": "", + "status": "skip", + }, + }, + } + + """ + failure_dict = failure_dict.data + qualnames = list(failure_dict.keys()) + for test_to_option in failure_dict.values(): + test_names = list(test_to_option.keys()) + for test_name, test_dict in test_to_option.items(): + if set(test_dict.keys()) != set({"comment", "status"}): + raise RuntimeError( + "in failures_dict, expected sub-dict to have keys 'comment' and 'status'" + ) + test_option = test_dict["status"] + if test_option not in TEST_OPTIONS: + raise RuntimeError( + f"In failures_dict, got status={test_option} but it needs to be in {TEST_OPTIONS}" + ) + test_class, actual_test_name = test_name.split(".") + if not any(actual_test_name.startswith(test) for test in test_utils): + raise RuntimeError( + f"In failures_dict, test name '{test_name}' should begin with one of {test_utils}" + ) + for test in test_utils: + if not actual_test_name.startswith(test): + continue + base_test_name = actual_test_name[len(test) + 2 :] + # remove potential pytest parametrization suffix + base_test_name = re.sub(r"\[.*\]", "", base_test_name) + if testcase.__name__ != test_class: + continue + if hasattr(testcase, base_test_name): + continue + raise RuntimeError( + f"In failures dict, got test name '{test_name}'. We parsed this as " + f"running test '{test}' on '{base_test_name}', but " + f"{base_test_name} does not exist on the TestCase '{testcase.__name__}]. " + f"Maybe you need to change the test name?" + ) + + +def should_update_failures_dict() -> bool: + key = "PYTORCH_OPCHECK_ACCEPT" + return key in os.environ and os.environ[key] == "1" + + +_is_inside_opcheck_mode = threading.local() +_is_inside_opcheck_mode.value = False + + +def is_inside_opcheck_mode(): + return _is_inside_opcheck_mode.value + + +class OpCheckMode(TorchFunctionMode): + """ + For a given test, OpCheckMode intercepts calls to operators and runs + test_util(op, args, kwargs) for each intercepted (op, args, kwargs). + """ + + def __init__( + self, + namespaces: List[str], + test_util_name: str, + test_util: Callable, + failures_dict: "FailuresDict", + test_name: str, + failures_dict_path: str, + ): + # We will intercept calls to ops with these namespaces + self.namespaces = namespaces + # The test utility function. Its signature should be (op, args, kwargs) -> None. + # Examples of test utilities are: schema_check, make_fx_check + self.test_util = test_util + self.test_util_name = test_util_name + # The name of the test that is running this OpCheckMode. + self.test_name = test_name + # Maps qualname -> test_name -> skip/xfail + # Tells us if we should skip a test or assert that there is a failure. + self.failures_dict = failures_dict + # Location of the failures dict. Makes it so that the error message is better. + self.failures_dict_path = failures_dict_path + + # OpCheckMode surpresses errors, collects them here, and then raises them on exit. 
+ # Maps qualname -> List[(Exception, func, maybe args, maybe kwargs)] + self.seen_ops_to_errors = {} + + def maybe_raise_errors_on_exit(self) -> None: + # Check expected failures first + for qualname in self.seen_ops_to_errors.keys(): + option = self.failures_dict.get_status(qualname, self.test_name) + if len(self.seen_ops_to_errors[qualname]) == 0: + if should_update_failures_dict(): + self.failures_dict.set_status( + qualname, self.test_name, "xsuccess", comment="" + ) + else: + if option == "xfail": + raise OpCheckError( + f"generate_opcheck_tests: Unexpected success for operator " + f"{qualname} on test {self.test_name}. This may mean that " + f"you have fixed this test failure. Please rerun the test with " + f"PYTORCH_OPCHECK_ACCEPT=1 to automatically update the test runner " + f"or manually remove the " + f"expected failure in the failure dict at " + f"{self.failures_dict_path}" + f"For more details, see " + f"{GDOC}" + ) + continue + failed_ops = [] + for qualname in self.seen_ops_to_errors.keys(): + option = self.failures_dict.get_status(qualname, self.test_name) + if option != "xsuccess": + continue + if len(self.seen_ops_to_errors[qualname]) == 0: + continue + failed_ops.append(qualname) + if not failed_ops: + return + + if should_update_failures_dict(): + for op in failed_ops: + self.failures_dict.set_status(op, self.test_name, "xfail") + return + + # Raise from the first error but also report about all of them to make + # recording xfails easier. + ex, op, args, kwargs = self.seen_ops_to_errors[failed_ops[0]][0] + repro_command = generate_repro( + self.test_util_name, op, args, kwargs, save_data=should_print_better_repro() + ) + raise OpCheckError( + f"Test generated by `generate_opcheck_tests`, {self.test_name}, " + f"failed on operators {failed_ops}. This usually means that the " + f"operators are not implemented correctly and may lead to silently " + f"incorrect behavior. Set PYTORCH_OPCHECK_PRINT_BETTER_REPRO=1 for a standalone repro, " + f"or please see " + f"{GDOC} " + f"for more recommendations. " + f"To reproduce this problem locally, try to run the following:\n{repro_command}" + ) from ex + + def __enter__(self, *args, **kwargs): + self.prev_is_opcheck_mode = _is_inside_opcheck_mode.value + self.prev_dynamo_disable = os.environ.get("TORCHDYNAMO_DISABLE", "") + _is_inside_opcheck_mode.value = True + os.environ["TORCHDYNAMO_DISABLE"] = "1" + return super().__enter__(*args, **kwargs) + + def __exit__(self, *args, **kwargs): + _is_inside_opcheck_mode.value = self.prev_is_opcheck_mode + os.environ["TORCHDYNAMO_DISABLE"] = self.prev_dynamo_disable + try: + self.maybe_raise_errors_on_exit() + if should_update_failures_dict(): + self.failures_dict.save() + finally: + result = super().__exit__(*args, **kwargs) + return result + + def run_test_util(self, op, args, kwargs): + try: + self.test_util(op, args, kwargs, copy_inputs=False) + except torch._subclasses.fake_tensor.UnsupportedFakeTensorException: + # We might get here if the input is already a FakeTensor + # or if we're in a torch.compile block. Just ignore these + # since we can't handle them and reporting them as failures + # is too noisy. 
+ pass + + def __torch_function__(self, func, types, args=(), kwargs=None): + kwargs = kwargs if kwargs else {} + + # Only intercept calls to operators + if not isinstance(func, (torch._ops.OpOverloadPacket, torch._ops.OpOverload)): + return func(*args, **kwargs) + if ( + torch.jit.is_tracing() + or torch.jit.is_scripting() + or torch._dynamo.is_compiling() + ): + return func(*args, **kwargs) + # Pre-existing code may not use the .default overload. If we see an + # OpOverloadPacket and we cannot resolve the overload, then we just throw + # and ask the user to clarify. Otherwise, we attempt to resolve the overload. + if isinstance(func, torch._ops.OpOverloadPacket): + func = resolve_unique_overload_or_throw(func) + qualname = func.name() + ns = qualname.split("::")[0] + if ns not in self.namespaces: + return func(*args, **kwargs) + + args_c, kwargs_c = deepcopy_tensors((args, kwargs)) + result = func(*args, **kwargs) + + option = self.failures_dict.get_status(qualname, self.test_name) + if option == "xsuccess" or option == "xfail": + # Surpress all errors during execution. Raise them during __exit__. + try: + if qualname not in self.seen_ops_to_errors: + self.seen_ops_to_errors[qualname] = [] + self.run_test_util(func, args_c, kwargs_c) + except Exception as ex: + if should_print_better_repro(): + self.seen_ops_to_errors[qualname].append((ex, func, args, kwargs)) + else: + self.seen_ops_to_errors[qualname].append((ex, func, None, None)) + elif option == "skip": + pass + return result + + +def should_print_better_repro() -> None: + """If set, the tests generated by `generate_opcheck_tests` will print a + repro command on failure. + + In order to print the repro command, we need to save some tensors to disk. + These will be saved under the following directory: + {tempfile.gettempdir()}/pytorch_opcheck_safe_to_delete/. + + Although this is a temp folder, it will usually not automatically get cleaned + up, so you'll need to manually delete it. + """ + key = "PYTORCH_OPCHECK_PRINT_BETTER_REPRO" + if key not in os.environ: + return False + value = os.environ[key] + return value == "1" or value == 1 + + +def opcheck( + op: torch._ops.OperatorBase, + args: Tuple[Any, ...], + kwargs: Optional[Dict[str, Any]] = None, + *, + test_utils: Union[str, List[str]] = "ALL", + raise_exception: bool = True, +) -> Dict[str, str]: + """Given an operator and some sample arguments, tests if the operator is + registered correctly. + + We test the following (which are important for correctness in eager-mode + PyTorch and with torch.compile): + - test_schema: if the operator's schema is correct. + - test_autograd_registration: if autograd was registered correctly, + i.e. to the correct DispatchKey. + - test_faketensor: If the operator has a FakeTensor implementation + (and if it is correct). + - test_aot_dispatch_static: If the operator works with + AOTAutograd/AOTDispatch, which is one of the parts in the PT2 stack. + Checks that the outputs (and gradients, if they are computable) + of the operator are the same under eager-mode PyTorch and torch.compile. + - test_aot_dispatch_dynamic: Same as aot_dispatch_static, but + tests dynamic shapes instead of static shapes. + + For best results, please call ``opcheck`` multiple times with a + representative set of inputs. For example, if your operator supports + autograd, please use ``opcheck`` with inputs that require_grad. + + Args: + op: The operator. 
Should look like torch.ops.aten.foo + args: The args to the operator + kwargs: The kwargs to the operator + test_utils: Tests that we should run. Default: all of them. + Example: ["test_schema", "test_faketensor"] + raise_exception: If we should raise an exception on the first + error. If False, we will return a dict with information + on if each test passed or not. + + """ + + if kwargs is None: + kwargs = {} + if isinstance(op, torch._ops.OpOverloadPacket): + op = resolve_unique_overload_or_throw(op) + if not isinstance(op, torch._ops.OpOverload): + raise ValueError( + f"opcheck(op, ...): op must be instance of torch._ops.OpOverload, " + f"e.g. torch.ops.aten.sin.default, got {type(op)}" + ) + if test_utils == "ALL": + test_utils = tuple(ALL_TEST_UTILS.keys()) + if isinstance(test_utils, str): + test_utils = (test_utils,) + if not isinstance(test_utils, (tuple, list)) or not set(test_utils).issubset( + ALL_TEST_UTILS.keys() + ): + raise ValueError( + f"opcheck(op, ..., test_utils={test_utils}), expected test_utils " + f"to be subset of {tuple(ALL_TEST_UTILS.keys())} but it was not" + ) + + results_dict = {} + for test_util in test_utils: + tester = ALL_TEST_UTILS[test_util] + try: + tester(op, args, kwargs) + results_dict[test_util] = "SUCCESS" + except Exception as ex: + if raise_exception: + raise OpCheckError( + f"opcheck(op, ...): {test_util} failed with {ex} " + f"(scroll up for stack trace)" + ) from ex + results_dict[test_util] = ex + return results_dict + + +class OpCheckError(Exception): + pass + + +def generate_repro( + test: str, + op: torch._ops.OpOverload, + args: Tuple[Any, ...], + kwargs: Dict[str, Any], + *, + save_data: bool, + dry_run: bool = False, +) -> str: + if save_data: + now = datetime.datetime.now() + path = os.path.join(tempfile.gettempdir(), "pytorch_opcheck_safe_to_delete") + unix_timestamp = datetime.datetime.timestamp(now) * 100000 + filepath = os.path.join(path, f"repro_{unix_timestamp}.pt") + if not dry_run: + if not os.path.exists(path): + os.makedirs(path) + torch.save((args, kwargs), filepath) + args_kwargs = f'args, kwargs = torch.load("{filepath}")' + else: + args_kwargs = ( + "# If you rerun your test with PYTORCH_OPCHECK_PRINT_BETTER_REPRO=1\n" + "# we will fill them in same (args, kwargs) as in your test\n" + "args = () # args to the operator\n" + "kwargs = {} # kwargs to the operator" + ) + + ns, name = op._schema.name.split("::") + overload = op._overloadname + + repro_command = ( + f"# =========================================================\n" + f"# BEGIN REPRO SCRIPT\n" + f"# =========================================================\n" + f"import torch\n" + f"from torch.testing._internal.optests import opcheck\n" + f"\n" + f"# Make sure you have loaded the library that contains the op\n" + f"# via an import or torch.ops.load_library(...)\n" + f"op = torch.ops.{ns}.{name}.{overload}\n" + f"\n" + f"{args_kwargs}\n" + f'opcheck(op, args, kwargs, test_utils="{test}")\n' + f"# =========================================================\n" + f"# END REPRO SCRIPT\n" + f"# =========================================================\n" + ) + return repro_command + + +def resolve_unique_overload_or_throw( + op: torch._ops.OpOverloadPacket, +) -> torch._ops.OpOverload: + all_schemas = torch._C._jit_get_schemas_for_operator(op._qualified_op_name) + if len(all_schemas) != 1: + raise RuntimeError( + f"opcheck can only test operators without overloads. 
" + f"Got the following overloads for {op._qualified_op_name}: " + f"{[schema.overload_name for schema in all_schemas]}" + ) + + overload_name = all_schemas[0].overload_name + if overload_name == "": + return op.default + return getattr(op, overload_name) + + +DUMP_OPTIONS = {"indent": 2, "sort_keys": True} + + +FailuresDictData = Dict[str, Dict[str, Dict[str, str]]] + + +VERSION = 1 +DESCRIPTION = ( + f"This is a dict containing failures for tests autogenerated by " + f"generate_opcheck_tests. " + f"For more details, please see {GDOC}" +) + + +class FailuresDict: + def __init__(self, path: str, data: FailuresDictData): + self.path = path + self.data = data + + @staticmethod + def load(path, *, create_file=False) -> "FailuresDict": + if create_file and not os.path.exists(path): + result = FailuresDict(path, {}) + FailuresDict.save() + return result + with open(path) as fp: + contents = fp.read() + if contents.strip() == "": + dct = { + "_description": DESCRIPTION, + "data": {}, + "_version": VERSION, + } + else: + dct = json.loads(contents) + assert "data" in dct + assert "_version" in dct and dct["_version"] == VERSION + return FailuresDict(path, dct["data"]) + + def _save(self, to_str=False) -> Optional[str]: + to_dump = { + "_description": DESCRIPTION, + "data": self.data, + "_version": VERSION, + } + # json.dumps doesn't end with a newline. Let's add one because files + # should end in newlines. + serialized = json.dumps(to_dump, **DUMP_OPTIONS) + "\n" + if to_str: + return serialized + with open(self.path, "w") as fp: + fp.write(serialized) + return None + + def save(self) -> None: + return self._save() + + def get_status(self, qualname: str, test_name: str) -> str: + if qualname not in self.data: + return "xsuccess" + dct = self.data[qualname] + if test_name not in dct: + return "xsuccess" + return dct[test_name]["status"] + + def set_status( + self, + qualname: str, + test_name: str, + status: str, + *, + comment: Optional[str] = None, + ): + if qualname not in self.data: + self.data[qualname] = {} + dct = self.data[qualname] + if test_name not in dct: + dct[test_name] = {"status": None, "comment": ""} + + if status == "xsuccess": + # The default status is "xsuccess". + del dct[test_name] + else: + dct[test_name]["status"] = status + if comment is not None: + dct[test_name]["comment"] = comment diff --git a/env-llmeval/lib/python3.10/site-packages/torch/testing/_internal/optests/make_fx.py b/env-llmeval/lib/python3.10/site-packages/torch/testing/_internal/optests/make_fx.py new file mode 100644 index 0000000000000000000000000000000000000000..f50c0cd9a244b6aa5d772c1293f3aa815eb292e1 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torch/testing/_internal/optests/make_fx.py @@ -0,0 +1,87 @@ +import torch +from torch.fx.experimental.proxy_tensor import make_fx +from torch.testing._internal.common_methods_invocations import wrapper_set_seed +import torch.utils._pytree as pytree + + +def make_fx_check( + func, + args, + kwargs, + tracing_mode, + assert_close=torch.testing.assert_close, + randomize_data=False, +): + f, *new_args = handle_sizes_for_dynamic_shapes(func, args, kwargs) + + def run(f, *args, **kwargs): + return wrapper_set_seed(f, *args, **kwargs) + + traced_f = make_fx(f, tracing_mode=tracing_mode)(*new_args) + + msg = ( + "op(*args, **kwargs) and make_fx(op)(*args, **kwargs) produced different " + "values. 
This could mean that your abstract impls (meta/FakeTensor impls) " + "are incorrect, that your operator is not completely traceable (e.g., " + "it relies on some global state), or that there is a bug in make_fx. " + "Note that if you passed a python function (and not an operator) to " + "make_fx_check, it is still possible that the python function will still " + "work with torch.compile because it handles capturing pieces of " + "your python code to compile." + ) + + # Randomize the data and run the traced graph with it, to catch bugs + # where we may have baked in Tensor data into the trace. + # This is not guaranteed to succeed, because `f` might have preconditions + # on the values of the inputs, so we just ignore if we used + # random data and it fails. + if randomize_data: + new_args = randomize(new_args) + try: + expected = run(f, *new_args) + except Exception: + if randomize_data: + return + raise + result = run(traced_f, *new_args) + assert_close(result, expected, msg=msg) + + +# Arguably we should make make_fx promote torch.Size() objects to symbolic shapes. +# Absent that, here is our strategy: +# +# If any argument is a torch.Size(), maybe get dynamic shapes for it by: +# - Create a temporary Tensor whose size is the torch.Size() we want. Note that +# we use an expanded Tensor as we cannot pass "meta" Tensors to make_fx. +# - Pass it to make_fx such that it is is converted to a proxy Tensor +# - Unpack the size in the wrapper to get a torch.Size with dynamic shapes (in +# symbolic mode, a no-op otherwise) +def handle_sizes_for_dynamic_shapes(func, args, kwargs): + def f(args, kwargs, extra_args, extra_kwargs): + if extra_args: + for i, t in extra_args: + args[i] = t.size() + if extra_kwargs: + for k, t in extra_kwargs.items(): + kwargs[k] = t.size() + + return func(*args, **kwargs) + + extra_args = [] + extra_kwargs = {} + for i, arg in enumerate(args): + if isinstance(arg, torch.Size): + extra_args.append((i, torch.empty(arg, device="cpu"))) + for key, value in kwargs.items(): + if isinstance(value, torch.Size): + extra_kwargs[key] = torch.empty(value, device="cpu") + + return f, args, kwargs, extra_args, extra_kwargs + + +def randomize(args): + def transform(x): + if not x.dtype.is_floating_point: + return x + return x.detach().clone().uniform_(0, 1).requires_grad_(x.requires_grad) + return pytree.tree_map_only(torch.Tensor, transform, args) diff --git a/env-llmeval/lib/python3.10/site-packages/torch/testing/_internal/quantization_torch_package_models.py b/env-llmeval/lib/python3.10/site-packages/torch/testing/_internal/quantization_torch_package_models.py new file mode 100644 index 0000000000000000000000000000000000000000..8f7551496a015ef3240e1d7645a7b8af69755e17 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torch/testing/_internal/quantization_torch_package_models.py @@ -0,0 +1,31 @@ +import math + +import torch +import torch.nn as nn + + +class LinearReluFunctionalChild(nn.Module): + def __init__(self, N): + super().__init__() + self.w1 = nn.Parameter(torch.empty(N, N)) + self.b1 = nn.Parameter(torch.zeros(N)) + torch.nn.init.kaiming_uniform_(self.w1, a=math.sqrt(5)) + + def forward(self, x): + x = torch.nn.functional.linear(x, self.w1, self.b1) + x = torch.nn.functional.relu(x) + return x + +class LinearReluFunctional(nn.Module): + def __init__(self, N): + super().__init__() + self.child = LinearReluFunctionalChild(N) + self.w1 = nn.Parameter(torch.empty(N, N)) + self.b1 = nn.Parameter(torch.zeros(N)) + torch.nn.init.kaiming_uniform_(self.w1, a=math.sqrt(5)) 
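+        # Descriptive note: forward() runs the child module first, then repeats
+        # the same functional linear + relu pattern with this module's own w1/b1.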
+ + def forward(self, x): + x = self.child(x) + x = torch.nn.functional.linear(x, self.w1, self.b1) + x = torch.nn.functional.relu(x) + return x diff --git a/env-llmeval/lib/python3.10/site-packages/torch/testing/_internal/triton_utils.py b/env-llmeval/lib/python3.10/site-packages/torch/testing/_internal/triton_utils.py new file mode 100644 index 0000000000000000000000000000000000000000..ddd33109ba0eee2e7dd4ca239536928cd73a3eaf --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torch/testing/_internal/triton_utils.py @@ -0,0 +1,157 @@ +import functools +import unittest + +from torch.testing._internal.inductor_utils import HAS_CUDA + +requires_cuda = functools.partial(unittest.skipIf, not HAS_CUDA, "requires cuda") + +if HAS_CUDA: + import triton + from triton import language as tl + + # Define here so that multiple tests can take advantage of it + @triton.jit + def add_kernel( + in_ptr0, + in_ptr1, + out_ptr, + n_elements, + BLOCK_SIZE: "tl.constexpr", + ): + pid = tl.program_id(axis=0) + block_start = pid * BLOCK_SIZE + offsets = block_start + tl.arange(0, BLOCK_SIZE) + mask = offsets < n_elements + x = tl.load(in_ptr0 + offsets, mask=mask) + y = tl.load(in_ptr1 + offsets, mask=mask) + output = x + y + tl.store(out_ptr + offsets, output, mask=mask) + + @triton.jit + def add_kernel_with_optional_param( + in_ptr0, + in_ptr1, + out_ptr, + n_elements, + ARGS_PASSED: "tl.constexpr", + BLOCK_SIZE: "tl.constexpr", + ): + pid = tl.program_id(axis=0) + block_start = pid * BLOCK_SIZE + offsets = block_start + tl.arange(0, BLOCK_SIZE) + mask = offsets < n_elements + x = tl.load(in_ptr0 + offsets, mask=mask) + if ARGS_PASSED == "two": + y = tl.load(in_ptr1 + offsets, mask=mask) + output = x + y + else: + output = x + tl.store(out_ptr + offsets, output, mask=mask) + + @triton.autotune( + configs=[ + triton.Config({"BLOCK_SIZE": 128}, num_stages=3, num_warps=8), + triton.Config({"BLOCK_SIZE": 64}, num_stages=3, num_warps=8), + ], + key=[], + ) + @triton.jit + def add_kernel_autotuned( + in_ptr0, + in_ptr1, + out_ptr, + n_elements, + BLOCK_SIZE: "tl.constexpr", + ): + pid = tl.program_id(axis=0) + block_start = pid * BLOCK_SIZE + offsets = block_start + tl.arange(0, BLOCK_SIZE) + mask = offsets < n_elements + x = tl.load(in_ptr0 + offsets, mask=mask) + y = tl.load(in_ptr1 + offsets, mask=mask) + output = x + y + tl.store(out_ptr + offsets, output, mask=mask) + + @triton.autotune( + configs=[ + triton.Config( + {"BLOCK_SIZE_X": 128, "BLOCK_SIZE_Y": 128}, num_stages=3, num_warps=8 + ), + triton.Config( + {"BLOCK_SIZE_X": 64, "BLOCK_SIZE_Y": 64}, num_stages=3, num_warps=8 + ), + ], + key=[], + ) + @triton.jit + def add_kernel_2d_autotuned( + in_ptr0, + in_ptr1, + out_ptr, + x_elements, + y_elements, + BLOCK_SIZE_X: "tl.constexpr", + BLOCK_SIZE_Y: "tl.constexpr", + ): + xoffset = tl.program_id(0) * BLOCK_SIZE_X + xindex = xoffset + tl.arange(0, BLOCK_SIZE_X)[:, None] + xmask = xindex < x_elements + yoffset = tl.program_id(1) * BLOCK_SIZE_Y + yindex = yoffset + tl.arange(0, BLOCK_SIZE_Y)[None, :] + ymask = yindex < y_elements + x1 = xindex + y0 = yindex + tmp0 = tl.load(in_ptr0 + (x1 + (x_elements * y0)), xmask & ymask) + tmp1 = tl.load(in_ptr0 + (y0 + (y_elements * x1)), xmask & ymask) + tmp2 = tmp0 + tmp1 + tl.store(out_ptr + (x1 + (x_elements * y0)), tmp2, xmask & ymask) + + @triton.jit + def mul2_kernel( + in_ptr0, + out_ptr, + n_elements, + BLOCK_SIZE: "tl.constexpr", + ): + pid = tl.program_id(axis=0) + block_start = pid * BLOCK_SIZE + offsets = block_start + tl.arange(0, BLOCK_SIZE) + mask = 
offsets < n_elements + x = tl.load(in_ptr0 + offsets, mask=mask) + output = 2 * x + tl.store(out_ptr + offsets, output, mask=mask) + + @triton.jit + def mul2_inplace_kernel( + ptr, + n_elements, + BLOCK_SIZE: "tl.constexpr", + ): + pid = tl.program_id(axis=0) + block_start = pid * BLOCK_SIZE + offsets = block_start + tl.arange(0, BLOCK_SIZE) + mask = offsets < n_elements + x = tl.load(ptr + offsets, mask=mask) + output = 2 * x + tl.store(ptr + offsets, output, mask=mask) + + @triton.jit + def zero_negs(x): + return tl.where(x >= 0, x, 0) + + @triton.jit + def indirection_kernel( + in_ptr0, + out_ptr, + n_elements, + BLOCK_SIZE: "tl.constexpr", + ACTIVATION: "tl.constexpr", + ): + pid = tl.program_id(axis=0) + block_start = pid * BLOCK_SIZE + offsets = block_start + tl.arange(0, BLOCK_SIZE) + mask = offsets < n_elements + if ACTIVATION == "mul2_inplace_kernel": + mul2_inplace_kernel(in_ptr0, n_elements, BLOCK_SIZE=BLOCK_SIZE) + x = tl.load(in_ptr0 + offsets, mask=mask) + tl.store(out_ptr + offsets, x, mask=mask) diff --git a/env-llmeval/lib/python3.10/site-packages/torch/testing/_internal/two_tensor.py b/env-llmeval/lib/python3.10/site-packages/torch/testing/_internal/two_tensor.py new file mode 100644 index 0000000000000000000000000000000000000000..81f9fd400898403bd3d7c038673acd248ce5acbb --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torch/testing/_internal/two_tensor.py @@ -0,0 +1,80 @@ +import torch +import torch.utils._pytree as pytree +from torch.utils._python_dispatch import return_and_correct_aliasing + + +# A simple tensor subclass that holds two tensors internally, and runs every op on both tensors. +class TwoTensor(torch.Tensor): + @staticmethod + def __new__(cls, a, b): + assert ( + a.device == b.device + and a.layout == b.layout + and a.requires_grad == b.requires_grad + and a.dtype == b.dtype + ) + # I guess it would be more accurate to represent the shape as torch.cat(a, b).shape + shape = a.shape + kwargs = {} + kwargs["strides"] = a.stride() + kwargs["storage_offset"] = a.storage_offset() + kwargs["device"] = a.device + kwargs["layout"] = a.layout + kwargs["requires_grad"] = a.requires_grad + kwargs["dtype"] = a.dtype + out = torch.Tensor._make_wrapper_subclass(cls, shape, **kwargs) + + assert a.shape == b.shape + assert a.stride() == b.stride() + assert a.storage_offset() == b.storage_offset() + return out + + def __init__(self, a, b): + self.a = a + self.b = b + + def __repr__(self): + a_repr = repr(self.a) + b_repr = repr(self.b) + return f"TwoTensor({a_repr}, {b_repr})" + + def __tensor_flatten__(self): + return ["a", "b"], None + + @staticmethod + def __tensor_unflatten__(inner_tensors, meta): + assert meta is None + a, b = inner_tensors["a"], inner_tensors["b"] + return TwoTensor(a, b) + + @classmethod + def __torch_dispatch__(cls, func, types, args, kwargs): + if kwargs is None: + kwargs = {} + args_a = pytree.tree_map_only(TwoTensor, lambda x: x.a, args) + args_b = pytree.tree_map_only(TwoTensor, lambda x: x.b, args) + + kwargs_a = pytree.tree_map_only(TwoTensor, lambda x: x.a, kwargs) + kwargs_b = pytree.tree_map_only(TwoTensor, lambda x: x.b, kwargs) + + out_a = func(*args_a, **kwargs_a) + out_b = func(*args_b, **kwargs_b) + assert type(out_a) == type(out_b) + out_a_flat, spec = pytree.tree_flatten(out_a) + out_b_flat = pytree.tree_leaves(out_b) + # for aten ops that return non-tensors, just assume that + # our two inner tensors return the same value + out_flat = [ + TwoTensor(o_a, o_b) if isinstance(o_a, torch.Tensor) else o_a + for o_a, o_b in 
zip(out_a_flat, out_b_flat) + ] + out = pytree.tree_unflatten(out_flat, spec) + return return_and_correct_aliasing(func, args, kwargs, out) + + +class TwoTensorMode(torch.utils._python_dispatch.TorchDispatchMode): + def __torch_dispatch__(self, func, types, args=(), kwargs=None): + out = func(*args, **kwargs) + if torch._subclasses.fake_tensor._is_tensor_constructor(func): + out = TwoTensor(out, out.clone()) + return out
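+
+
+# Minimal usage sketch (illustrative only; this demo is an addition and is not
+# exercised by the test suite, it runs only when the file is executed directly):
+if __name__ == "__main__":
+    # Factory calls made under TwoTensorMode return TwoTensor wrappers whose
+    # two inner tensors start out identical.
+    with TwoTensorMode():
+        t = torch.randn(3)
+    assert isinstance(t, TwoTensor)
+    # Subsequent ops are dispatched to both inner tensors via __torch_dispatch__,
+    # so the two halves stay in sync.
+    y = t + 1
+    assert torch.equal(y.a, y.b)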