====================================================================================================================
SOURCE CODE FILE: _typing_utils.py
LINES: 1
SIZE: 0.38 KB
PATH: scripts\freecad_env\Lib\site-packages\torch\utils\_typing_utils.py
ENCODING: utf-8
```py
"""Miscellaneous utilities to aid with typing."""
from typing import Optional, TypeVar
# Helper to turn Optional[T] into T when we know None either isn't
# possible or should trigger an exception.
T = TypeVar("T")
def not_none(obj: Optional[T]) -> T:
if obj is None:
raise TypeError("Invariant encountered: value was None when it should not be")
return obj
```
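A minimal usage sketch (the `find_index` helper below is hypothetical) showing how `not_none` narrows an `Optional[T]` to `T` for type checkers while failing loudly on an unexpected `None`:
```py
from typing import Optional

from torch.utils._typing_utils import not_none


def find_index(values: list[int], target: int) -> Optional[int]:
    # Hypothetical helper that may legitimately return None.
    return values.index(target) if target in values else None


idx: int = not_none(find_index([1, 2, 3], 2))  # type checker now sees `int`
# not_none(find_index([1, 2, 3], 9))           # would raise TypeError
```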
|
===========================================================================================================
SOURCE CODE FILE: _zip.py
LINES: 1
SIZE: 2.48 KB
PATH: scripts\freecad_env\Lib\site-packages\torch\utils\_zip.py
ENCODING: utf-8
```py
# mypy: allow-untyped-defs
import argparse
import glob
import os
from pathlib import Path
from zipfile import ZipFile
# Exclude some standard library modules to:
# 1. Slim down the final zipped file size
# 2. Remove functionality we don't want to support.
DENY_LIST = [
# Interface to unix databases
"dbm",
# ncurses bindings (terminal interfaces)
"curses",
# Tcl/Tk GUI
"tkinter",
"tkinter",
# Tests for the standard library
"test",
"tests",
"idle_test",
"__phello__.foo.py",
# importlib frozen modules. These are already baked into CPython.
"_bootstrap.py",
"_bootstrap_external.py",
]
strip_file_dir = ""
def remove_prefix(text, prefix):
if text.startswith(prefix):
return text[len(prefix) :]
return text
def write_to_zip(file_path, strip_file_path, zf, prepend_str=""):
stripped_file_path = prepend_str + remove_prefix(file_path, strip_file_dir + "/")
path = Path(stripped_file_path)
if path.name in DENY_LIST:
return
zf.write(file_path, stripped_file_path)
def main() -> None:
global strip_file_dir
parser = argparse.ArgumentParser(description="Zip py source")
parser.add_argument("paths", nargs="*", help="Paths to zip.")
parser.add_argument(
"--install-dir", "--install_dir", help="Root directory for all output files"
)
parser.add_argument(
"--strip-dir",
"--strip_dir",
help="The absolute directory we want to remove from zip",
)
parser.add_argument(
"--prepend-str",
"--prepend_str",
help="A string to prepend onto all paths of a file in the zip",
default="",
)
parser.add_argument("--zip-name", "--zip_name", help="Output zip name")
args = parser.parse_args()
zip_file_name = args.install_dir + "/" + args.zip_name
strip_file_dir = args.strip_dir
prepend_str = args.prepend_str
zf = ZipFile(zip_file_name, mode="w")
for p in sorted(args.paths):
if os.path.isdir(p):
files = glob.glob(p + "/**/*.py", recursive=True)
for file_path in sorted(files):
# strip the absolute path
write_to_zip(
file_path, strip_file_dir + "/", zf, prepend_str=prepend_str
)
else:
write_to_zip(p, strip_file_dir + "/", zf, prepend_str=prepend_str)
if __name__ == "__main__":
main() # pragma: no cover
```
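A hedged usage sketch: the script is a small CLI normally driven by the build system. The paths below are hypothetical; `main()` reads them from `sys.argv` and writes the zip, silently skipping any file whose name appears in `DENY_LIST`.
```py
import sys

from torch.utils._zip import main

# Hypothetical arguments; adjust the paths for your checkout.
sys.argv = [
    "_zip.py",
    "--install-dir", "/tmp/zip_out",
    "--strip-dir", "/abs/path/to/source",
    "--prepend-str", "python/",
    "--zip-name", "py_src.zip",
    "/abs/path/to/source/some_pkg",
]
main()  # writes /tmp/zip_out/py_src.zip
```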
|
==========================================================================================================================
SOURCE CODE FILE: __init__.py
LINES: 1
SIZE: 0.70 KB
PATH: scripts\freecad_env\Lib\site-packages\torch\utils\backcompat\__init__.py
ENCODING: utf-8
```py
# mypy: allow-untyped-defs
from torch._C import _set_backcompat_broadcast_warn
from torch._C import _get_backcompat_broadcast_warn
from torch._C import _set_backcompat_keepdim_warn
from torch._C import _get_backcompat_keepdim_warn
class Warning:
def __init__(self, setter, getter):
self.setter = setter
self.getter = getter
def set_enabled(self, value):
self.setter(value)
def get_enabled(self):
return self.getter()
enabled = property(get_enabled, set_enabled)
broadcast_warning = Warning(_set_backcompat_broadcast_warn, _get_backcompat_broadcast_warn)
keepdim_warning = Warning(_set_backcompat_keepdim_warn, _get_backcompat_keepdim_warn)
```
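A minimal sketch of how the two module-level singletons are used: assigning to the `enabled` property forwards to the corresponding `torch._C` setter.
```py
import torch.utils.backcompat as backcompat

backcompat.broadcast_warning.enabled = True   # warn on deprecated implicit broadcasting
backcompat.keepdim_warning.enabled = True     # warn on legacy keepdim behavior
print(backcompat.broadcast_warning.enabled)   # -> True
```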
|
===========================================================================================================================
SOURCE CODE FILE: backend_registration.py
LINES: 3
SIZE: 19.17 KB
PATH: scripts\freecad_env\Lib\site-packages\torch\utils\backend_registration.py
ENCODING: utf-8
```py
# mypy: allow-untyped-defs
import torch
from torch.overrides import (
handle_torch_function,
has_torch_function_unary,
)
from torch._C import _rename_privateuse1_backend, _get_privateuse1_backend_name
from typing import Optional, Union
__all__ = ["rename_privateuse1_backend", "generate_methods_for_privateuse1_backend"]
# TODO: Should use `torch._C._get_privateuse1_backend_name()` to get
# renamed-backend name for `privateuse1`, but the func will cause an
# error with torch.jit.script, so we use the global variable named
# `_privateuse1_backend_name`.
_privateuse1_backend_name = "privateuseone"
def rename_privateuse1_backend(backend_name: str) -> None:
r"""
Rename the privateuse1 backend device to make it more convenient to use as a device name within PyTorch APIs.
The steps are:
(1) (In C++) implement kernels for various torch operations, and register them
to the PrivateUse1 dispatch key.
(2) (In python) call torch.utils.rename_privateuse1_backend("foo")
You can now use "foo" as an ordinary device string in python.
Note: this API can only be called once per process. Attempting to change
the external backend after it's already been set will result in an error.
Note(AMP): If you want to support AMP on your device, you need to register a custom backend module.
The backend must register a custom backend module with ``torch._register_device_module("foo", BackendModule)``.
BackendModule needs to have the following API's:
(1) ``get_amp_supported_dtype() -> List[torch.dtype]``
get the dtypes supported by AMP on your "foo" device; the device may support additional dtypes.
Note(random): If you want to support setting the seed for your device, BackendModule needs to have the following API's:
(1) ``_is_in_bad_fork() -> bool``
Return ``True`` if the process is currently in a bad fork state, else return ``False``.
(2) ``manual_seed_all(seed: int) -> None``
Sets the seed for generating random numbers for your devices.
(3) ``device_count() -> int``
Returns the number of "foo"s available.
(4) ``get_rng_state(device: Union[int, str, torch.device] = 'foo') -> Tensor``
Returns a ByteTensor representing the random number state of the specified "foo" device.
(5) ``set_rng_state(new_state: Tensor, device: Union[int, str, torch.device] = 'foo') -> None``
Sets the random number generator state of the specified "foo" device.
And there are some common funcs:
(1) ``is_available() -> bool``
Returns a bool indicating if "foo" is currently available.
(2) ``current_device() -> int``
Returns the index of a currently selected device.
For more details, see https://pytorch.org/tutorials/advanced/extend_dispatcher.html#get-a-dispatch-key-for-your-backend
For an existing example, see https://github.com/bdhirsh/pytorch_open_registration_example
Example::
>>> # xdoctest: +SKIP("failing")
>>> torch.utils.rename_privateuse1_backend("foo")
# This will work, assuming that you've implemented the right C++ kernels
# to implement torch.ones.
>>> a = torch.ones(2, device="foo")
"""
_rename_privateuse1_backend(backend_name)
global _privateuse1_backend_name
_privateuse1_backend_name = backend_name
def _check_register_once(module, attr):
if hasattr(module, attr):
raise RuntimeError(f"The custom device module of {module} has already been registered with {attr}")
def _normalization_device(custom_backend_name: str, device: Optional[Union[int, str, torch.device]] = None) -> int:
def _get_current_device_index():
_get_device_index = "current_device"
if hasattr(torch, custom_backend_name) and \
hasattr(getattr(torch, custom_backend_name), _get_device_index):
return getattr(getattr(torch, custom_backend_name), _get_device_index)()
else:
# The default device index is 0.
return 0
if device is None:
return _get_current_device_index()
# if isinstance(device, str), this means that the parameter passed in is in the string format "foo:0"
# convert str object to torch.device object, and then process it uniformly
elif isinstance(device, str):
device = torch.device(device)
# the device variable can only be of type torch.device or int
if isinstance(device, torch.device):
if device.type != custom_backend_name:
raise RuntimeError(f"Invalid device, must be {custom_backend_name} device")
elif device.index is None:
device_idx = _get_current_device_index()
else:
device_idx = device.index
# if isinstance(device, int), we can take the index number directly
else:
device_idx = device
return device_idx
def _generate_tensor_methods_for_privateuse1_backend(custom_backend_name: str) -> None:
@property # type: ignore[misc]
def wrap_tensor_backend(self: torch.Tensor) -> bool:
if has_torch_function_unary(self):
# TODO mypy doesn't support @property, see: https://github.com/python/mypy/issues/6185
return handle_torch_function(wrap_tensor_backend.__get__, (self,), self) # type: ignore[attr-defined]
return self.device.type == custom_backend_name
_check_register_once(torch.Tensor, f'is_{custom_backend_name}')
wrap_tensor_backend.fget.__name__ = f'is_{custom_backend_name}' # type: ignore[attr-defined]
setattr(torch.Tensor, f'is_{custom_backend_name}', wrap_tensor_backend)
def wrap_tensor_to(self: torch.Tensor, device: Optional[Union[int, torch.device]] = None, non_blocking=False,
**kwargs) -> torch.Tensor:
r"""Perform Tensor device conversion. Call the to operator implementation.
.. note::
If the ``self`` Tensor already
has the correct :class:`torch.device`, then ``self`` is returned.
Otherwise, the returned tensor is a copy of ``self`` with the desired :class:`torch.device`.
Args:
device (int, optional): if specified, all parameters will be copied to that device
non_blocking (bool): If ``True`` and the source is in pinned memory,
the copy will be asynchronous with respect to the host. Otherwise,
the argument has no effect.
**kwargs (dict): For compatibility, may contain the key ``memory_format`` argument.
"""
if has_torch_function_unary(self):
return handle_torch_function(wrap_tensor_to, (self,), self, device=device, non_blocking=non_blocking, **kwargs)
device_idx = _normalization_device(custom_backend_name, device)
return self.to(device=torch.device(f'{custom_backend_name}:{device_idx}'), non_blocking=non_blocking, **kwargs)
_check_register_once(torch.Tensor, custom_backend_name)
wrap_tensor_to.__name__ = custom_backend_name
setattr(torch.Tensor, custom_backend_name, wrap_tensor_to)
def _generate_module_methods_for_privateuse1_backend(custom_backend_name: str) -> None:
# Generating Module attributes and methods depends on the Tensor methods,
# so we need to check whether the Tensor methods have already been registered.
if not hasattr(torch.Tensor, custom_backend_name):
raise RuntimeError(
f"Cannot automatically generate the {custom_backend_name}() method for torch.nn.Module "
f"because torch.Tensor does not have the method {custom_backend_name}(). "
f"To fix this error, you can try setting for_tensor=True.")
def wrap_module_to(self: torch.nn.modules.module.T,
device: Optional[Union[int, torch.device]] = None) -> torch.nn.modules.module.T:
r"""Move all model parameters and buffers to the custom device.
This also makes associated parameters and buffers different objects. So
it should be called before constructing optimizer if the module will
live on device while being optimized.
.. note::
This method modifies the module in-place.
Args:
device (int, optional): if specified, all parameters will be copied to that device
"""
return self._apply(lambda t: getattr(t, custom_backend_name)(device))
_check_register_once(torch.nn.Module, custom_backend_name)
setattr(torch.nn.Module, custom_backend_name, wrap_module_to)
def _generate_packed_sequence_methods_for_privateuse1_backend(custom_backend_name: str) -> None:
# Generating PackedSequence attributes and methods depends on the Tensor methods,
# so we need to check whether the Tensor methods have already been registered.
if not hasattr(torch.Tensor, f'is_{custom_backend_name}') or \
not hasattr(torch.Tensor, custom_backend_name):
raise RuntimeError(
f"Cannot automatically generate the is_{custom_backend_name}() or "
f"{custom_backend_name}() method for torch.nn.utils.rnn.PackedSequence "
f"because torch.Tensor does not have the method is_{custom_backend_name}() "
f"or {custom_backend_name}(). "
f"To fix this error, you can try setting for_tensor=True.")
@property # type: ignore[misc]
def wrap_tensor_backend(self: torch.nn.utils.rnn.PackedSequence) -> bool:
return self.data.device.type == custom_backend_name
_check_register_once(torch.nn.utils.rnn.PackedSequence, f'is_{custom_backend_name}')
setattr(torch.nn.utils.rnn.PackedSequence, f'is_{custom_backend_name}', wrap_tensor_backend)
def wrap_module_to(self: torch.nn.utils.rnn.PackedSequence,
*args, **kwargs) -> torch.nn.utils.rnn.PackedSequence:
r"""Move all model parameters and buffers to the custom device.
This also makes associated parameters and buffers different objects. So
it should be called before constructing optimizer if the module will
live on device while being optimized.
.. note::
This method modifies the module in-place.
Args:
device (int, optional): if specified, all parameters will be copied to that device
"""
ex = torch.tensor((), dtype=self.data.dtype, device=self.data.device).to(*args, **kwargs)
if ex.device.type == custom_backend_name:
return self.to(*args, **kwargs)
kwargs.update({'device': custom_backend_name})
return self.to(*args, **kwargs)
_check_register_once(torch.nn.utils.rnn.PackedSequence, custom_backend_name)
setattr(torch.nn.utils.rnn.PackedSequence, custom_backend_name, wrap_module_to)
def _generate_storage_methods_for_privateuse1_backend(custom_backend_name: str,
unsupported_dtype: Optional[list[torch.dtype]] = None) -> None:
# The attribute is registered on the _StorageBase class,
# and UntypedStorage obtains it through inheritance.
@property # type: ignore[misc]
def wrap_storage_backend(self: torch.storage._StorageBase) -> bool:
r"""Return the internal :class:`torch.UntypedStorage`."""
return self.device.type == custom_backend_name
_check_register_once(torch.storage._StorageBase, f'is_{custom_backend_name}')
setattr(torch.storage._StorageBase, f'is_{custom_backend_name}', wrap_storage_backend)
def wrap_storage_to(self, device=None, non_blocking=False):
r"""Return a copy of this object in custom device memory.
If this object is already in device memory and on the correct device, then
no copy is performed and the original object is returned.
Args:
device (int): The destination device id. Defaults to the current device.
non_blocking (bool): If ``True`` and the source is in pinned memory,
the copy will be asynchronous with respect to the host. Otherwise,
the argument has no effect.
"""
# Ideally there would also be checks on the storage device and storage type here,
# but those depend on the backend extension, so they are omitted from the automatically generated method for now.
device_idx = _normalization_device(custom_backend_name, device)
if getattr(self, f'is_{custom_backend_name}'):
# The storage is already on the expected device.
if self.get_device() == device_idx:
return self
# For sparse storage, backend implementers need to extend the implementation themselves.
if self.is_sparse:
raise RuntimeError(f"Can not support a sparse storage move to {custom_backend_name} backend")
# create untyped_storage and copy data
untyped_storage = torch.UntypedStorage(
self.size(), device=torch.device(f'{custom_backend_name}:{device_idx}')
)
untyped_storage.copy_(self, non_blocking)
return untyped_storage
_check_register_once(torch.storage._StorageBase, custom_backend_name)
setattr(torch.storage._StorageBase, custom_backend_name, wrap_storage_to)
# Register the corresponding attribute for the TypedStorage class.
# When the TypedStorage class is removed, the registration is also removed.
@property # type: ignore[misc]
def wrap_typed_storage_backend(self: torch.storage.TypedStorage) -> bool:
torch.storage._warn_typed_storage_removal()
return self._untyped_storage.device.type == custom_backend_name
_check_register_once(torch.TypedStorage, f'is_{custom_backend_name}')
setattr(torch.storage.TypedStorage, f'is_{custom_backend_name}', wrap_typed_storage_backend)
def wrap_typed_storage_to(self: torch.storage.TypedStorage,
device=None, non_blocking=False, **kwargs) -> torch.storage.TypedStorage:
torch.storage._warn_typed_storage_removal()
if unsupported_dtype and self.dtype in unsupported_dtype:
raise RuntimeError(f"Cannot create {custom_backend_name} storage "
f"as {self.dtype} dtype is not supported by this backend")
custom_backend_storage: torch.UntypedStorage = getattr(
self._untyped_storage, custom_backend_name)(device, non_blocking, **kwargs)
return self._new_wrapped_storage(custom_backend_storage)
_check_register_once(torch.TypedStorage, custom_backend_name)
setattr(torch.TypedStorage, custom_backend_name, wrap_typed_storage_to)
def generate_methods_for_privateuse1_backend(for_tensor: bool = True, for_module: bool = True,
for_packed_sequence: bool = True,
for_storage: bool = False,
unsupported_dtype: Optional[list[torch.dtype]] = None) -> None:
r"""
Automatically generate attributes and methods for the custom backend after renaming the privateuse1 backend.
By default, storage-related methods are not generated automatically.
Once you have implemented kernels for various torch operations and registered them to the
PrivateUse1 dispatch key, and renamed your backend with torch.rename_privateuse1_backend("foo"),
you can easily register backend-specific methods and attributes by calling this function,
e.g. torch.Tensor.foo(), torch.Tensor.is_foo, torch.Storage.foo(), torch.Storage.is_foo.
Note: We recommend using generic functions (checking device equality or to(device=...)).
We provide these methods for convenience only; they will be "monkey patched" onto the objects
and so will not be properly typed. For the generated Storage methods, if you need to support
sparse data storage, you need to extend the implementation yourself.
Args:
for_tensor (bool): whether to register related methods for the torch.Tensor class.
for_module (bool): whether to register related methods for the torch.nn.Module class.
for_packed_sequence (bool): whether to register related methods for the torch.nn.utils.rnn.PackedSequence class.
for_storage (bool): whether to register related methods for the torch.Storage class.
unsupported_dtype (List[torch.dtype]): takes effect only when storage methods are generated,
listing the dtypes that the storage does not support.
Example::
>>> # xdoctest: +SKIP("failing")
>>> torch.utils.rename_privateuse1_backend("foo")
>>> torch.utils.generate_methods_for_privateuse1_backend()
# Then automatically generate backend-related attributes and methods.
>>> a = torch.tensor(2).foo()
>>> a.is_foo
>>> hasattr(torch.nn.Module, 'foo')
"""
custom_backend_name = _get_privateuse1_backend_name()
if for_tensor:
_generate_tensor_methods_for_privateuse1_backend(custom_backend_name)
if for_module:
_generate_module_methods_for_privateuse1_backend(custom_backend_name)
if for_storage:
_generate_storage_methods_for_privateuse1_backend(custom_backend_name, unsupported_dtype)
if for_packed_sequence:
_generate_packed_sequence_methods_for_privateuse1_backend(custom_backend_name)
def _get_custom_mod_func(func_name: str):
r"""
Return the func named `func_name` defined in the custom device module, i.e. the module registered
with `torch.utils.rename_privateuse1_backend('foo')` and
`torch._register_device_module('foo', BackendModule)`.
If the custom device module or the func is not defined, a RuntimeError is raised.
Args:
func_name (str): the name of the callable function to look up in the custom device module.
Example::
class DummyfooModule:
@staticmethod
def is_available():
return True
@staticmethod
def func_name(*args, **kwargs):
....
torch.utils.rename_privateuse1_backend("foo")
torch._register_device_module("foo", DummyfooModule)
foo_is_available_func = torch.utils.backend_registration._get_custom_mod_func("is_available")
if foo_is_available_func:
foo_is_available = foo_is_available_func()
func_ = torch.utils.backend_registration._get_custom_mod_func("func_name")
if func_:
result = func_(*args, **kwargs)
Attention: This function is not meant to be used directly by users, which is why
it is marked as private. It is a convenience function for backend implementers to
more easily call the hooks into their backend extensions.
"""
assert isinstance(func_name, str), f"func_name must be `str`, but got `{type(func_name)}`."
backend_name = _get_privateuse1_backend_name()
custom_device_mod = getattr(torch, backend_name, None) # type: ignore[arg-type]
function = getattr(custom_device_mod, func_name, None) # type: ignore[arg-type]
if custom_device_mod is None or function is None:
message = f'Try to call torch.{backend_name}.{func_name}. The backend must register a custom backend '
message += f"module with `torch._register_device_module('{backend_name}', BackendModule)`. And "
message += f"BackendModule needs to have the following API's:\n `{func_name}(*args, **kwargs)`. \n"
raise RuntimeError(message)
return function
```
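A hedged end-to-end sketch, assuming an out-of-tree backend whose C++ kernels are already registered to the PrivateUse1 dispatch key; `"foo"` and `FooModule` are placeholders, and the tensor creation only succeeds once such kernels exist.
```py
import torch


class FooModule:
    # Placeholder backend module; a real backend would implement the API's
    # described in the docstrings above (is_available, device_count, ...).
    @staticmethod
    def is_available() -> bool:
        return True


torch.utils.rename_privateuse1_backend("foo")
torch._register_device_module("foo", FooModule)
torch.utils.generate_methods_for_privateuse1_backend()

# With real kernels registered, the generated conveniences become usable:
# x = torch.empty(2, 2, device="foo")
# print(x.is_foo, hasattr(torch.nn.Module, "foo"))
```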
|
=========================================================================================================================
SOURCE CODE FILE: __init__.py
LINES: 1
SIZE: 0.41 KB
PATH: scripts\freecad_env\Lib\site-packages\torch\utils\benchmark\__init__.py
ENCODING: utf-8
```py
from torch.utils.benchmark.utils.common import * # noqa: F403
from torch.utils.benchmark.utils.timer import * # noqa: F403
from torch.utils.benchmark.utils.compare import * # noqa: F403
from torch.utils.benchmark.utils.fuzzer import * # noqa: F403
from torch.utils.benchmark.utils.valgrind_wrapper.timer_interface import * # noqa: F403
from torch.utils.benchmark.utils.sparse_fuzzer import * # noqa: F403
```
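These star-imports re-export the main entry points at the package root, so downstream code can use them directly from `torch.utils.benchmark` (minimal sketch):
```py
import torch
import torch.utils.benchmark as benchmark

t = benchmark.Timer(stmt="x * 2", globals={"x": torch.ones(1024)})
print(t.blocked_autorange(min_run_time=0.05))
```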
|
==================================================================================================================================
SOURCE CODE FILE: __init__.py
LINES: 1
SIZE: 0.00 KB
PATH: scripts\freecad_env\Lib\site-packages\torch\utils\benchmark\examples\__init__.py
ENCODING: utf-8
```py
```
|
============================================================================================================================================
SOURCE CODE FILE: blas_compare_setup.py
LINES: 8
SIZE: 7.21 KB
PATH: scripts\freecad_env\Lib\site-packages\torch\utils\benchmark\examples\blas_compare_setup.py
ENCODING: utf-8
```py
# mypy: allow-untyped-defs
import collections
import os
import shutil
import subprocess
try:
# no type stub for conda command line interface
import conda.cli.python_api # type: ignore[import]
from conda.cli.python_api import Commands as conda_commands
except ImportError:
# blas_compare.py will fail to import these when it's inside a conda env,
# but that's fine as it only wants the constants.
pass
WORKING_ROOT = "/tmp/pytorch_blas_compare_environments"
MKL_2020_3 = "mkl_2020_3"
MKL_2020_0 = "mkl_2020_0"
OPEN_BLAS = "open_blas"
EIGEN = "eigen"
GENERIC_ENV_VARS = ("USE_CUDA=0", "USE_ROCM=0")
BASE_PKG_DEPS = (
"cmake",
"hypothesis",
"ninja",
"numpy",
"pyyaml",
"setuptools",
"typing_extensions",
)
SubEnvSpec = collections.namedtuple(
"SubEnvSpec", (
"generic_installs",
"special_installs",
"environment_variables",
# Validate install.
"expected_blas_symbols",
"expected_mkl_version",
))
SUB_ENVS = {
MKL_2020_3: SubEnvSpec(
generic_installs=(),
special_installs=("intel", ("mkl=2020.3", "mkl-include=2020.3")),
environment_variables=("BLAS=MKL",) + GENERIC_ENV_VARS,
expected_blas_symbols=("mkl_blas_sgemm",),
expected_mkl_version="2020.0.3",
),
MKL_2020_0: SubEnvSpec(
generic_installs=(),
special_installs=("intel", ("mkl=2020.0", "mkl-include=2020.0")),
environment_variables=("BLAS=MKL",) + GENERIC_ENV_VARS,
expected_blas_symbols=("mkl_blas_sgemm",),
expected_mkl_version="2020.0.0",
),
OPEN_BLAS: SubEnvSpec(
generic_installs=("openblas",),
special_installs=(),
environment_variables=("BLAS=OpenBLAS",) + GENERIC_ENV_VARS,
expected_blas_symbols=("exec_blas",),
expected_mkl_version=None,
),
# EIGEN: SubEnvSpec(
# generic_installs=(),
# special_installs=(),
# environment_variables=("BLAS=Eigen",) + GENERIC_ENV_VARS,
# expected_blas_symbols=(),
# ),
}
def conda_run(*args):
"""Convenience method."""
stdout, stderr, retcode = conda.cli.python_api.run_command(*args)
if retcode:
raise OSError(f"conda error: {str(args)} retcode: {retcode}\n{stderr}")
return stdout
def main():
if os.path.exists(WORKING_ROOT):
print("Cleaning: removing old working root.")
shutil.rmtree(WORKING_ROOT)
os.makedirs(WORKING_ROOT)
git_root = subprocess.check_output(
"git rev-parse --show-toplevel",
shell=True,
cwd=os.path.dirname(os.path.realpath(__file__))
).decode("utf-8").strip()
for env_name, env_spec in SUB_ENVS.items():
env_path = os.path.join(WORKING_ROOT, env_name)
print(f"Creating env: {env_name}: ({env_path})")
conda_run(
conda_commands.CREATE,
"--no-default-packages",
"--prefix", env_path,
"python=3",
)
print("Testing that env can be activated:")
base_source = subprocess.run(
f"source activate {env_path}",
shell=True,
capture_output=True,
check=False,
)
if base_source.returncode:
raise OSError(
"Failed to source base environment:\n"
f" stdout: {base_source.stdout.decode('utf-8')}\n"
f" stderr: {base_source.stderr.decode('utf-8')}"
)
print("Installing packages:")
conda_run(
conda_commands.INSTALL,
"--prefix", env_path,
*(BASE_PKG_DEPS + env_spec.generic_installs)
)
if env_spec.special_installs:
channel, channel_deps = env_spec.special_installs
print(f"Installing packages from channel: {channel}")
conda_run(
conda_commands.INSTALL,
"--prefix", env_path,
"-c", channel, *channel_deps
)
if env_spec.environment_variables:
print("Setting environment variables.")
# This does not appear to be possible using the python API.
env_set = subprocess.run(
f"source activate {env_path} && "
f"conda env config vars set {' '.join(env_spec.environment_variables)}",
shell=True,
capture_output=True,
check=False,
)
if env_set.returncode:
raise OSError(
"Failed to set environment variables:\n"
f" stdout: {env_set.stdout.decode('utf-8')}\n"
f" stderr: {env_set.stderr.decode('utf-8')}"
)
# Check that they were actually set correctly.
actual_env_vars = subprocess.run(
f"source activate {env_path} && env",
shell=True,
capture_output=True,
check=True,
).stdout.decode("utf-8").strip().splitlines()
for e in env_spec.environment_variables:
assert e in actual_env_vars, f"{e} not in envs"
print(f"Building PyTorch for env: `{env_name}`")
# We have to re-run during each build to pick up the new
# build config settings.
subprocess.run(
f"source activate {env_path} && "
f"cd {git_root} && "
"python setup.py install --cmake",
shell=True,
capture_output=True,
check=True,
)
print("Checking configuration:")
check_run = subprocess.run(
# Shameless abuse of `python -c ...`
f"source activate {env_path} && "
'python -c "'
"import torch;"
"from torch.utils.benchmark import Timer;"
"print(torch.__config__.show());"
"setup = 'x=torch.ones((128, 128));y=torch.ones((128, 128))';"
"counts = Timer('torch.mm(x, y)', setup).collect_callgrind(collect_baseline=False);"
"stats = counts.as_standardized().stats(inclusive=True);"
"print(stats.filter(lambda l: 'blas' in l.lower()))\"",
shell=True,
capture_output=True,
check=False,
)
if check_run.returncode:
raise OSError(
"Failed to set environment variables:\n"
f" stdout: {check_run.stdout.decode('utf-8')}\n"
f" stderr: {check_run.stderr.decode('utf-8')}"
)
check_run_stdout = check_run.stdout.decode('utf-8')
print(check_run_stdout)
for e in env_spec.environment_variables:
if "BLAS" in e:
assert e in check_run_stdout, f"PyTorch build did not respect `BLAS=...`: {e}"
for s in env_spec.expected_blas_symbols:
assert s in check_run_stdout
if env_spec.expected_mkl_version is not None:
assert f"- Intel(R) Math Kernel Library Version {env_spec.expected_mkl_version}" in check_run_stdout
print(f"Build complete: {env_name}")
if __name__ == "__main__":
main()
```
|
=================================================================================================================================
SOURCE CODE FILE: compare.py
LINES: 5
SIZE: 2.94 KB
PATH: scripts\freecad_env\Lib\site-packages\torch\utils\benchmark\examples\compare.py
ENCODING: utf-8
```py
# mypy: allow-untyped-defs
"""Example of Timer and Compare APIs:
$ python -m examples.compare
"""
import pickle
import sys
import time
import torch
import torch.utils.benchmark as benchmark_utils
class FauxTorch:
"""Emulate different versions of pytorch.
In normal circumstances this would be done with multiple processes
writing serialized measurements, but this simplifies that model to
make the example clearer.
"""
def __init__(self, real_torch, extra_ns_per_element):
self._real_torch = real_torch
self._extra_ns_per_element = extra_ns_per_element
def extra_overhead(self, result):
# time.sleep has a ~65 us overhead, so only fake a
# per-element overhead if numel is large enough.
numel = int(result.numel())
if numel > 5000:
time.sleep(numel * self._extra_ns_per_element * 1e-9)
return result
def add(self, *args, **kwargs):
return self.extra_overhead(self._real_torch.add(*args, **kwargs))
def mul(self, *args, **kwargs):
return self.extra_overhead(self._real_torch.mul(*args, **kwargs))
def cat(self, *args, **kwargs):
return self.extra_overhead(self._real_torch.cat(*args, **kwargs))
def matmul(self, *args, **kwargs):
return self.extra_overhead(self._real_torch.matmul(*args, **kwargs))
def main():
tasks = [
("add", "add", "torch.add(x, y)"),
("add", "add (extra +0)", "torch.add(x, y + zero)"),
]
serialized_results = []
repeats = 2
timers = [
benchmark_utils.Timer(
stmt=stmt,
globals={
"torch": torch if branch == "master" else FauxTorch(torch, overhead_ns),
"x": torch.ones((size, 4)),
"y": torch.ones((1, 4)),
"zero": torch.zeros(()),
},
label=label,
sub_label=sub_label,
description=f"size: {size}",
env=branch,
num_threads=num_threads,
)
for branch, overhead_ns in [("master", None), ("my_branch", 1), ("severe_regression", 5)]
for label, sub_label, stmt in tasks
for size in [1, 10, 100, 1000, 10000, 50000]
for num_threads in [1, 4]
]
for i, timer in enumerate(timers * repeats):
serialized_results.append(pickle.dumps(
timer.blocked_autorange(min_run_time=0.05)
))
print(f"\r{i + 1} / {len(timers) * repeats}", end="")
sys.stdout.flush()
print()
comparison = benchmark_utils.Compare([
pickle.loads(i) for i in serialized_results
])
print("== Unformatted " + "=" * 80 + "\n" + "/" * 95 + "\n")
comparison.print()
print("== Formatted " + "=" * 80 + "\n" + "/" * 93 + "\n")
comparison.trim_significant_figures()
comparison.colorize()
comparison.print()
if __name__ == "__main__":
main()
```
|
================================================================================================================================
SOURCE CODE FILE: fuzzer.py
LINES: 3
SIZE: 2.67 KB
PATH: scripts\freecad_env\Lib\site-packages\torch\utils\benchmark\examples\fuzzer.py
ENCODING: utf-8
```py
# mypy: allow-untyped-defs
"""Example of the Timer and Fuzzer APIs:
$ python -m examples.fuzzer
"""
import sys
import torch.utils.benchmark as benchmark_utils
def main():
add_fuzzer = benchmark_utils.Fuzzer(
parameters=[
[
benchmark_utils.FuzzedParameter(
name=f"k{i}",
minval=16,
maxval=16 * 1024,
distribution="loguniform",
) for i in range(3)
],
benchmark_utils.FuzzedParameter(
name="d",
distribution={2: 0.6, 3: 0.4},
),
],
tensors=[
[
benchmark_utils.FuzzedTensor(
name=name,
size=("k0", "k1", "k2"),
dim_parameter="d",
probability_contiguous=0.75,
min_elements=64 * 1024,
max_elements=128 * 1024,
) for name in ("x", "y")
],
],
seed=0,
)
n = 250
measurements = []
for i, (tensors, tensor_properties, _) in enumerate(add_fuzzer.take(n=n)):
x, x_order = tensors["x"], str(tensor_properties["x"]["order"])
y, y_order = tensors["y"], str(tensor_properties["y"]["order"])
shape = ", ".join(tuple(f'{i:>4}' for i in x.shape))
description = "".join([
f"{x.numel():>7} | {shape:<16} | ",
f"{'contiguous' if x.is_contiguous() else x_order:<12} | ",
f"{'contiguous' if y.is_contiguous() else y_order:<12} | ",
])
timer = benchmark_utils.Timer(
stmt="x + y",
globals=tensors,
description=description,
)
measurements.append(timer.blocked_autorange(min_run_time=0.1))
measurements[-1].metadata = {"numel": x.numel()}
print(f"\r{i + 1} / {n}", end="")
sys.stdout.flush()
print()
# More string munging to make pretty output.
print(f"Average attempts per valid config: {1. / (1. - add_fuzzer.rejection_rate):.1f}")
def time_fn(m):
return m.median / m.metadata["numel"]
measurements.sort(key=time_fn)
template = f"{{:>6}}{' ' * 19}Size Shape{' ' * 13}X order Y order\n{'-' * 80}"
print(template.format("Best:"))
for m in measurements[:15]:
print(f"{time_fn(m) * 1e9:>4.1f} ns / element {m.description}")
print("\n" + template.format("Worst:"))
for m in measurements[-15:]:
print(f"{time_fn(m) * 1e9:>4.1f} ns / element {m.description}")
if __name__ == "__main__":
main()
```
|
======================================================================================================================================
SOURCE CODE FILE: op_benchmark.py
LINES: 2
SIZE: 4.23 KB
PATH: scripts\freecad_env\Lib\site-packages\torch\utils\benchmark\examples\op_benchmark.py
ENCODING: utf-8
```py
# mypy: allow-untyped-defs
"""Example use of Timer and op fuzzers to measure kernel performance.
$ python -m examples.op_benchmark
"""
import numpy as np
import torch
from torch.utils.benchmark import Timer
from torch.utils.benchmark.op_fuzzers.binary import BinaryOpFuzzer
from torch.utils.benchmark.op_fuzzers.unary import UnaryOpFuzzer
import operator
_MEASURE_TIME = 1.0
def assert_dicts_equal(dict_0, dict_1):
"""Builtin dict comparison will not compare numpy arrays.
e.g.
x = {"a": np.ones((2, 1))}
x == x # Raises ValueError
"""
assert set(dict_0.keys()) == set(dict_1.keys())
assert all(np.all(v == dict_1[k]) for k, v in dict_0.items() if k != "dtype")
def run(n, stmt, fuzzer_cls):
float_iter = fuzzer_cls(seed=0, dtype=torch.float32).take(n)
int_iter = fuzzer_cls(seed=0, dtype=torch.int32).take(n)
raw_results = []
for i, (float_values, int_values) in enumerate(zip(float_iter, int_iter)):
float_tensors, float_tensor_params, float_params = float_values
int_tensors, int_tensor_params, int_params = int_values
# This benchmark assumes that the two fuzzers generate identically
# sized and strided Tensors, since the same seed is used.
assert_dicts_equal(float_params, int_params)
assert_dicts_equal(float_tensor_params["x"], int_tensor_params["x"])
float_measurement, int_measurement = (
Timer(
stmt,
globals=tensors,
).blocked_autorange(min_run_time=_MEASURE_TIME)
for tensors in (float_tensors, int_tensors)
)
descriptions = []
for name in float_tensors:
shape_str = "(" + ", ".join([
f"2 ** {int(np.log2(i))}"
if 2 ** int(np.log2(i)) == i and i > 1
else str(i)
for i in float_tensors[name].shape
]) + ")"
order = float_tensor_params[name]["order"]
order_str = ("" if all(order == np.arange(len(order))) else str(tuple(order)))
steps = float_tensor_params[name]["steps"]
steps_str = str(steps) if sum(steps) > len(steps) else ""
descriptions.append((name, shape_str, order_str, steps_str))
raw_results.append((float_measurement, int_measurement, descriptions))
print(f"\r{i + 1} / {n}", end="")
print()
parsed_results, name_len, shape_len, order_len, steps_len = [], 0, 0, 0, 0
for float_measurement, int_measurement, descriptions in raw_results:
t_float = float_measurement.median * 1e6
t_int = int_measurement.median * 1e6
rel_diff = abs(t_float - t_int) / (t_float + t_int) * 2
parsed_results.append((t_float, t_int, rel_diff, descriptions))
for name, shape, order, steps in descriptions:
name_len = max(name_len, len(name))
shape_len = max(shape_len, len(shape))
order_len = max(order_len, len(order))
steps_len = max(steps_len, len(steps))
parsed_results.sort(key=operator.itemgetter(2))
print(f"stmt: {stmt}")
print(f" diff faster{'':>17}{' ' * name_len} ", end="")
print(f"{'shape'.ljust(shape_len)}{'':>16}{'order'.ljust(order_len)}", end="")
print(f" steps\n{'-' * 100}")
for results, spacer in [(parsed_results[:10], "..."), (parsed_results[-10:], "")]:
for t_float, t_int, rel_diff, descriptions in results:
time_str = [f"{rel_diff * 100:>4.1f}% {'int' if t_int < t_float else 'float':<20}"]
time_str.extend(["".ljust(len(time_str[0])) for _ in descriptions[:-1]])
for t_str, (name, shape, order, steps) in zip(time_str, descriptions):
name = f"{name}:".ljust(name_len + 1)
shape = shape.ljust(shape_len + 10)
order = order.ljust(order_len)
print(f"{t_str} {name} {shape}| {order} | {steps}")
print(spacer)
def main():
run(n=100, stmt="torch.median(x, dim=0)", fuzzer_cls=UnaryOpFuzzer)
run(n=100, stmt="torch.square(x)", fuzzer_cls=UnaryOpFuzzer)
run(n=100, stmt="x + y", fuzzer_cls=BinaryOpFuzzer)
if __name__ == "__main__":
main()
```
|
=======================================================================================================================================
SOURCE CODE FILE: simple_timeit.py
LINES: 7
SIZE: 0.55 KB
PATH: scripts\freecad_env\Lib\site-packages\torch\utils\benchmark\examples\simple_timeit.py
ENCODING: utf-8
```py
"""Trivial use of Timer API:
$ python -m examples.simple_timeit
"""
import torch
import torch.utils.benchmark as benchmark_utils
def main() -> None:
timer = benchmark_utils.Timer(
stmt="x + y",
globals={"x": torch.ones((4, 8)), "y": torch.ones((1, 8))},
label="Broadcasting add (4x8)",
)
for i in range(3):
print(f"Run: {i}\n{'-' * 40}")
print(f"timeit:\n{timer.timeit(10000)}\n")
print(f"autorange:\n{timer.blocked_autorange()}\n\n")
if __name__ == "__main__":
main()
```
|
================================================================================================================================================
SOURCE CODE FILE: spectral_ops_fuzz_test.py
LINES: 2
SIZE: 4.78 KB
PATH: scripts\freecad_env\Lib\site-packages\torch\utils\benchmark\examples\spectral_ops_fuzz_test.py
ENCODING: utf-8
```py
# mypy: allow-untyped-defs
"""Microbenchmarks for the torch.fft module"""
from argparse import ArgumentParser
from collections import namedtuple
from collections.abc import Iterable
import torch
import torch.fft
from torch.utils import benchmark
from torch.utils.benchmark.op_fuzzers.spectral import SpectralOpFuzzer
def _dim_options(ndim):
if ndim == 1:
return [None]
elif ndim == 2:
return [0, 1, None]
elif ndim == 3:
return [0, 1, 2, (0, 1), (0, 2), None]
raise ValueError(f"Expected ndim in range 1-3, got {ndim}")
def run_benchmark(name: str, function: object, dtype: torch.dtype, seed: int, device: str, samples: int,
probability_regular: float):
cuda = device == 'cuda'
spectral_fuzzer = SpectralOpFuzzer(seed=seed, dtype=dtype, cuda=cuda,
probability_regular=probability_regular)
results = []
for tensors, tensor_params, params in spectral_fuzzer.take(samples):
shape = [params['k0'], params['k1'], params['k2']][:params['ndim']]
str_shape = ' x '.join([f"{s:<4}" for s in shape])
sub_label = f"{str_shape} {'' if tensor_params['x']['is_contiguous'] else '(discontiguous)'}"
for dim in _dim_options(params['ndim']):
for nthreads in (1, 4, 16) if not cuda else (1,):
measurement = benchmark.Timer(
stmt='func(x, dim=dim)',
globals={'func': function, 'x': tensors['x'], 'dim': dim},
label=f"{name}_{device}",
sub_label=sub_label,
description=f"dim={dim}",
num_threads=nthreads,
).blocked_autorange(min_run_time=1)
measurement.metadata = {
'name': name,
'device': device,
'dim': dim,
'shape': shape,
}
measurement.metadata.update(tensor_params['x'])
results.append(measurement)
return results
Benchmark = namedtuple('Benchmark', ['name', 'function', 'dtype'])
BENCHMARKS = [
Benchmark('fft_real', torch.fft.fftn, torch.float32),
Benchmark('fft_complex', torch.fft.fftn, torch.complex64),
Benchmark('ifft', torch.fft.ifftn, torch.complex64),
Benchmark('rfft', torch.fft.rfftn, torch.float32),
Benchmark('irfft', torch.fft.irfftn, torch.complex64),
]
BENCHMARK_MAP = {b.name: b for b in BENCHMARKS}
BENCHMARK_NAMES = [b.name for b in BENCHMARKS]
DEVICE_NAMES = ['cpu', 'cuda']
def _output_csv(file, results):
file.write('benchmark,device,num_threads,numel,shape,contiguous,dim,mean (us),median (us),iqr (us)\n')
for measurement in results:
metadata = measurement.metadata
device, dim, shape, name, numel, contiguous = (
metadata['device'], metadata['dim'], metadata['shape'],
metadata['name'], metadata['numel'], metadata['is_contiguous'])
if isinstance(dim, Iterable):
dim_str = '-'.join(str(d) for d in dim)
else:
dim_str = str(dim)
shape_str = 'x'.join(str(s) for s in shape)
print(name, device, measurement.task_spec.num_threads, numel, shape_str, contiguous, dim_str, # type: ignore[possibly-undefined]
measurement.mean * 1e6, measurement.median * 1e6, measurement.iqr * 1e6,
sep=',', file=file)
if __name__ == '__main__':
parser = ArgumentParser(description=__doc__)
parser.add_argument('--device', type=str, choices=DEVICE_NAMES, nargs='+', default=DEVICE_NAMES)
parser.add_argument('--bench', type=str, choices=BENCHMARK_NAMES, nargs='+', default=BENCHMARK_NAMES)
parser.add_argument('--seed', type=int, default=0)
parser.add_argument('--samples', type=int, default=10)
parser.add_argument('--probability-regular', '--probability_regular', type=float, default=1.0)
parser.add_argument('-o', '--output', type=str)
args = parser.parse_args()
num_benchmarks = len(args.device) * len(args.bench)
i = 0
results = []
for device in args.device:
for bench in (BENCHMARK_MAP[b] for b in args.bench):
results += run_benchmark(
name=bench.name, function=bench.function, dtype=bench.dtype,
seed=args.seed, device=device, samples=args.samples,
probability_regular=args.probability_regular)
i += 1
print(f'Completed {bench.name} benchmark on {device} ({i} of {num_benchmarks})')
if args.output is not None:
with open(args.output, 'w') as f:
_output_csv(f, results)
compare = benchmark.Compare(results)
compare.trim_significant_figures()
compare.colorize()
compare.print()
```
|
====================================================================================================================================
SOURCE CODE FILE: __init__.py
LINES: 1
SIZE: 0.00 KB
PATH: scripts\freecad_env\Lib\site-packages\torch\utils\benchmark\op_fuzzers\__init__.py
ENCODING: utf-8
```py
```
|
==================================================================================================================================
SOURCE CODE FILE: binary.py
LINES: 1
SIZE: 4.14 KB
PATH: scripts\freecad_env\Lib\site-packages\torch\utils\benchmark\op_fuzzers\binary.py
ENCODING: utf-8
```py
# mypy: allow-untyped-defs
import numpy as np
import torch
from torch.utils.benchmark import Fuzzer, FuzzedParameter, ParameterAlias, FuzzedTensor
_MIN_DIM_SIZE = 16
_MAX_DIM_SIZE = 16 * 1024 ** 2
_POW_TWO_SIZES = tuple(2 ** i for i in range(
int(np.log2(_MIN_DIM_SIZE)),
int(np.log2(_MAX_DIM_SIZE)) + 1,
))
class BinaryOpFuzzer(Fuzzer):
def __init__(self, seed, dtype=torch.float32, cuda=False):
super().__init__(
parameters=[
# Dimensionality of x and y. (e.g. 1D, 2D, or 3D.)
FuzzedParameter("dim", distribution={1: 0.3, 2: 0.4, 3: 0.3}, strict=True),
# Shapes for `x` and `y`.
# It is important to test all shapes, however
# powers of two are especially important and therefore
# warrant special attention. This is done by generating
# both a value drawn from all integers between the min and
# max allowed values, and another from only the powers of two
# (both distributions are loguniform) and then randomly
# selecting between the two.
# Moreover, `y` will occasionally have singleton
# dimensions in order to test broadcasting.
[
FuzzedParameter(
name=f"k_any_{i}",
minval=_MIN_DIM_SIZE,
maxval=_MAX_DIM_SIZE,
distribution="loguniform",
) for i in range(3)
],
[
FuzzedParameter(
name=f"k_pow2_{i}",
distribution={size: 1. / len(_POW_TWO_SIZES) for size in _POW_TWO_SIZES}
) for i in range(3)
],
[
FuzzedParameter(
name=f"k{i}",
distribution={
ParameterAlias(f"k_any_{i}"): 0.8,
ParameterAlias(f"k_pow2_{i}"): 0.2,
},
strict=True,
) for i in range(3)
],
[
FuzzedParameter(
name=f"y_k{i}",
distribution={
ParameterAlias(f"k{i}"): 0.8,
1: 0.2,
},
strict=True,
) for i in range(3)
],
# Steps for `x` and `y`. (Benchmarks strided memory access.)
[
FuzzedParameter(
name=f"{name}_step_{i}",
distribution={1: 0.8, 2: 0.06, 4: 0.06, 8: 0.04, 16: 0.04},
)
for i in range(3)
for name in ("x", "y")
],
# Repeatable entropy for downstream applications.
FuzzedParameter(name="random_value", minval=0, maxval=2 ** 32 - 1, distribution="uniform"),
],
tensors=[
FuzzedTensor(
name="x",
size=("k0", "k1", "k2"),
steps=("x_step_0", "x_step_1", "x_step_2"),
probability_contiguous=0.75,
min_elements=4 * 1024,
max_elements=32 * 1024 ** 2,
max_allocation_bytes=2 * 1024**3, # 2 GB
dim_parameter="dim",
dtype=dtype,
cuda=cuda,
),
FuzzedTensor(
name="y",
size=("y_k0", "y_k1", "y_k2"),
steps=("x_step_0", "x_step_1", "x_step_2"),
probability_contiguous=0.75,
max_allocation_bytes=2 * 1024**3, # 2 GB
dim_parameter="dim",
dtype=dtype,
cuda=cuda,
),
],
seed=seed,
)
```
|
=========================================================================================================================================
SOURCE CODE FILE: sparse_binary.py
LINES: 1
SIZE: 4.22 KB
PATH: scripts\freecad_env\Lib\site-packages\torch\utils\benchmark\op_fuzzers\sparse_binary.py
ENCODING: utf-8
```py
# mypy: allow-untyped-defs
import numpy as np
import torch
from torch.utils.benchmark import Fuzzer, FuzzedParameter, ParameterAlias, FuzzedSparseTensor
_MIN_DIM_SIZE = 16
_MAX_DIM_SIZE = 16 * 1024 ** 2
_POW_TWO_SIZES = tuple(2 ** i for i in range(
int(np.log2(_MIN_DIM_SIZE)),
int(np.log2(_MAX_DIM_SIZE)) + 1,
))
class BinaryOpSparseFuzzer(Fuzzer):
def __init__(self, seed, dtype=torch.float32, cuda=False):
super().__init__(
parameters=[
# Dimensionality of x and y. (e.g. 1D, 2D, or 3D.)
FuzzedParameter("dim_parameter", distribution={1: 0.3, 2: 0.4, 3: 0.3}, strict=True),
FuzzedParameter(
name="sparse_dim",
distribution={1: 0.4, 2: 0.4, 3: 0.2},
strict=True
),
# Shapes for `x` and `y`.
# It is important to test all shapes, however
# powers of two are especially important and therefore
# warrant special attention. This is done by generating
# both a value drawn from all integers between the min and
# max allowed values, and another from only the powers of two
# (both distributions are loguniform) and then randomly
# selecting between the two.
# Moreover, `y` will occasionally have singleton
# dimensions in order to test broadcasting.
[
FuzzedParameter(
name=f"k_any_{i}",
minval=_MIN_DIM_SIZE,
maxval=_MAX_DIM_SIZE,
distribution="loguniform",
) for i in range(3)
],
[
FuzzedParameter(
name=f"k_pow2_{i}",
distribution={size: 1. / len(_POW_TWO_SIZES) for size in _POW_TWO_SIZES}
) for i in range(3)
],
[
FuzzedParameter(
name=f"k{i}",
distribution={
ParameterAlias(f"k_any_{i}"): 0.8,
ParameterAlias(f"k_pow2_{i}"): 0.2,
},
strict=True,
) for i in range(3)
],
[
FuzzedParameter(
name=f"y_k{i}",
distribution={
ParameterAlias(f"k{i}"): 1.0},
strict=True,
) for i in range(3)
],
FuzzedParameter(
name="density",
distribution={0.1: 0.4, 0.05: 0.3, 0.01: 0.3},
),
FuzzedParameter(
name="coalesced",
distribution={True: 0.5, False: 0.5},
),
# Repeatable entropy for downstream applications.
FuzzedParameter(name="random_value", minval=0, maxval=2 ** 32 - 1, distribution="uniform"),
],
tensors=[
FuzzedSparseTensor(
name="x",
size=("k0", "k1", "k2"),
dim_parameter="dim_parameter",
sparse_dim="sparse_dim",
density="density",
coalesced="coalesced",
min_elements=4 * 1024,
max_elements=32 * 1024 ** 2,
dtype=dtype,
cuda=cuda,
),
FuzzedSparseTensor(
name="y",
size=("y_k0", "y_k1", "y_k2"),
dim_parameter="dim_parameter",
sparse_dim="sparse_dim",
density="density",
coalesced="coalesced",
min_elements=4 * 1024,
max_elements=32 * 1024 ** 2,
dtype=dtype,
cuda=cuda,
),
],
seed=seed,
)
```
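A hedged usage sketch mirroring the dense examples earlier in this package: draw a few fuzzed sparse operand pairs and time a binary op on them (`torch.add` here, chosen because it supports sparse COO inputs).
```py
import torch
from torch.utils.benchmark import Timer
from torch.utils.benchmark.op_fuzzers.sparse_binary import BinaryOpSparseFuzzer

fuzzer = BinaryOpSparseFuzzer(seed=0)
for tensors, _, params in fuzzer.take(3):
    m = Timer("torch.add(x, y)", globals=tensors).blocked_autorange(min_run_time=0.05)
    print(f"sparse_dim={params['sparse_dim']}  density={params['density']}: "
          f"{m.median * 1e6:.1f} us")
```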
|
========================================================================================================================================
SOURCE CODE FILE: sparse_unary.py
LINES: 1
SIZE: 3.25 KB
PATH: scripts\freecad_env\Lib\site-packages\torch\utils\benchmark\op_fuzzers\sparse_unary.py
ENCODING: utf-8
```py
# mypy: allow-untyped-defs
import numpy as np
import torch
from torch.utils.benchmark import Fuzzer, FuzzedParameter, ParameterAlias, FuzzedSparseTensor
_MIN_DIM_SIZE = 16
_MAX_DIM_SIZE = 16 * 1024 ** 2
_POW_TWO_SIZES = tuple(2 ** i for i in range(
int(np.log2(_MIN_DIM_SIZE)),
int(np.log2(_MAX_DIM_SIZE)) + 1,
))
class UnaryOpSparseFuzzer(Fuzzer):
def __init__(self, seed, dtype=torch.float32, cuda=False):
super().__init__(
parameters=[
# Dimensionality of x. (e.g. 1D, 2D, or 3D.)
FuzzedParameter("dim_parameter", distribution={1: 0.3, 2: 0.4, 3: 0.3}, strict=True),
FuzzedParameter(
name="sparse_dim",
distribution={1: 0.4, 2: 0.4, 3: 0.2},
strict=True
),
# Shapes for `x`.
# It is important to test all shapes, however
# powers of two are especially important and therefore
# warrant special attention. This is done by generating
# both a value drawn from all integers between the min and
# max allowed values, and another from only the powers of two
# (both distributions are loguniform) and then randomly
# selecting between the two.
[
FuzzedParameter(
name=f"k_any_{i}",
minval=_MIN_DIM_SIZE,
maxval=_MAX_DIM_SIZE,
distribution="loguniform",
) for i in range(3)
],
[
FuzzedParameter(
name=f"k_pow2_{i}",
distribution={size: 1. / len(_POW_TWO_SIZES) for size in _POW_TWO_SIZES}
) for i in range(3)
],
[
FuzzedParameter(
name=f"k{i}",
distribution={
ParameterAlias(f"k_any_{i}"): 0.8,
ParameterAlias(f"k_pow2_{i}"): 0.2,
},
strict=True,
) for i in range(3)
],
FuzzedParameter(
name="density",
distribution={0.1: 0.4, 0.05: 0.3, 0.01: 0.3},
),
FuzzedParameter(
name="coalesced",
distribution={True: 0.5, False: 0.5},
),
FuzzedParameter(name="random_value", minval=0, maxval=2 ** 32 - 1, distribution="uniform"),
],
tensors=[
FuzzedSparseTensor(
name="x",
size=("k0", "k1", "k2"),
dim_parameter="dim_parameter",
sparse_dim="sparse_dim",
min_elements=4 * 1024,
max_elements=32 * 1024 ** 2,
density="density",
coalesced="coalesced",
dtype=dtype,
cuda=cuda,
),
],
seed=seed,
)
```
|
====================================================================================================================================
SOURCE CODE FILE: spectral.py
LINES: 1
SIZE: 3.63 KB
PATH: scripts\freecad_env\Lib\site-packages\torch\utils\benchmark\op_fuzzers\spectral.py
ENCODING: utf-8
```py
# mypy: allow-untyped-defs
import math
import torch
from torch.utils import benchmark
from torch.utils.benchmark import FuzzedParameter, FuzzedTensor, ParameterAlias
__all__ = ['SpectralOpFuzzer']
MIN_DIM_SIZE = 16
MAX_DIM_SIZE = 16 * 1024
def power_range(upper_bound, base):
return (base ** i for i in range(int(math.log(upper_bound, base)) + 1))
# List of regular numbers from MIN_DIM_SIZE to MAX_DIM_SIZE
# These numbers factorize into multiples of prime factors 2, 3, and 5 only
# and are usually the fastest in FFT implementations.
REGULAR_SIZES = []
for i in power_range(MAX_DIM_SIZE, 2):
for j in power_range(MAX_DIM_SIZE // i, 3):
ij = i * j
for k in power_range(MAX_DIM_SIZE // ij, 5):
ijk = ij * k
if ijk > MIN_DIM_SIZE:
REGULAR_SIZES.append(ijk)
REGULAR_SIZES.sort()
class SpectralOpFuzzer(benchmark.Fuzzer):
def __init__(self, *, seed: int, dtype=torch.float64,
cuda: bool = False, probability_regular: float = 1.0):
super().__init__(
parameters=[
# Dimensionality of x. (e.g. 1D, 2D, or 3D.)
FuzzedParameter("ndim", distribution={1: 0.3, 2: 0.4, 3: 0.3}, strict=True),
# Shapes for `x`.
# It is important to test all shapes, however
# regular sizes are especially important to the FFT and therefore
# warrant special attention. This is done by generating
# both a value drawn from all integers between the min and
# max allowed values, and another from only the regular numbers
# (both distributions are loguniform) and then randomly
# selecting between the two.
[
FuzzedParameter(
name=f"k_any_{i}",
minval=MIN_DIM_SIZE,
maxval=MAX_DIM_SIZE,
distribution="loguniform",
) for i in range(3)
],
[
FuzzedParameter(
name=f"k_regular_{i}",
distribution={size: 1. / len(REGULAR_SIZES) for size in REGULAR_SIZES}
) for i in range(3)
],
[
FuzzedParameter(
name=f"k{i}",
distribution={
ParameterAlias(f"k_regular_{i}"): probability_regular,
ParameterAlias(f"k_any_{i}"): 1 - probability_regular,
},
strict=True,
) for i in range(3)
],
# Steps for `x`. (Benchmarks strided memory access.)
[
FuzzedParameter(
name=f"step_{i}",
distribution={1: 0.8, 2: 0.06, 4: 0.06, 8: 0.04, 16: 0.04},
) for i in range(3)
],
],
tensors=[
FuzzedTensor(
name="x",
size=("k0", "k1", "k2"),
steps=("step_0", "step_1", "step_2"),
probability_contiguous=0.75,
min_elements=4 * 1024,
max_elements=32 * 1024 ** 2,
max_allocation_bytes=2 * 1024**3, # 2 GB
dim_parameter="ndim",
dtype=dtype,
cuda=cuda,
),
],
seed=seed,
)
```
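A small check of the claim in the comments above: every entry of `REGULAR_SIZES` is a regular (5-smooth) number, i.e. it factors into powers of 2, 3, and 5 only.
```py
from torch.utils.benchmark.op_fuzzers.spectral import REGULAR_SIZES


def is_regular(n: int) -> bool:
    # Divide out the prime factors 2, 3, and 5; a regular number reduces to 1.
    for p in (2, 3, 5):
        while n % p == 0:
            n //= p
    return n == 1


assert all(is_regular(n) for n in REGULAR_SIZES)
print(len(REGULAR_SIZES), min(REGULAR_SIZES), max(REGULAR_SIZES))
```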
|
=================================================================================================================================
SOURCE CODE FILE: unary.py
LINES: 1
SIZE: 3.15 KB
PATH: scripts\freecad_env\Lib\site-packages\torch\utils\benchmark\op_fuzzers\unary.py
ENCODING: utf-8
```py
# mypy: allow-untyped-defs
import numpy as np
import torch
from torch.utils.benchmark import Fuzzer, FuzzedParameter, ParameterAlias, FuzzedTensor
_MIN_DIM_SIZE = 16
_MAX_DIM_SIZE = 16 * 1024 ** 2
_POW_TWO_SIZES = tuple(2 ** i for i in range(
int(np.log2(_MIN_DIM_SIZE)),
int(np.log2(_MAX_DIM_SIZE)) + 1,
))
class UnaryOpFuzzer(Fuzzer):
def __init__(self, seed, dtype=torch.float32, cuda=False):
super().__init__(
parameters=[
# Dimensionality of x. (e.g. 1D, 2D, or 3D.)
FuzzedParameter("dim", distribution={1: 0.3, 2: 0.4, 3: 0.3}, strict=True),
# Shapes for `x`.
# It is important to test all shapes, however
# powers of two are especially important and therefore
# warrant special attention. This is done by generating
# both a value drawn from all integers between the min and
# max allowed values, and another from only the powers of two
# (both distributions are loguniform) and then randomly
# selecting between the two.
[
FuzzedParameter(
name=f"k_any_{i}",
minval=_MIN_DIM_SIZE,
maxval=_MAX_DIM_SIZE,
distribution="loguniform",
) for i in range(3)
],
[
FuzzedParameter(
name=f"k_pow2_{i}",
distribution={size: 1. / len(_POW_TWO_SIZES) for size in _POW_TWO_SIZES}
) for i in range(3)
],
[
FuzzedParameter(
name=f"k{i}",
distribution={
ParameterAlias(f"k_any_{i}"): 0.8,
ParameterAlias(f"k_pow2_{i}"): 0.2,
},
strict=True,
) for i in range(3)
],
# Steps for `x`. (Benchmarks strided memory access.)
[
FuzzedParameter(
name=f"x_step_{i}",
distribution={1: 0.8, 2: 0.06, 4: 0.06, 8: 0.04, 16: 0.04},
) for i in range(3)
],
# Repeatable entropy for downstream applications.
FuzzedParameter(name="random_value", minval=0, maxval=2 ** 32 - 1, distribution="uniform"),
],
tensors=[
FuzzedTensor(
name="x",
size=("k0", "k1", "k2"),
steps=("x_step_0", "x_step_1", "x_step_2"),
probability_contiguous=0.75,
min_elements=4 * 1024,
max_elements=32 * 1024 ** 2,
max_allocation_bytes=2 * 1024**3, # 2 GB
dim_parameter="dim",
dtype=dtype,
cuda=cuda,
),
],
seed=seed,
)
```
|
===============================================================================================================================
SOURCE CODE FILE: __init__.py
LINES: 1
SIZE: 0.00 KB
PATH: scripts\freecad_env\Lib\site-packages\torch\utils\benchmark\utils\__init__.py
ENCODING: utf-8
```py
```
|
=============================================================================================================================
SOURCE CODE FILE: _stubs.py
LINES: 1
SIZE: 0.99 KB
PATH: scripts\freecad_env\Lib\site-packages\torch\utils\benchmark\utils\_stubs.py
ENCODING: utf-8
```py
from typing import Any, Callable, Protocol, runtime_checkable
class TimerClass(Protocol):
"""This is the portion of the `timeit.Timer` API used by benchmark utils."""
def __init__(
self,
stmt: str,
setup: str,
timer: Callable[[], float],
globals: dict[str, Any],
**kwargs: Any,
) -> None:
...
def timeit(self, number: int) -> float:
...
@runtime_checkable
class TimeitModuleType(Protocol):
"""Modules generated from `timeit_template.cpp`."""
def timeit(self, number: int) -> float:
...
class CallgrindModuleType(Protocol):
"""Replicates the valgrind endpoints in `torch._C`.
These bindings are used to collect Callgrind profiles on earlier versions
of PyTorch and will eventually be removed.
"""
__file__: str
__name__: str
def _valgrind_supported_platform(self) -> bool:
...
def _valgrind_toggle(self) -> None:
...
```
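A minimal sketch showing that the standard-library `timeit.Timer` satisfies the structural `TimerClass` protocol above (matching constructor keywords plus a `timeit(number)` method):
```py
import timeit

from torch.utils.benchmark.utils._stubs import TimerClass

t: TimerClass = timeit.Timer(
    stmt="sum(range(100))",
    setup="pass",
    timer=timeit.default_timer,
    globals={},
)
print(t.timeit(number=1000))  # total seconds for 1000 executions
```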
|
=============================================================================================================================
SOURCE CODE FILE: common.py
LINES: 11
SIZE: 13.69 KB
PATH: scripts\freecad_env\Lib\site-packages\torch\utils\benchmark\utils\common.py
ENCODING: utf-8
```py
"""Base shared classes and utilities."""
import collections
import contextlib
import dataclasses
import os
import shutil
import tempfile
import textwrap
import time
from typing import cast, Any, Optional
from collections.abc import Iterable, Iterator
import uuid
import torch
__all__ = ["TaskSpec", "Measurement", "select_unit", "unit_to_english", "trim_sigfig", "ordered_unique", "set_torch_threads"]
_MAX_SIGNIFICANT_FIGURES = 4
_MIN_CONFIDENCE_INTERVAL = 25e-9 # 25 ns
# Measurement will include a warning if the distribution is suspect. All
# runs are expected to have some variation; these parameters set the
# thresholds.
_IQR_WARN_THRESHOLD = 0.1
_IQR_GROSS_WARN_THRESHOLD = 0.25
@dataclasses.dataclass(init=True, repr=False, eq=True, frozen=True)
class TaskSpec:
"""Container for information used to define a Timer. (except globals)"""
stmt: str
setup: str
global_setup: str = ""
label: Optional[str] = None
sub_label: Optional[str] = None
description: Optional[str] = None
env: Optional[str] = None
num_threads: int = 1
@property
def title(self) -> str:
"""Best effort attempt at a string label for the measurement."""
if self.label is not None:
return self.label + (f": {self.sub_label}" if self.sub_label else "")
elif "\n" not in self.stmt:
return self.stmt + (f": {self.sub_label}" if self.sub_label else "")
return (
f"stmt:{f' ({self.sub_label})' if self.sub_label else ''}\n"
f"{textwrap.indent(self.stmt, ' ')}"
)
def setup_str(self) -> str:
return (
"" if (self.setup == "pass" or not self.setup)
else f"setup:\n{textwrap.indent(self.setup, ' ')}" if "\n" in self.setup
else f"setup: {self.setup}"
)
def summarize(self) -> str:
"""Build TaskSpec portion of repr string for other containers."""
sections = [
self.title,
self.description or "",
self.setup_str(),
]
return "\n".join([f"{i}\n" if "\n" in i else i for i in sections if i])
_TASKSPEC_FIELDS = tuple(i.name for i in dataclasses.fields(TaskSpec))
@dataclasses.dataclass(init=True, repr=False)
class Measurement:
"""The result of a Timer measurement.
This class stores one or more measurements of a given statement. It is
serializable and provides several convenience methods
(including a detailed __repr__) for downstream consumers.
"""
number_per_run: int
raw_times: list[float]
task_spec: TaskSpec
metadata: Optional[dict[Any, Any]] = None # Reserved for user payloads.
def __post_init__(self) -> None:
self._sorted_times: tuple[float, ...] = ()
self._warnings: tuple[str, ...] = ()
self._median: float = -1.0
self._mean: float = -1.0
self._p25: float = -1.0
self._p75: float = -1.0
def __getattr__(self, name: str) -> Any:
# Forward TaskSpec fields for convenience.
if name in _TASKSPEC_FIELDS:
return getattr(self.task_spec, name)
return super().__getattribute__(name)
# =========================================================================
# == Convenience methods for statistics ===================================
# =========================================================================
#
# These methods use raw time divided by number_per_run; this is an
# extrapolation and hides the fact that different number_per_run will
# result in different amortization of overheads, however if Timer has
# selected an appropriate number_per_run then this is a non-issue, and
# forcing users to handle that division would result in a poor experience.
@property
def times(self) -> list[float]:
return [t / self.number_per_run for t in self.raw_times]
@property
def median(self) -> float:
self._lazy_init()
return self._median
@property
def mean(self) -> float:
self._lazy_init()
return self._mean
@property
def iqr(self) -> float:
self._lazy_init()
return self._p75 - self._p25
@property
def significant_figures(self) -> int:
"""Approximate significant figure estimate.
This property is intended to give a convenient way to estimate the
precision of a measurement. It only uses the interquartile region to
estimate statistics to try to mitigate skew from the tails, and
uses a static z value of 1.645 since it is not expected to be used
for small values of `n`, so z can approximate `t`.
        The significant figure estimation is used in conjunction with the
`trim_sigfig` method to provide a more human interpretable data
summary. __repr__ does not use this method; it simply displays raw
values. Significant figure estimation is intended for `Compare`.
"""
self._lazy_init()
n_total = len(self._sorted_times)
lower_bound = int(n_total // 4)
upper_bound = int(torch.tensor(3 * n_total / 4).ceil())
interquartile_points: tuple[float, ...] = self._sorted_times[lower_bound:upper_bound]
std = torch.tensor(interquartile_points).std(unbiased=False).item()
sqrt_n = torch.tensor(len(interquartile_points)).sqrt().item()
# Rough estimates. These are by no means statistically rigorous.
confidence_interval = max(1.645 * std / sqrt_n, _MIN_CONFIDENCE_INTERVAL)
relative_ci = torch.tensor(self._median / confidence_interval).log10().item()
num_significant_figures = int(torch.tensor(relative_ci).floor())
return min(max(num_significant_figures, 1), _MAX_SIGNIFICANT_FIGURES)
@property
def has_warnings(self) -> bool:
self._lazy_init()
return bool(self._warnings)
def _lazy_init(self) -> None:
if self.raw_times and not self._sorted_times:
self._sorted_times = tuple(sorted(self.times))
_sorted_times = torch.tensor(self._sorted_times, dtype=torch.float64)
self._median = _sorted_times.quantile(.5).item()
self._mean = _sorted_times.mean().item()
self._p25 = _sorted_times.quantile(.25).item()
self._p75 = _sorted_times.quantile(.75).item()
def add_warning(msg: str) -> None:
rel_iqr = self.iqr / self.median * 100
self._warnings += (
f" WARNING: Interquartile range is {rel_iqr:.1f}% "
f"of the median measurement.\n {msg}",
)
if not self.meets_confidence(_IQR_GROSS_WARN_THRESHOLD):
add_warning("This suggests significant environmental influence.")
elif not self.meets_confidence(_IQR_WARN_THRESHOLD):
add_warning("This could indicate system fluctuation.")
def meets_confidence(self, threshold: float = _IQR_WARN_THRESHOLD) -> bool:
return self.iqr / self.median < threshold
@property
def title(self) -> str:
return self.task_spec.title
@property
def env(self) -> str:
return (
"Unspecified env" if self.taskspec.env is None
else cast(str, self.taskspec.env)
)
@property
def as_row_name(self) -> str:
return self.sub_label or self.stmt or "[Unknown]"
def __repr__(self) -> str:
"""
Example repr:
<utils.common.Measurement object at 0x7f395b6ac110>
Broadcasting add (4x8)
Median: 5.73 us
IQR: 2.25 us (4.01 to 6.26)
372 measurements, 100 runs per measurement, 1 thread
WARNING: Interquartile range is 39.4% of the median measurement.
This suggests significant environmental influence.
"""
self._lazy_init()
skip_line, newline = "MEASUREMENT_REPR_SKIP_LINE", "\n"
n = len(self._sorted_times)
time_unit, time_scale = select_unit(self._median)
iqr_filter = '' if n >= 4 else skip_line
repr_str = f"""
{super().__repr__()}
{self.task_spec.summarize()}
{'Median: ' if n > 1 else ''}{self._median / time_scale:.2f} {time_unit}
{iqr_filter}IQR: {self.iqr / time_scale:.2f} {time_unit} ({self._p25 / time_scale:.2f} to {self._p75 / time_scale:.2f})
{n} measurement{'s' if n > 1 else ''}, {self.number_per_run} runs {'per measurement,' if n > 1 else ','} {self.num_threads} thread{'s' if self.num_threads > 1 else ''}
{newline.join(self._warnings)}""".strip() # noqa: B950
return "\n".join(l for l in repr_str.splitlines(keepends=False) if skip_line not in l)
@staticmethod
def merge(measurements: Iterable["Measurement"]) -> list["Measurement"]:
"""Convenience method for merging replicates.
Merge will extrapolate times to `number_per_run=1` and will not
transfer any metadata. (Since it might differ between replicates)
"""
grouped_measurements: collections.defaultdict[TaskSpec, list[Measurement]] = collections.defaultdict(list)
for m in measurements:
grouped_measurements[m.task_spec].append(m)
def merge_group(task_spec: TaskSpec, group: list["Measurement"]) -> "Measurement":
times: list[float] = []
for m in group:
# Different measurements could have different `number_per_run`,
# so we call `.times` which normalizes the results.
times.extend(m.times)
return Measurement(
number_per_run=1,
raw_times=times,
task_spec=task_spec,
metadata=None,
)
return [merge_group(t, g) for t, g in grouped_measurements.items()]
def select_unit(t: float) -> tuple[str, float]:
"""Determine how to scale times for O(1) magnitude.
This utility is used to format numbers for human consumption.
"""
time_unit = {-3: "ns", -2: "us", -1: "ms"}.get(int(torch.tensor(t).log10().item() // 3), "s")
time_scale = {"ns": 1e-9, "us": 1e-6, "ms": 1e-3, "s": 1}[time_unit]
return time_unit, time_scale
def unit_to_english(u: str) -> str:
return {
"ns": "nanosecond",
"us": "microsecond",
"ms": "millisecond",
"s": "second",
}[u]
def trim_sigfig(x: float, n: int) -> float:
"""Trim `x` to `n` significant figures. (e.g. 3.14159, 2 -> 3.10000)"""
assert n == int(n)
magnitude = int(torch.tensor(x).abs().log10().ceil().item())
scale = 10 ** (magnitude - n)
return float(torch.tensor(x / scale).round() * scale)
def ordered_unique(elements: Iterable[Any]) -> list[Any]:
return list(collections.OrderedDict(dict.fromkeys(elements)).keys())
@contextlib.contextmanager
def set_torch_threads(n: int) -> Iterator[None]:
prior_num_threads = torch.get_num_threads()
try:
torch.set_num_threads(n)
yield
finally:
torch.set_num_threads(prior_num_threads)
def _make_temp_dir(prefix: Optional[str] = None, gc_dev_shm: bool = False) -> str:
"""Create a temporary directory. The caller is responsible for cleanup.
This function is conceptually similar to `tempfile.mkdtemp`, but with
the key additional feature that it will use shared memory if the
`BENCHMARK_USE_DEV_SHM` environment variable is set. This is an
implementation detail, but an important one for cases where many Callgrind
measurements are collected at once. (Such as when collecting
microbenchmarks.)
This is an internal utility, and is exported solely so that microbenchmarks
can reuse the util.
"""
use_dev_shm: bool = (os.getenv("BENCHMARK_USE_DEV_SHM") or "").lower() in ("1", "true")
if use_dev_shm:
root = "/dev/shm/pytorch_benchmark_utils"
assert os.name == "posix", f"tmpfs (/dev/shm) is POSIX only, current platform is {os.name}"
assert os.path.exists("/dev/shm"), "This system does not appear to support tmpfs (/dev/shm)."
os.makedirs(root, exist_ok=True)
# Because we're working in shared memory, it is more important than
# usual to clean up ALL intermediate files. However we don't want every
# worker to walk over all outstanding directories, so instead we only
# check when we are sure that it won't lead to contention.
if gc_dev_shm:
for i in os.listdir(root):
owner_file = os.path.join(root, i, "owner.pid")
if not os.path.exists(owner_file):
continue
with open(owner_file) as f:
owner_pid = int(f.read())
if owner_pid == os.getpid():
continue
try:
# https://stackoverflow.com/questions/568271/how-to-check-if-there-exists-a-process-with-a-given-pid-in-python
os.kill(owner_pid, 0)
except OSError:
print(f"Detected that {os.path.join(root, i)} was orphaned in shared memory. Cleaning up.")
shutil.rmtree(os.path.join(root, i))
else:
root = tempfile.gettempdir()
# We include the time so names sort by creation time, and add a UUID
# to ensure we don't collide.
name = f"{prefix or tempfile.gettempprefix()}__{int(time.time())}__{uuid.uuid4()}"
path = os.path.join(root, name)
os.makedirs(path, exist_ok=False)
if use_dev_shm:
with open(os.path.join(path, "owner.pid"), "w") as f:
f.write(str(os.getpid()))
return path
```
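A hedged usage sketch for the containers above: the statement and raw times are made up, but the derived statistics (`times`, `median`, `iqr`, `significant_figures`) follow directly from the definitions in this file.
```py
# Illustrative only: build a Measurement by hand and inspect its statistics.
from torch.utils.benchmark.utils.common import Measurement, TaskSpec, select_unit

spec = TaskSpec(stmt="y = x * 2", setup="import torch; x = torch.ones(1024)")
m = Measurement(
    number_per_run=100,
    raw_times=[0.102, 0.099, 0.105, 0.101],  # seconds per 100-run block
    task_spec=spec,
)

# raw_times are divided by number_per_run, so these are per-run statistics.
unit, scale = select_unit(m.median)
print(f"{m.median / scale:.2f} {unit} (IQR {m.iqr / scale:.2f} {unit}, "
      f"~{m.significant_figures} significant figures)")
print(m)  # the detailed __repr__ documented above
```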
|
==============================================================================================================================
SOURCE CODE FILE: compare.py
LINES: 3
SIZE: 13.30 KB
PATH: scripts\freecad_env\Lib\site-packages\torch\utils\benchmark\utils\compare.py
ENCODING: utf-8
```py
# mypy: allow-untyped-defs
"""Display class to aggregate and print the results of many measurements."""
import collections
import enum
import itertools as it
from typing import Optional
from torch.utils.benchmark.utils import common
from torch import tensor as _tensor
import operator
__all__ = ["Colorize", "Compare"]
BEST = "\033[92m"
GOOD = "\033[34m"
BAD = "\033[2m\033[91m"
VERY_BAD = "\033[31m"
BOLD = "\033[1m"
TERMINATE = "\033[0m"
class Colorize(enum.Enum):
NONE = "none"
COLUMNWISE = "columnwise"
ROWWISE = "rowwise"
# Classes to separate internal bookkeeping from what is rendered.
class _Column:
def __init__(
self,
grouped_results: list[tuple[Optional[common.Measurement], ...]],
time_scale: float,
time_unit: str,
trim_significant_figures: bool,
highlight_warnings: bool,
):
self._grouped_results = grouped_results
self._flat_results = [*it.chain.from_iterable(grouped_results)]
self._time_scale = time_scale
self._time_unit = time_unit
self._trim_significant_figures = trim_significant_figures
self._highlight_warnings = (
highlight_warnings
and any(r.has_warnings for r in self._flat_results if r)
)
leading_digits = [
int(_tensor(r.median / self._time_scale).log10().ceil()) if r else None
for r in self._flat_results
]
unit_digits = max(d for d in leading_digits if d is not None)
decimal_digits = min(
max(m.significant_figures - digits, 0)
for digits, m in zip(leading_digits, self._flat_results)
if (m is not None) and (digits is not None)
) if self._trim_significant_figures else 1
length = unit_digits + decimal_digits + (1 if decimal_digits else 0)
self._template = f"{{:>{length}.{decimal_digits}f}}{{:>{7 if self._highlight_warnings else 0}}}"
def get_results_for(self, group):
return self._grouped_results[group]
def num_to_str(self, value: Optional[float], estimated_sigfigs: int, spread: Optional[float]):
if value is None:
return " " * len(self.num_to_str(1, estimated_sigfigs, None))
if self._trim_significant_figures:
value = common.trim_sigfig(value, estimated_sigfigs)
return self._template.format(
value,
f" (! {spread * 100:.0f}%)" if self._highlight_warnings and spread is not None else "")
def optional_min(seq):
l = list(seq)
return None if len(l) == 0 else min(l)
class _Row:
def __init__(self, results, row_group, render_env, env_str_len,
row_name_str_len, time_scale, colorize, num_threads=None):
super().__init__()
self._results = results
self._row_group = row_group
self._render_env = render_env
self._env_str_len = env_str_len
self._row_name_str_len = row_name_str_len
self._time_scale = time_scale
self._colorize = colorize
self._columns: tuple[_Column, ...] = ()
self._num_threads = num_threads
def register_columns(self, columns: tuple[_Column, ...]):
self._columns = columns
def as_column_strings(self):
concrete_results = [r for r in self._results if r is not None]
env = f"({concrete_results[0].env})" if self._render_env else ""
env = env.ljust(self._env_str_len + 4)
output = [" " + env + concrete_results[0].as_row_name]
for m, col in zip(self._results, self._columns or ()):
if m is None:
output.append(col.num_to_str(None, 1, None))
else:
output.append(col.num_to_str(
m.median / self._time_scale,
m.significant_figures,
m.iqr / m.median if m.has_warnings else None
))
return output
@staticmethod
def color_segment(segment, value, best_value):
if value <= best_value * 1.01 or value <= best_value + 100e-9:
return BEST + BOLD + segment + TERMINATE * 2
if value <= best_value * 1.1:
return GOOD + BOLD + segment + TERMINATE * 2
if value >= best_value * 5:
return VERY_BAD + BOLD + segment + TERMINATE * 2
if value >= best_value * 2:
return BAD + segment + TERMINATE * 2
return segment
def row_separator(self, overall_width):
return (
[f"{self._num_threads} threads: ".ljust(overall_width, "-")]
if self._num_threads is not None else []
)
def finalize_column_strings(self, column_strings, col_widths):
best_values = [-1 for _ in column_strings]
if self._colorize == Colorize.ROWWISE:
row_min = min(r.median for r in self._results if r is not None)
best_values = [row_min for _ in column_strings]
elif self._colorize == Colorize.COLUMNWISE:
best_values = [
optional_min(r.median for r in column.get_results_for(self._row_group) if r is not None)
for column in (self._columns or ())
]
row_contents = [column_strings[0].ljust(col_widths[0])]
for col_str, width, result, best_value in zip(column_strings[1:], col_widths[1:], self._results, best_values):
col_str = col_str.center(width)
if self._colorize != Colorize.NONE and result is not None and best_value is not None:
col_str = self.color_segment(col_str, result.median, best_value)
row_contents.append(col_str)
return row_contents
class Table:
def __init__(
self,
results: list[common.Measurement],
colorize: Colorize,
trim_significant_figures: bool,
highlight_warnings: bool
):
assert len({r.label for r in results}) == 1
self.results = results
self._colorize = colorize
self._trim_significant_figures = trim_significant_figures
self._highlight_warnings = highlight_warnings
self.label = results[0].label
self.time_unit, self.time_scale = common.select_unit(
min(r.median for r in results)
)
self.row_keys = common.ordered_unique([self.row_fn(i) for i in results])
self.row_keys.sort(key=operator.itemgetter(slice(2))) # preserve stmt order
self.column_keys = common.ordered_unique([self.col_fn(i) for i in results])
self.rows, self.columns = self.populate_rows_and_columns()
@staticmethod
def row_fn(m: common.Measurement) -> tuple[int, Optional[str], str]:
return m.num_threads, m.env, m.as_row_name
@staticmethod
def col_fn(m: common.Measurement) -> Optional[str]:
return m.description
def populate_rows_and_columns(self) -> tuple[tuple[_Row, ...], tuple[_Column, ...]]:
rows: list[_Row] = []
columns: list[_Column] = []
ordered_results: list[list[Optional[common.Measurement]]] = [
[None for _ in self.column_keys]
for _ in self.row_keys
]
row_position = {key: i for i, key in enumerate(self.row_keys)}
col_position = {key: i for i, key in enumerate(self.column_keys)}
for r in self.results:
i = row_position[self.row_fn(r)]
j = col_position[self.col_fn(r)]
ordered_results[i][j] = r
unique_envs = {r.env for r in self.results}
render_env = len(unique_envs) > 1
env_str_len = max(len(i) for i in unique_envs) if render_env else 0
row_name_str_len = max(len(r.as_row_name) for r in self.results)
prior_num_threads = -1
prior_env = ""
row_group = -1
rows_by_group: list[list[list[Optional[common.Measurement]]]] = []
for (num_threads, env, _), row in zip(self.row_keys, ordered_results):
thread_transition = (num_threads != prior_num_threads)
if thread_transition:
prior_num_threads = num_threads
prior_env = ""
row_group += 1
rows_by_group.append([])
rows.append(
_Row(
results=row,
row_group=row_group,
render_env=(render_env and env != prior_env),
env_str_len=env_str_len,
row_name_str_len=row_name_str_len,
time_scale=self.time_scale,
colorize=self._colorize,
num_threads=num_threads if thread_transition else None,
)
)
rows_by_group[-1].append(row)
prior_env = env
for i in range(len(self.column_keys)):
grouped_results = [tuple(row[i] for row in g) for g in rows_by_group]
column = _Column(
grouped_results=grouped_results,
time_scale=self.time_scale,
time_unit=self.time_unit,
trim_significant_figures=self._trim_significant_figures,
highlight_warnings=self._highlight_warnings,)
columns.append(column)
rows_tuple, columns_tuple = tuple(rows), tuple(columns)
for ri in rows_tuple:
ri.register_columns(columns_tuple)
return rows_tuple, columns_tuple
def render(self) -> str:
string_rows = [[""] + self.column_keys]
string_rows.extend(r.as_column_strings() for r in self.rows)
num_cols = max(len(i) for i in string_rows)
for sr in string_rows:
sr.extend(["" for _ in range(num_cols - len(sr))])
col_widths = [max(len(j) for j in i) for i in zip(*string_rows)]
finalized_columns = [" | ".join(i.center(w) for i, w in zip(string_rows[0], col_widths))]
overall_width = len(finalized_columns[0])
for string_row, row in zip(string_rows[1:], self.rows):
finalized_columns.extend(row.row_separator(overall_width))
finalized_columns.append(" | ".join(row.finalize_column_strings(string_row, col_widths)))
newline = "\n"
has_warnings = self._highlight_warnings and any(ri.has_warnings for ri in self.results)
return f"""
[{(' ' + (self.label or '') + ' ').center(overall_width - 2, '-')}]
{newline.join(finalized_columns)}
Times are in {common.unit_to_english(self.time_unit)}s ({self.time_unit}).
{'(! XX%) Measurement has high variance, where XX is the IQR / median * 100.' + newline if has_warnings else ""}"""[1:]
class Compare:
"""Helper class for displaying the results of many measurements in a
formatted table.
The table format is based on the information fields provided in
:class:`torch.utils.benchmark.Timer` (`description`, `label`, `sub_label`,
`num_threads`, etc).
    The table can be directly printed using :meth:`print` or cast as a `str`.
For a full tutorial on how to use this class, see:
https://pytorch.org/tutorials/recipes/recipes/benchmark.html
Args:
        results: List of Measurements to display.
"""
def __init__(self, results: list[common.Measurement]):
self._results: list[common.Measurement] = []
self.extend_results(results)
self._trim_significant_figures = False
self._colorize = Colorize.NONE
self._highlight_warnings = False
def __str__(self):
return "\n".join(self._render())
def extend_results(self, results):
"""Append results to already stored ones.
All added results must be instances of ``Measurement``.
"""
for r in results:
if not isinstance(r, common.Measurement):
raise ValueError(
"Expected an instance of `Measurement`, " f"got {type(r)} instead."
)
self._results.extend(results)
def trim_significant_figures(self):
"""Enables trimming of significant figures when building the formatted table."""
self._trim_significant_figures = True
def colorize(self, rowwise=False):
"""Colorize formatted table.
Colorize columnwise by default.
"""
self._colorize = Colorize.ROWWISE if rowwise else Colorize.COLUMNWISE
def highlight_warnings(self):
"""Enables warning highlighting when building formatted table."""
self._highlight_warnings = True
def print(self):
"""Print formatted table"""
print(str(self))
def _render(self):
results = common.Measurement.merge(self._results)
grouped_results = self._group_by_label(results)
output = [self._layout(group) for group in grouped_results.values()]
return output
def _group_by_label(self, results: list[common.Measurement]):
grouped_results: collections.defaultdict[str, list[common.Measurement]] = collections.defaultdict(list)
for r in results:
grouped_results[r.label].append(r)
return grouped_results
def _layout(self, results: list[common.Measurement]):
table = Table(
results,
self._colorize,
self._trim_significant_figures,
self._highlight_warnings
)
return table.render()
```
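A hedged end-to-end sketch of the intended workflow: collect `Measurement` objects from `Timer` (the shapes and labels below are arbitrary) and hand them to `Compare` for a grouped, optionally colorized table.
```py
# Illustrative only; the resulting runtimes depend entirely on the host machine.
from torch.utils.benchmark import Timer, Compare

results = []
for n in (64, 256, 1024):
    timer = Timer(
        stmt="x @ x",
        setup=f"import torch; x = torch.ones(({n}, {n}))",
        label="matmul",
        sub_label=f"{n} x {n}",
        description="float32",
    )
    results.append(timer.blocked_autorange(min_run_time=0.05))

comparison = Compare(results)
comparison.trim_significant_figures()
comparison.colorize()  # columnwise by default; pass rowwise=True to flip
comparison.print()
```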
|
==============================================================================================================================
SOURCE CODE FILE: compile.py
LINES: 1
SIZE: 7.61 KB
PATH: scripts\freecad_env\Lib\site-packages\torch\utils\benchmark\utils\compile.py
ENCODING: utf-8
```py
# mypy: allow-untyped-defs
from typing import Any, Callable, cast, Optional, Union
import torch
import torch._dynamo
from torch._dynamo.testing import CompileCounterWithBackend
from torch.utils.benchmark import Timer
__all__ = ["bench_all", "benchmark_compile"]
_warned_tensor_cores = False
_default_float_32_precision = torch.get_float32_matmul_precision()
try:
from tabulate import tabulate
HAS_TABULATE = True
except ModuleNotFoundError:
HAS_TABULATE = False
tabulate = None # type: ignore[assignment]
print("tabulate is not installed, please pip install tabulate to use this utility")
if HAS_TABULATE:
def _enable_tensor_cores():
global _warned_tensor_cores
if torch.cuda.is_available():
if torch.backends.cuda.matmul.allow_tf32 is False and torch.cuda.get_device_capability() >= (8, 0):
torch.set_float32_matmul_precision("high")
if not _warned_tensor_cores:
print("Your GPU supports tensor cores")
print("we will enable it automatically by setting `torch.set_float32_matmul_precision('high')`")
_warned_tensor_cores = True
def _disable_tensor_cores():
torch.set_float32_matmul_precision(_default_float_32_precision)
def bench_loop(
model: Union[torch.nn.Module, Callable],
sample_input: Union[torch.Tensor, Any],
num_iters: int = 5,
optimizer: Optional[torch.optim.Optimizer] = None,
loss_fn: Optional[Callable] = None,
):
# Define the statement and setup for the benchmark
if optimizer and loss_fn:
# Training mode
stmt = """
output = model(sample_input)
loss = loss_fn(output) if loss_fn else output.sum()
loss.backward()
optimizer.step()
optimizer.zero_grad()
"""
else:
# Inference mode
stmt = "model(sample_input)"
# Create the Timer object
timer = Timer(
stmt=stmt,
globals={"model": model, "sample_input": sample_input, "optimizer": optimizer, "loss_fn": loss_fn},
)
result = timer.timeit(number=num_iters)
# Get the average time per iteration in milliseconds
avg_time = result.mean * 1000
return round(avg_time, 2)
def benchmark_compile(
model: Union[torch.nn.Module, Callable],
sample_input: Union[torch.Tensor, Any],
num_iters: int = 5,
backend: Optional[str] = None,
mode: Optional[str] = "default",
optimizer: Optional[torch.optim.Optimizer] = None,
loss_fn : Union[torch.nn.Module, Callable, None] = None,
):
"""
Use this utility to benchmark torch.compile
"""
if backend:
try:
torch._dynamo.reset()
compile_counter_with_backend = CompileCounterWithBackend(backend)
opt_model = torch.compile(model, backend=compile_counter_with_backend, mode=mode)
# Compilation only happens after the first inference
compilation_time = bench_loop(opt_model, sample_input, 1, optimizer, loss_fn)
running_time = bench_loop(opt_model, sample_input, num_iters, optimizer, loss_fn)
if compile_counter_with_backend.frame_count == 0:
raise RuntimeError("No compilation occurred during benchmarking.")
if compile_counter_with_backend.frame_count > 1:
raise RuntimeError("Recompilation occurred during benchmarking.")
except Exception as e:
print(e)
print(f"Failed to compile {backend} with mode {mode}")
return None, None
else:
opt_model = model
compilation_time = None
running_time = bench_loop(opt_model, sample_input, num_iters, optimizer, loss_fn)
compilation_time = round(compilation_time, 2) if compilation_time else None
running_time = round(running_time, 2) if running_time else None
return compilation_time, running_time
def bench_all(
model : Union[torch.nn.Module, Callable],
sample_input: Union[torch.Tensor, Any],
num_iters : int = 5,
optimizer: Optional[torch.optim.Optimizer] = None,
loss_fn : Union[torch.nn.Module, Callable, None] = None,
):
"""
    This is a simple utility that can be used to benchmark torch.compile.
    In particular, it ensures that your GPU is set up to use tensor cores if it supports them.
    It also tries out all the main backends and prints a table of results so you can easily compare them.
    Many of the backends have their own optional dependencies, so please pip install them separately.
    You will get one table for inference and another for training.
    If you'd like to leverage this utility for training, make sure to pass in a torch.optim.Optimizer.
    The important warnings are:
    Your GPU supports tensor cores
    we will enable it automatically by setting `torch.set_float32_matmul_precision('high')`
    If a compilation fails for any reason, including a dependency not being installed,
    then we will print: Failed to compile {backend} with mode {mode}
"""
field_names = ["Train/Inference", "Backend", "Mode", "Compilation Time", "Average Running Time"]
table = []
eager_time = None
torch._dynamo.reset()
    _, eager_time = benchmark_compile(model, sample_input, num_iters, None, None, optimizer, loss_fn)
table.append(
[("Training" if optimizer else "Inference"), "Eager", "-", "-", f"{eager_time} ms"]
)
for backend in torch._dynamo.list_backends():
if backend == "inductor":
mode_options = cast(list[Optional[str]], list(torch._inductor.list_mode_options().keys())) + [None]
for mode in mode_options:
if mode == "default":
continue
torch._dynamo.reset()
try:
if torch.cuda.is_available():
_enable_tensor_cores()
compilation_time, running_time = benchmark_compile(
model, sample_input, num_iters, backend, mode, optimizer, loss_fn)
finally:
if torch.cuda.is_available():
_disable_tensor_cores()
table.append([
("Training" if optimizer else "Inference"),
backend if backend else "-",
mode if mode is not None else "-",
f"{compilation_time} ms " if compilation_time else "-",
f"{running_time} ms " if running_time else "-",
])
else:
torch._dynamo.reset()
compilation_time, running_time = benchmark_compile(
model, sample_input, num_iters, backend, None, optimizer, loss_fn)
if running_time is not None:
table.append([
("Training" if optimizer else "Inference"),
backend, "-",
f"{compilation_time} ms " or "-",
f"{running_time} ms ",
])
return tabulate(table, headers=field_names, tablefmt="github")
```
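A hedged usage sketch: it assumes `tabulate` is installed and uses a throwaway model, so the resulting numbers are purely illustrative.
```py
# Illustrative only: compare eager against the available dynamo backends.
import torch
from torch.utils.benchmark.utils.compile import bench_all

model = torch.nn.Sequential(torch.nn.Linear(64, 64), torch.nn.ReLU())
sample_input = torch.randn(16, 64)

# Inference table; pass an optimizer (and optionally a loss_fn) for a training table.
print(bench_all(model, sample_input, num_iters=5))
```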
|
==============================================================================================================================
SOURCE CODE FILE: cpp_jit.py
LINES: 1
SIZE: 6.81 KB
PATH: scripts\freecad_env\Lib\site-packages\torch\utils\benchmark\utils\cpp_jit.py
ENCODING: utf-8
```py
"""JIT C++ strings into executables."""
import atexit
import os
import re
import shutil
import textwrap
import threading
from typing import Any, Optional
import torch
from torch.utils.benchmark.utils._stubs import CallgrindModuleType, TimeitModuleType
from torch.utils.benchmark.utils.common import _make_temp_dir
from torch.utils import cpp_extension
LOCK = threading.Lock()
SOURCE_ROOT = os.path.split(os.path.abspath(__file__))[0]
# We calculate uuid once at import time so that separate processes will have
# separate build roots, but threads will share the same build root.
# `cpp_extension` uses build root as part of the cache key, so per-invocation
# uuid's (e.g. different build root per _compile_template call) would lead to
# a 0% cache hit rate and spurious recompilation. Consider the following:
# ```
# setup = "auto x = torch::ones({1024, 1024});"
# stmt = "torch::mm(x, x);"
# for num_threads in [1, 2, 4, 8]:
# print(Timer(stmt, setup, num_threads=num_threads, language="c++").blocked_autorange())
# ````
# `setup` and `stmt` do not change, so we can reuse the executable from the
# first pass through the loop.
_BUILD_ROOT: Optional[str] = None
def _get_build_root() -> str:
global _BUILD_ROOT
if _BUILD_ROOT is None:
_BUILD_ROOT = _make_temp_dir(prefix="benchmark_utils_jit_build")
atexit.register(shutil.rmtree, _BUILD_ROOT)
return _BUILD_ROOT
# BACK_TESTING_NOTE:
# There are two workflows where this code could be used. One is the obvious
# case where someone simply builds or installs PyTorch and uses Timer.
# The other is that the entire `torch/utils/benchmark` folder from a CURRENT
# PyTorch checkout is copy-pasted into a much OLDER version of the PyTorch
# source code. This is what we refer to here as "back testing". The rationale
# is that we might want to use current tooling to study some aspect of an
# earlier version of PyTorch. (e.g. a regression.)
#
# The problem is that Timer relies on several aspects of core PyTorch, namely
# some binding functions for Valgrind symbols in `torch._C` and the
# `torch.__config__._cxx_flags()` method. If we were to naively copy code
# around this wouldn't work as the symbols of interest aren't present in
# earlier versions of PyTorch. In order to work around this, we must add back
# testing shims. These shims will never activate during normal use, but will
# allow Timer to function outside of the "correct" version of PyTorch by
# emulating functionality that was added later.
#
# These shims are temporary, and as Timer becomes more integrated with
# PyTorch the cost and complexity of such shims will increase. Once back
# testing is no longer required (which is to say we have done enough historic
# analysis and the shims no longer justify their maintenance and code
# complexity costs) back testing paths will be removed.
CXX_FLAGS: Optional[list[str]]
if hasattr(torch.__config__, "_cxx_flags"):
try:
CXX_FLAGS = torch.__config__._cxx_flags().strip().split()
if CXX_FLAGS is not None and "-g" not in CXX_FLAGS:
CXX_FLAGS.append("-g")
# remove "-W" flags to allow build benchmarks
# with a relaxed constraint of compiler versions
if CXX_FLAGS is not None:
CXX_FLAGS = list(filter(lambda x: not x.startswith("-W"), CXX_FLAGS))
except RuntimeError:
# We are in FBCode.
CXX_FLAGS = None
else:
# FIXME: Remove when back testing is no longer required.
CXX_FLAGS = ["-O2", "-fPIC", "-g"]
EXTRA_INCLUDE_PATHS: list[str] = [os.path.join(SOURCE_ROOT, "valgrind_wrapper")]
CONDA_PREFIX = os.getenv("CONDA_PREFIX")
if CONDA_PREFIX is not None:
# Load will automatically search /usr/include, but not conda include.
EXTRA_INCLUDE_PATHS.append(os.path.join(CONDA_PREFIX, "include"))
COMPAT_CALLGRIND_BINDINGS: Optional[CallgrindModuleType] = None
def get_compat_bindings() -> CallgrindModuleType:
with LOCK:
global COMPAT_CALLGRIND_BINDINGS
if COMPAT_CALLGRIND_BINDINGS is None:
COMPAT_CALLGRIND_BINDINGS = cpp_extension.load(
name="callgrind_bindings",
sources=[os.path.join(
SOURCE_ROOT,
"valgrind_wrapper",
"compat_bindings.cpp"
)],
extra_cflags=CXX_FLAGS,
extra_include_paths=EXTRA_INCLUDE_PATHS,
)
return COMPAT_CALLGRIND_BINDINGS
def _compile_template(
*,
stmt: str,
setup: str,
global_setup: str,
src: str,
is_standalone: bool
) -> Any:
for before, after, indentation in (
("// GLOBAL_SETUP_TEMPLATE_LOCATION", global_setup, 0),
("// SETUP_TEMPLATE_LOCATION", setup, 4),
("// STMT_TEMPLATE_LOCATION", stmt, 8)
):
# C++ doesn't care about indentation so this code isn't load
# bearing the way it is with Python, but this makes the source
# look nicer if a human has to look at it.
src = re.sub(
before,
textwrap.indent(after, " " * indentation)[indentation:],
src
)
# We want to isolate different Timers. However `cpp_extension` will
# cache builds which will significantly reduce the cost of repeated
# invocations.
with LOCK:
name = f"timer_cpp_{abs(hash(src))}"
build_dir = os.path.join(_get_build_root(), name)
os.makedirs(build_dir, exist_ok=True)
src_path = os.path.join(build_dir, "timer_src.cpp")
with open(src_path, "w") as f:
f.write(src)
# `cpp_extension` has its own locking scheme, so we don't need our lock.
return cpp_extension.load(
name=name,
sources=[src_path],
build_directory=build_dir,
extra_cflags=CXX_FLAGS,
extra_include_paths=EXTRA_INCLUDE_PATHS,
is_python_module=not is_standalone,
is_standalone=is_standalone,
)
def compile_timeit_template(*, stmt: str, setup: str, global_setup: str) -> TimeitModuleType:
template_path: str = os.path.join(SOURCE_ROOT, "timeit_template.cpp")
with open(template_path) as f:
src: str = f.read()
module = _compile_template(stmt=stmt, setup=setup, global_setup=global_setup, src=src, is_standalone=False)
assert isinstance(module, TimeitModuleType)
return module
def compile_callgrind_template(*, stmt: str, setup: str, global_setup: str) -> str:
template_path: str = os.path.join(SOURCE_ROOT, "valgrind_wrapper", "timer_callgrind_template.cpp")
with open(template_path) as f:
src: str = f.read()
target = _compile_template(stmt=stmt, setup=setup, global_setup=global_setup, src=src, is_standalone=True)
assert isinstance(target, str)
return target
```
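A hedged sketch of direct use. Normally `Timer(..., language="c++")` drives this machinery, and a working C++ toolchain plus the PyTorch headers are required for the build to succeed.
```py
# Illustrative only: JIT-build the timeit module from the C++ template.
from torch.utils.benchmark.utils.cpp_jit import compile_timeit_template

module = compile_timeit_template(
    stmt="torch::mm(x, x);",
    setup="auto x = torch::ones({64, 64});",
    global_setup="",
)
print(module.timeit(100))  # wall time in seconds for 100 iterations
```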
|
=============================================================================================================================
SOURCE CODE FILE: fuzzer.py
LINES: 2
SIZE: 18.38 KB
PATH: scripts\freecad_env\Lib\site-packages\torch\utils\benchmark\utils\fuzzer.py
ENCODING: utf-8
```py
# mypy: allow-untyped-defs
import functools
import itertools as it
from typing import Any, Callable, Optional, Union
import torch
__all__ = [
"Fuzzer",
"FuzzedParameter", "ParameterAlias",
"FuzzedTensor",
]
_DISTRIBUTIONS = (
"loguniform",
"uniform",
)
class FuzzedParameter:
"""Specification for a parameter to be generated during fuzzing."""
def __init__(
self,
name: str,
minval: Optional[Union[int, float]] = None,
maxval: Optional[Union[int, float]] = None,
distribution: Optional[Union[str, dict[Any, float]]] = None,
strict: bool = False,
):
"""
Args:
name:
A string name with which to identify the parameter.
FuzzedTensors can reference this string in their
specifications.
minval:
The lower bound for the generated value. See the description
of `distribution` for type behavior.
maxval:
The upper bound for the generated value. Type behavior is
identical to `minval`.
distribution:
Specifies the distribution from which this parameter should
be drawn. There are three possibilities:
- "loguniform"
Samples between `minval` and `maxval` (inclusive) such
that the probabilities are uniform in log space. As a
concrete example, if minval=1 and maxval=100, a sample
is as likely to fall in [1, 10) as it is [10, 100].
- "uniform"
Samples are chosen with uniform probability between
`minval` and `maxval` (inclusive). If either `minval`
or `maxval` is a float then the distribution is the
continuous uniform distribution; otherwise samples
are constrained to the integers.
- dict:
If a dict is passed, the keys are taken to be choices
for the variables and the values are interpreted as
probabilities. (And must sum to one.)
If a dict is passed, `minval` and `maxval` must not be set.
Otherwise, they must be set.
strict:
If a parameter is strict, it will not be included in the
iterative resampling process which Fuzzer uses to find a
valid parameter configuration. This allows an author to
prevent skew from resampling for a given parameter (for
instance, a low size limit could inadvertently bias towards
Tensors with fewer dimensions) at the cost of more iterations
when generating parameters.
"""
self._name = name
self._minval = minval
self._maxval = maxval
self._distribution = self._check_distribution(distribution)
self.strict = strict
@property
def name(self):
return self._name
def sample(self, state):
if self._distribution == "loguniform":
return self._loguniform(state)
if self._distribution == "uniform":
return self._uniform(state)
if isinstance(self._distribution, dict):
return self._custom_distribution(state)
def _check_distribution(self, distribution):
if not isinstance(distribution, dict):
assert distribution in _DISTRIBUTIONS
else:
assert not any(i < 0 for i in distribution.values()), "Probabilities cannot be negative"
assert abs(sum(distribution.values()) - 1) <= 1e-5, "Distribution is not normalized"
assert self._minval is None
assert self._maxval is None
return distribution
def _loguniform(self, state):
import numpy as np
output = int(2 ** state.uniform(
low=np.log2(self._minval) if self._minval is not None else None,
high=np.log2(self._maxval) if self._maxval is not None else None,
))
if self._minval is not None and output < self._minval:
return self._minval
if self._maxval is not None and output > self._maxval:
return self._maxval
return output
def _uniform(self, state):
if isinstance(self._minval, int) and isinstance(self._maxval, int):
return int(state.randint(low=self._minval, high=self._maxval + 1))
return state.uniform(low=self._minval, high=self._maxval)
def _custom_distribution(self, state):
import numpy as np
# If we directly pass the keys to `choice`, numpy will convert
# them to numpy dtypes.
index = state.choice(
np.arange(len(self._distribution)),
p=tuple(self._distribution.values()))
return list(self._distribution.keys())[index]
class ParameterAlias:
"""Indicates that a parameter should alias the value of another parameter.
When used in conjunction with a custom distribution, this allows fuzzed
tensors to represent a broader range of behaviors. For example, the
following sometimes produces Tensors which broadcast:
Fuzzer(
parameters=[
FuzzedParameter("x_len", 4, 1024, distribution="uniform"),
# `y` will either be size one, or match the size of `x`.
FuzzedParameter("y_len", distribution={
                1: 0.5,
                ParameterAlias("x_len"): 0.5,
}),
],
tensors=[
FuzzedTensor("x", size=("x_len",)),
FuzzedTensor("y", size=("y_len",)),
],
)
    Chains of aliases are allowed, but may not contain cycles.
"""
def __init__(self, alias_to):
self.alias_to = alias_to
def __repr__(self):
return f"ParameterAlias[alias_to: {self.alias_to}]"
def dtype_size(dtype):
if dtype == torch.bool:
return 1
if dtype.is_floating_point or dtype.is_complex:
return int(torch.finfo(dtype).bits / 8)
return int(torch.iinfo(dtype).bits / 8)
def prod(values, base=1):
"""np.prod can overflow, so for sizes the product should be done in Python.
Even though np.prod type promotes to int64, it can still overflow in which
case the negative value will pass the size check and OOM when attempting to
actually allocate the Tensor.
"""
return functools.reduce(lambda x, y: int(x) * int(y), values, base)
class FuzzedTensor:
def __init__(
self,
name: str,
size: tuple[Union[str, int], ...],
steps: Optional[tuple[Union[str, int], ...]] = None,
probability_contiguous: float = 0.5,
min_elements: Optional[int] = None,
max_elements: Optional[int] = None,
max_allocation_bytes: Optional[int] = None,
dim_parameter: Optional[str] = None,
roll_parameter: Optional[str] = None,
dtype=torch.float32,
cuda=False,
tensor_constructor: Optional[Callable] = None
):
"""
Args:
name:
A string identifier for the generated Tensor.
size:
A tuple of integers or strings specifying the size of the generated
                Tensor. String values will be replaced with a concrete int during the
generation process, while ints are simply passed as literals.
steps:
An optional tuple with the same length as `size`. This indicates
that a larger Tensor should be allocated, and then sliced to
produce the generated Tensor. For instance, if size is (4, 8)
and steps is (1, 4), then a tensor `t` of size (4, 32) will be
created and then `t[:, ::4]` will be used. (Allowing one to test
Tensors with strided memory.)
probability_contiguous:
A number between zero and one representing the chance that the
generated Tensor has a contiguous memory layout. This is achieved by
randomly permuting the shape of a Tensor, calling `.contiguous()`,
and then permuting back. This is applied before `steps`, which can
also cause a Tensor to be non-contiguous.
min_elements:
                The minimum number of elements that this Tensor must have for a
set of parameters to be valid. (Otherwise they are resampled.)
max_elements:
Like `min_elements`, but setting an upper bound.
max_allocation_bytes:
Like `max_elements`, but for the size of Tensor that must be
allocated prior to slicing for `steps` (if applicable). For
example, a FloatTensor with size (1024, 1024) and steps (4, 4)
would have 1M elements, but would require a 64 MB allocation.
dim_parameter:
The length of `size` and `steps` will be truncated to this value.
This allows Tensors of varying dimensions to be generated by the
Fuzzer.
dtype:
The PyTorch dtype of the generated Tensor.
cuda:
Whether to place the Tensor on a GPU.
tensor_constructor:
Callable which will be used instead of the default Tensor
construction method. This allows the author to enforce properties
of the Tensor (e.g. it can only have certain values). The dtype and
concrete shape of the Tensor to be created will be passed, and
concrete values of all parameters will be passed as kwargs. Note
that transformations to the result (permuting, slicing) will be
performed by the Fuzzer; the tensor_constructor is only responsible
for creating an appropriately sized Tensor.
"""
self._name = name
self._size = size
self._steps = steps
self._probability_contiguous = probability_contiguous
self._min_elements = min_elements
self._max_elements = max_elements
self._max_allocation_bytes = max_allocation_bytes
self._dim_parameter = dim_parameter
self._dtype = dtype
self._cuda = cuda
self._tensor_constructor = tensor_constructor
@property
def name(self):
return self._name
@staticmethod
def default_tensor_constructor(size, dtype, **kwargs):
if dtype.is_floating_point or dtype.is_complex:
return torch.rand(size=size, dtype=dtype, device="cpu")
else:
return torch.randint(1, 127, size=size, dtype=dtype, device="cpu")
def _make_tensor(self, params, state):
import numpy as np
size, steps, allocation_size = self._get_size_and_steps(params)
constructor = (
self._tensor_constructor or
self.default_tensor_constructor
)
raw_tensor = constructor(size=allocation_size, dtype=self._dtype, **params)
if self._cuda:
raw_tensor = raw_tensor.cuda()
# Randomly permute the Tensor and call `.contiguous()` to force re-ordering
# of the memory, and then permute it back to the original shape.
dim = len(size)
order = np.arange(dim)
if state.rand() > self._probability_contiguous:
while dim > 1 and np.all(order == np.arange(dim)):
order = state.permutation(raw_tensor.dim())
raw_tensor = raw_tensor.permute(tuple(order)).contiguous()
raw_tensor = raw_tensor.permute(tuple(np.argsort(order)))
slices = [slice(0, size * step, step) for size, step in zip(size, steps)]
tensor = raw_tensor[slices]
properties = {
"numel": int(tensor.numel()),
"order": order,
"steps": steps,
"is_contiguous": tensor.is_contiguous(),
"dtype": str(self._dtype),
}
return tensor, properties
def _get_size_and_steps(self, params):
dim = (
params[self._dim_parameter]
if self._dim_parameter is not None
else len(self._size)
)
def resolve(values, dim):
"""Resolve values into concrete integers."""
values = tuple(params.get(i, i) for i in values)
if len(values) > dim:
values = values[:dim]
if len(values) < dim:
values = values + tuple(1 for _ in range(dim - len(values)))
return values
size = resolve(self._size, dim)
steps = resolve(self._steps or (), dim)
allocation_size = tuple(size_i * step_i for size_i, step_i in zip(size, steps))
return size, steps, allocation_size
def satisfies_constraints(self, params):
size, _, allocation_size = self._get_size_and_steps(params)
# Product is computed in Python to avoid integer overflow.
num_elements = prod(size)
assert num_elements >= 0
allocation_bytes = prod(allocation_size, base=dtype_size(self._dtype))
def nullable_greater(left, right):
if left is None or right is None:
return False
return left > right
return not any((
nullable_greater(num_elements, self._max_elements),
nullable_greater(self._min_elements, num_elements),
nullable_greater(allocation_bytes, self._max_allocation_bytes),
))
class Fuzzer:
def __init__(
self,
parameters: list[Union[FuzzedParameter, list[FuzzedParameter]]],
tensors: list[Union[FuzzedTensor, list[FuzzedTensor]]],
constraints: Optional[list[Callable]] = None,
seed: Optional[int] = None
):
"""
Args:
parameters:
List of FuzzedParameters which provide specifications
for generated parameters. Iterable elements will be
unpacked, though arbitrary nested structures will not.
tensors:
List of FuzzedTensors which define the Tensors which
will be created each step based on the parameters for
that step. Iterable elements will be unpacked, though
arbitrary nested structures will not.
constraints:
List of callables. They will be called with params
as kwargs, and if any of them return False the current
set of parameters will be rejected.
seed:
Seed for the RandomState used by the Fuzzer. This will
also be used to set the PyTorch random seed so that random
ops will create reproducible Tensors.
"""
import numpy as np
if seed is None:
seed = int(np.random.RandomState().randint(0, 2 ** 32 - 1, dtype=np.int64))
self._seed = seed
self._parameters = Fuzzer._unpack(parameters, FuzzedParameter)
self._tensors = Fuzzer._unpack(tensors, FuzzedTensor)
self._constraints = constraints or ()
p_names = {p.name for p in self._parameters}
t_names = {t.name for t in self._tensors}
name_overlap = p_names.intersection(t_names)
if name_overlap:
raise ValueError(f"Duplicate names in parameters and tensors: {name_overlap}")
self._rejections = 0
self._total_generated = 0
@staticmethod
def _unpack(values, cls):
return tuple(it.chain.from_iterable(
[[i] if isinstance(i, cls) else i for i in values]
))
def take(self, n):
import numpy as np
state = np.random.RandomState(self._seed)
torch.manual_seed(state.randint(low=0, high=2 ** 63, dtype=np.int64))
for _ in range(n):
params = self._generate(state)
tensors = {}
tensor_properties = {}
for t in self._tensors:
tensor, properties = t._make_tensor(params, state)
tensors[t.name] = tensor
tensor_properties[t.name] = properties
yield tensors, tensor_properties, params
@property
def rejection_rate(self):
if not self._total_generated:
return 0.
return self._rejections / self._total_generated
def _generate(self, state):
strict_params: dict[str, Union[float, int, ParameterAlias]] = {}
for _ in range(1000):
candidate_params: dict[str, Union[float, int, ParameterAlias]] = {}
for p in self._parameters:
if p.strict:
if p.name in strict_params:
candidate_params[p.name] = strict_params[p.name]
else:
candidate_params[p.name] = p.sample(state)
strict_params[p.name] = candidate_params[p.name]
else:
candidate_params[p.name] = p.sample(state)
candidate_params = self._resolve_aliases(candidate_params)
self._total_generated += 1
if not all(f(candidate_params) for f in self._constraints):
self._rejections += 1
continue
if not all(t.satisfies_constraints(candidate_params) for t in self._tensors):
self._rejections += 1
continue
return candidate_params
raise ValueError("Failed to generate a set of valid parameters.")
@staticmethod
def _resolve_aliases(params):
params = dict(params)
alias_count = sum(isinstance(v, ParameterAlias) for v in params.values())
keys = list(params.keys())
while alias_count:
for k in keys:
v = params[k]
if isinstance(v, ParameterAlias):
params[k] = params[v.alias_to]
alias_count_new = sum(isinstance(v, ParameterAlias) for v in params.values())
if alias_count == alias_count_new:
raise ValueError(f"ParameterAlias cycle detected\n{params}")
alias_count = alias_count_new
return params
```
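A minimal, hedged sketch of the fuzzing loop; the parameter name and bounds are arbitrary and exist only to show the `take` interface.
```py
# Illustrative only: generate a few random 1-D tensors and inspect them.
from torch.utils.benchmark import Fuzzer, FuzzedParameter, FuzzedTensor

fuzzer = Fuzzer(
    parameters=[
        FuzzedParameter("n", minval=16, maxval=4096, distribution="loguniform"),
    ],
    tensors=[
        FuzzedTensor("x", size=("n",), min_elements=16, max_elements=4096),
    ],
    seed=0,
)

for tensors, properties, params in fuzzer.take(3):
    print(params["n"], tuple(tensors["x"].shape), properties["x"]["is_contiguous"])
```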
|
====================================================================================================================================
SOURCE CODE FILE: sparse_fuzzer.py
LINES: 1
SIZE: 5.16 KB
PATH: scripts\freecad_env\Lib\site-packages\torch\utils\benchmark\utils\sparse_fuzzer.py
ENCODING: utf-8
```py
# mypy: allow-untyped-defs
from typing import Optional, Union
from numbers import Number
import torch
from torch.utils.benchmark import FuzzedTensor
import math
class FuzzedSparseTensor(FuzzedTensor):
def __init__(
self,
name: str,
size: tuple[Union[str, int], ...],
min_elements: Optional[int] = None,
max_elements: Optional[int] = None,
dim_parameter: Optional[str] = None,
sparse_dim: Optional[str] = None,
nnz: Optional[str] = None,
density: Optional[str] = None,
coalesced: Optional[str] = None,
dtype=torch.float32,
cuda=False
):
"""
Args:
name:
A string identifier for the generated Tensor.
size:
A tuple of integers or strings specifying the size of the generated
                Tensor. String values will be replaced with a concrete int during the
generation process, while ints are simply passed as literals.
min_elements:
                The minimum number of elements that this Tensor must have for a
set of parameters to be valid. (Otherwise they are resampled.)
max_elements:
Like `min_elements`, but setting an upper bound.
dim_parameter:
The length of `size` will be truncated to this value.
This allows Tensors of varying dimensions to be generated by the
Fuzzer.
sparse_dim:
The number of sparse dimensions in a sparse tensor.
density:
This value allows tensors of varying sparsities to be generated by the Fuzzer.
coalesced:
The sparse tensor format permits uncoalesced sparse tensors,
where there may be duplicate coordinates in the indices.
dtype:
The PyTorch dtype of the generated Tensor.
cuda:
Whether to place the Tensor on a GPU.
"""
super().__init__(name=name, size=size, min_elements=min_elements,
max_elements=max_elements, dim_parameter=dim_parameter, dtype=dtype, cuda=cuda)
self._density = density
self._coalesced = coalesced
self._sparse_dim = sparse_dim
@staticmethod
def sparse_tensor_constructor(size, dtype, sparse_dim, nnz, is_coalesced):
"""sparse_tensor_constructor creates a sparse tensor with coo format.
Note that when `is_coalesced` is False, the number of elements is doubled but the number of indices
represents the same amount of number of non zeros `nnz`, i.e, this is virtually the same tensor
with the same sparsity pattern. Moreover, most of the sparse operation will use coalesce() method
and what we want here is to get a sparse tensor with the same `nnz` even if this is coalesced or not.
In the other hand when `is_coalesced` is True the number of elements is reduced in the coalescing process
by an unclear amount however the probability to generate duplicates indices are low for most of the cases.
This decision was taken on purpose to maintain the construction cost as low as possible.
"""
if isinstance(size, Number):
size = [size] * sparse_dim
assert all(size[d] > 0 for d in range(sparse_dim)) or nnz == 0, 'invalid arguments'
v_size = [nnz] + list(size[sparse_dim:])
if dtype.is_floating_point:
v = torch.rand(size=v_size, dtype=dtype, device="cpu")
else:
v = torch.randint(1, 127, size=v_size, dtype=dtype, device="cpu")
i = torch.rand(sparse_dim, nnz, device="cpu")
i.mul_(torch.tensor(size[:sparse_dim]).unsqueeze(1).to(i))
i = i.to(torch.long)
if not is_coalesced:
v = torch.cat([v, torch.randn_like(v)], 0)
i = torch.cat([i, i], 1)
x = torch.sparse_coo_tensor(i, v, torch.Size(size))
if is_coalesced:
x = x.coalesce()
return x
def _make_tensor(self, params, state):
size, _, _ = self._get_size_and_steps(params)
density = params['density']
nnz = math.ceil(sum(size) * density)
assert nnz <= sum(size)
is_coalesced = params['coalesced']
sparse_dim = params['sparse_dim'] if self._sparse_dim else len(size)
sparse_dim = min(sparse_dim, len(size))
tensor = self.sparse_tensor_constructor(size, self._dtype, sparse_dim, nnz, is_coalesced)
if self._cuda:
tensor = tensor.cuda()
sparse_dim = tensor.sparse_dim()
dense_dim = tensor.dense_dim()
is_hybrid = len(size[sparse_dim:]) > 0
properties = {
"numel": int(tensor.numel()),
"shape": tensor.size(),
"is_coalesced": tensor.is_coalesced(),
"density": density,
"sparsity": 1.0 - density,
"sparse_dim": sparse_dim,
"dense_dim": dense_dim,
"is_hybrid": is_hybrid,
"dtype": str(self._dtype),
}
return tensor, properties
```
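A hedged sketch of how `FuzzedSparseTensor` plugs into a `Fuzzer`: the `density` and `coalesced` parameters read by `_make_tensor` must be supplied by the fuzzer, and the values below are arbitrary.
```py
# Illustrative only: fuzz a few sparse COO tensors and print their properties.
from torch.utils.benchmark import Fuzzer, FuzzedParameter
from torch.utils.benchmark.utils.sparse_fuzzer import FuzzedSparseTensor

fuzzer = Fuzzer(
    parameters=[
        FuzzedParameter("n", minval=16, maxval=1024, distribution="loguniform"),
        FuzzedParameter("density", distribution={0.05: 0.5, 0.25: 0.5}),
        FuzzedParameter("coalesced", distribution={True: 0.5, False: 0.5}),
    ],
    tensors=[
        FuzzedSparseTensor("x", size=("n", "n")),
    ],
    seed=0,
)

for _, properties, _ in fuzzer.take(3):
    p = properties["x"]
    print(p["shape"], p["sparse_dim"], p["is_coalesced"], f"sparsity={p['sparsity']}")
```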
|
=======================================================================================================================================
SOURCE CODE FILE: timeit_template.cpp
LINES: 1
SIZE: 1.03 KB
PATH: scripts\freecad_env\Lib\site-packages\torch\utils\benchmark\utils\timeit_template.cpp
ENCODING: utf-8
```cpp
/* C++ template for Timer.timeit
This template will be consumed by `cpp_jit.py`, and will replace:
`GLOBAL_SETUP_TEMPLATE_LOCATION`,
`SETUP_TEMPLATE_LOCATION`
and
`STMT_TEMPLATE_LOCATION`
sections with user provided statements.
*/
#include <chrono>
#include <c10/util/irange.h>
#include <torch/csrc/utils/pybind.h>
#include <pybind11/pybind11.h>
#include <torch/extension.h>
// Global setup. (e.g. #includes)
// GLOBAL_SETUP_TEMPLATE_LOCATION
double timeit(int n) {
pybind11::gil_scoped_release no_gil;
// Setup
// SETUP_TEMPLATE_LOCATION
{
// Warmup
// STMT_TEMPLATE_LOCATION
}
// Main loop
auto start_time = std::chrono::high_resolution_clock::now();
for (const auto loop_idx : c10::irange(n)) {
(void)loop_idx;
// STMT_TEMPLATE_LOCATION
}
auto end_time = std::chrono::high_resolution_clock::now();
return std::chrono::duration<double>(end_time - start_time).count();
}
PYBIND11_MODULE(TORCH_EXTENSION_NAME, m) {
m.def("timeit", &timeit);
}
```
|
============================================================================================================================
SOURCE CODE FILE: timer.py
LINES: 5
SIZE: 21.23 KB
PATH: scripts\freecad_env\Lib\site-packages\torch\utils\benchmark\utils\timer.py
ENCODING: utf-8
```py
"""Timer class based on the timeit.Timer class, but torch aware."""
import enum
import timeit
import textwrap
from typing import overload, Any, Callable, NoReturn, Optional, Union
import torch
from torch.utils.benchmark.utils import common, cpp_jit
from torch.utils.benchmark.utils._stubs import TimerClass, TimeitModuleType
from torch.utils.benchmark.utils.valgrind_wrapper import timer_interface as valgrind_timer_interface
__all__ = ["Timer", "timer", "Language"]
if torch.backends.cuda.is_built() and torch.cuda.is_available(): # type: ignore[no-untyped-call]
def timer() -> float:
torch.cuda.synchronize()
return timeit.default_timer()
elif torch.xpu.is_available():
def timer() -> float:
torch.xpu.synchronize()
return timeit.default_timer()
elif torch._C._get_privateuse1_backend_name() != "privateuseone":
privateuse1_device_handler = getattr(torch, torch._C._get_privateuse1_backend_name(), None) \
if torch._C._get_privateuse1_backend_name() != "cpu" else None
def timer() -> float:
if privateuse1_device_handler:
privateuse1_device_handler.synchronize()
return timeit.default_timer()
else:
timer = timeit.default_timer
class Language(enum.Enum):
PYTHON = 0
CPP = 1
class CPPTimer:
def __init__(
self,
stmt: str,
setup: str,
global_setup: str,
timer: Callable[[], float],
globals: dict[str, Any],
) -> None:
if timer is not timeit.default_timer:
raise NotImplementedError(
"PyTorch was built with CUDA and a GPU is present; however "
"Timer does not yet support GPU measurements. If your "
"code is CPU only, pass `timer=timeit.default_timer` to the "
"Timer's constructor to indicate this. (Note that this will "
"produce incorrect results if the GPU is in fact used, as "
"Timer will not synchronize CUDA.)"
)
if globals:
raise ValueError("C++ timing does not support globals.")
self._stmt: str = textwrap.dedent(stmt)
self._setup: str = textwrap.dedent(setup)
self._global_setup: str = textwrap.dedent(global_setup)
self._timeit_module: Optional[TimeitModuleType] = None
def timeit(self, number: int) -> float:
if self._timeit_module is None:
self._timeit_module = cpp_jit.compile_timeit_template(
stmt=self._stmt,
setup=self._setup,
global_setup=self._global_setup,
)
return self._timeit_module.timeit(number)
class Timer:
"""Helper class for measuring execution time of PyTorch statements.
For a full tutorial on how to use this class, see:
https://pytorch.org/tutorials/recipes/recipes/benchmark.html
The PyTorch Timer is based on `timeit.Timer` (and in fact uses
`timeit.Timer` internally), but with several key differences:
1) Runtime aware:
Timer will perform warmups (important as some elements of PyTorch are
lazily initialized), set threadpool size so that comparisons are
apples-to-apples, and synchronize asynchronous CUDA functions when
necessary.
2) Focus on replicates:
When measuring code, and particularly complex kernels / models,
run-to-run variation is a significant confounding factor. It is
expected that all measurements should include replicates to quantify
noise and allow median computation, which is more robust than mean.
To that effect, this class deviates from the `timeit` API by
conceptually merging `timeit.Timer.repeat` and `timeit.Timer.autorange`.
(Exact algorithms are discussed in method docstrings.) The `timeit`
method is replicated for cases where an adaptive strategy is not
desired.
3) Optional metadata:
When defining a Timer, one can optionally specify `label`, `sub_label`,
`description`, and `env`. (Defined later) These fields are included in
the representation of result object and by the `Compare` class to group
and display results for comparison.
4) Instruction counts
In addition to wall times, Timer can run a statement under Callgrind
and report instructions executed.
Directly analogous to `timeit.Timer` constructor arguments:
`stmt`, `setup`, `timer`, `globals`
PyTorch Timer specific constructor arguments:
`label`, `sub_label`, `description`, `env`, `num_threads`
Args:
stmt: Code snippet to be run in a loop and timed.
setup: Optional setup code. Used to define variables used in `stmt`
global_setup: (C++ only)
Code which is placed at the top level of the file for things like
`#include` statements.
timer:
Callable which returns the current time. If PyTorch was built
without CUDA or there is no GPU present, this defaults to
`timeit.default_timer`; otherwise it will synchronize CUDA before
measuring the time.
globals:
A dict which defines the global variables when `stmt` is being
executed. This is the other method for providing variables which
`stmt` needs.
label:
String which summarizes `stmt`. For instance, if `stmt` is
"torch.nn.functional.relu(torch.add(x, 1, out=out))"
one might set label to "ReLU(x + 1)" to improve readability.
sub_label:
Provide supplemental information to disambiguate measurements
with identical stmt or label. For instance, in our example
above sub_label might be "float" or "int", so that it is easy
to differentiate:
"ReLU(x + 1): (float)"
"ReLU(x + 1): (int)"
when printing Measurements or summarizing using `Compare`.
description:
String to distinguish measurements with identical label and
sub_label. The principal use of `description` is to signal to
`Compare` the columns of data. For instance one might set it
based on the input size to create a table of the form: ::
| n=1 | n=4 | ...
------------- ...
ReLU(x + 1): (float) | ... | ... | ...
ReLU(x + 1): (int) | ... | ... | ...
using `Compare`. It is also included when printing a Measurement.
env:
This tag indicates that otherwise identical tasks were run in
different environments, and are therefore not equivalent, for
instance when A/B testing a change to a kernel. `Compare` will
treat Measurements with different `env` specification as distinct
when merging replicate runs.
num_threads:
The size of the PyTorch threadpool when executing `stmt`. Single
threaded performance is important as both a key inference workload
and a good indicator of intrinsic algorithmic efficiency, so the
default is set to one. This is in contrast to the default PyTorch
threadpool size which tries to utilize all cores.
"""
_timer_cls: type[TimerClass] = timeit.Timer
def __init__(
self,
stmt: str = "pass",
setup: str = "pass",
global_setup: str = "",
timer: Callable[[], float] = timer,
globals: Optional[dict[str, Any]] = None,
label: Optional[str] = None,
sub_label: Optional[str] = None,
description: Optional[str] = None,
env: Optional[str] = None,
num_threads: int = 1,
language: Union[Language, str] = Language.PYTHON,
):
if not isinstance(stmt, str):
raise ValueError("Currently only a `str` stmt is supported.")
# We copy `globals` to prevent mutations from leaking.
# (For instance, `eval` adds the `__builtins__` key)
self._globals = dict(globals or {})
timer_kwargs = {}
if language in (Language.PYTHON, "py", "python"):
# Include `torch` if not specified as a convenience feature.
self._globals.setdefault("torch", torch)
self._language: Language = Language.PYTHON
if global_setup:
raise ValueError(
f"global_setup is C++ only, got `{global_setup}`. Most "
"likely this code can simply be moved to `setup`."
)
elif language in (Language.CPP, "cpp", "c++"):
assert self._timer_cls is timeit.Timer, "_timer_cls has already been swapped."
self._timer_cls = CPPTimer
setup = ("" if setup == "pass" else setup)
self._language = Language.CPP
timer_kwargs["global_setup"] = global_setup
else:
raise ValueError(f"Invalid language `{language}`.")
# Convenience adjustment so that multi-line code snippets defined in
        # functions do not raise an IndentationError (Python) or look odd (C++). The
# leading newline removal is for the initial newline that appears when
# defining block strings. For instance:
# textwrap.dedent("""
# print("This is a stmt")
# """)
# produces '\nprint("This is a stmt")\n'.
#
# Stripping this down to 'print("This is a stmt")' doesn't change
# what gets executed, but it makes __repr__'s nicer.
stmt = textwrap.dedent(stmt)
stmt = (stmt[1:] if stmt and stmt[0] == "\n" else stmt).rstrip()
setup = textwrap.dedent(setup)
setup = (setup[1:] if setup and setup[0] == "\n" else setup).rstrip()
self._timer = self._timer_cls(
stmt=stmt,
setup=setup,
timer=timer,
globals=valgrind_timer_interface.CopyIfCallgrind.unwrap_all(self._globals),
**timer_kwargs,
)
self._task_spec = common.TaskSpec(
stmt=stmt,
setup=setup,
global_setup=global_setup,
label=label,
sub_label=sub_label,
description=description,
env=env,
num_threads=num_threads,
)
def _timeit(self, number: int) -> float:
# Even calling a timer in C++ takes ~50 ns, so no real operation should
# take less than 1 ns. (And this prevents divide by zero errors.)
return max(self._timer.timeit(number), 1e-9)
def timeit(self, number: int = 1000000) -> common.Measurement:
"""Mirrors the semantics of timeit.Timer.timeit().
Execute the main statement (`stmt`) `number` times.
https://docs.python.org/3/library/timeit.html#timeit.Timer.timeit
"""
with common.set_torch_threads(self._task_spec.num_threads):
# Warmup
self._timeit(number=max(int(number // 100), 2))
return common.Measurement(
number_per_run=number,
raw_times=[self._timeit(number=number)],
task_spec=self._task_spec
)
def repeat(self, repeat: int = -1, number: int = -1) -> None:
raise NotImplementedError("See `Timer.blocked_autorange.`")
def autorange(self, callback: Optional[Callable[[int, float], NoReturn]] = None) -> None:
raise NotImplementedError("See `Timer.blocked_autorange.`")
def _threaded_measurement_loop(
self,
number: int,
time_hook: Callable[[], float],
stop_hook: Callable[[list[float]], bool],
min_run_time: float,
max_run_time: Optional[float] = None,
callback: Optional[Callable[[int, float], NoReturn]] = None
) -> list[float]:
total_time = 0.0
can_stop = False
times: list[float] = []
with common.set_torch_threads(self._task_spec.num_threads):
while (total_time < min_run_time) or (not can_stop):
time_spent = time_hook()
times.append(time_spent)
total_time += time_spent
if callback:
callback(number, time_spent)
can_stop = stop_hook(times)
if max_run_time and total_time > max_run_time:
break
return times
def _estimate_block_size(self, min_run_time: float) -> int:
with common.set_torch_threads(self._task_spec.num_threads):
# Estimate the block size needed for measurement to be negligible
# compared to the inner loop. This also serves as a warmup.
overhead = torch.tensor([self._timeit(0) for _ in range(5)]).median().item()
number = 1
while True:
time_taken = self._timeit(number)
relative_overhead = overhead / time_taken
if relative_overhead <= 1e-4 and time_taken >= min_run_time / 1000:
break
if time_taken > min_run_time:
break
# Avoid overflow in C++ pybind11 interface
if number * 10 > 2147483647:
break
number *= 10
return number
def blocked_autorange(
self,
callback: Optional[Callable[[int, float], NoReturn]] = None,
min_run_time: float = 0.2,
) -> common.Measurement:
"""Measure many replicates while keeping timer overhead to a minimum.
At a high level, blocked_autorange executes the following pseudo-code::
`setup`
total_time = 0
while total_time < min_run_time
start = timer()
for _ in range(block_size):
`stmt`
total_time += (timer() - start)
Note the variable `block_size` in the inner loop. The choice of block
size is important to measurement quality, and must balance two
competing objectives:
1) A small block size results in more replicates and generally
better statistics.
2) A large block size better amortizes the cost of `timer`
invocation, and results in a less biased measurement. This is
important because CUDA synchronization time is non-trivial
(order single to low double digit microseconds) and would
otherwise bias the measurement.
blocked_autorange sets block_size by running a warmup period,
increasing block size until timer overhead is less than 0.1% of
the overall computation. This value is then used for the main
measurement loop.
Returns:
A `Measurement` object that contains measured runtimes and
repetition counts, and can be used to compute statistics.
(mean, median, etc.)
"""
number = self._estimate_block_size(min_run_time)
def time_hook() -> float:
return self._timeit(number)
def stop_hook(times: list[float]) -> bool:
return True
times = self._threaded_measurement_loop(
number, time_hook, stop_hook,
min_run_time=min_run_time,
callback=callback)
return common.Measurement(
number_per_run=number,
raw_times=times,
task_spec=self._task_spec
)
def adaptive_autorange(
self,
threshold: float = 0.1,
*,
min_run_time: float = 0.01,
max_run_time: float = 10.0,
callback: Optional[Callable[[int, float], NoReturn]] = None,
) -> common.Measurement:
"""Similar to `blocked_autorange` but also checks for variablility in measurements
and repeats until iqr/median is smaller than `threshold` or `max_run_time` is reached.
At a high level, adaptive_autorange executes the following pseudo-code::
`setup`
times = []
while times.sum < max_run_time
start = timer()
for _ in range(block_size):
`stmt`
times.append(timer() - start)
enough_data = len(times)>3 and times.sum > min_run_time
small_iqr=times.iqr/times.mean<threshold
if enough_data and small_iqr:
break
Args:
threshold: value of iqr/median threshold for stopping
min_run_time: total runtime needed before checking `threshold`
max_run_time: total runtime for all measurements regardless of `threshold`
Returns:
A `Measurement` object that contains measured runtimes and
repetition counts, and can be used to compute statistics.
(mean, median, etc.)
"""
number = self._estimate_block_size(min_run_time=0.05)
def time_hook() -> float:
return self._timeit(number)
def stop_hook(times: list[float]) -> bool:
if len(times) > 3:
return common.Measurement(
number_per_run=number,
raw_times=times,
task_spec=self._task_spec
).meets_confidence(threshold=threshold)
return False
times = self._threaded_measurement_loop(
number, time_hook, stop_hook, min_run_time, max_run_time, callback=callback)
return common.Measurement(
number_per_run=number,
raw_times=times,
task_spec=self._task_spec
)
@overload
def collect_callgrind(
self,
number: int,
*,
repeats: None,
collect_baseline: bool,
retain_out_file: bool,
) -> valgrind_timer_interface.CallgrindStats:
...
@overload
def collect_callgrind(
self,
number: int,
*,
repeats: int,
collect_baseline: bool,
retain_out_file: bool,
) -> tuple[valgrind_timer_interface.CallgrindStats, ...]:
...
def collect_callgrind(
self,
number: int = 100,
*,
repeats: Optional[int] = None,
collect_baseline: bool = True,
retain_out_file: bool = False,
) -> Any:
"""Collect instruction counts using Callgrind.
Unlike wall times, instruction counts are deterministic
(modulo non-determinism in the program itself and small amounts of
jitter from the Python interpreter.) This makes them ideal for detailed
performance analysis. This method runs `stmt` in a separate process
so that Valgrind can instrument the program. Performance is severely
degraded due to the instrumentation, however this is ameliorated by
the fact that a small number of iterations is generally sufficient to
obtain good measurements.
        In order to use this method, `valgrind`, `callgrind_control`, and
`callgrind_annotate` must be installed.
Because there is a process boundary between the caller (this process)
and the `stmt` execution, `globals` cannot contain arbitrary in-memory
data structures. (Unlike timing methods) Instead, globals are
        restricted to builtins, `nn.Module`s, and TorchScripted functions/modules
to reduce the surprise factor from serialization and subsequent
deserialization. The `GlobalsBridge` class provides more detail on this
subject. Take particular care with nn.Modules: they rely on pickle and
you may need to add an import to `setup` for them to transfer properly.
By default, a profile for an empty statement will be collected and
cached to indicate how many instructions are from the Python loop which
drives `stmt`.
Returns:
A `CallgrindStats` object which provides instruction counts and
some basic facilities for analyzing and manipulating results.
"""
if not isinstance(self._task_spec.stmt, str):
raise ValueError("`collect_callgrind` currently only supports string `stmt`")
if repeats is not None and repeats < 1:
raise ValueError("If specified, `repeats` must be >= 1")
# Check that the statement is valid. It doesn't guarantee success, but it's much
# simpler and quicker to raise an exception for a faulty `stmt` or `setup` in
# the parent process rather than the valgrind subprocess.
self._timeit(1)
is_python = (self._language == Language.PYTHON)
assert is_python or not self._globals
result = valgrind_timer_interface.wrapper_singleton().collect_callgrind(
task_spec=self._task_spec,
globals=self._globals,
number=number,
repeats=repeats or 1,
collect_baseline=collect_baseline and is_python,
is_python=is_python,
retain_out_file=retain_out_file,
)
return (result[0] if repeats is None else result)
```
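A short, hedged usage sketch of the `Timer` class above (not part of the source dump): the metadata fields feed into `Compare`, and `blocked_autorange` / `adaptive_autorange` return `Measurement` objects. Sizes and labels are illustrative only.

```py
# Hedged sketch of typical Timer usage; values are illustrative.
from torch.utils.benchmark import Timer
t = Timer(
    stmt="y = torch.nn.functional.relu(x + 1)",
    setup="x = torch.randn(1024)",
    num_threads=1,
    label="ReLU(x + 1)",
    sub_label="float",
    description="n=1024",
)
m = t.blocked_autorange(min_run_time=0.2)   # many replicates, fixed block size
print(m)                                    # summary with median / IQR statistics
print(m.median)                             # median time per invocation, in seconds
m2 = t.adaptive_autorange(threshold=0.1)    # repeat until IQR/median < threshold
print(m2)
```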
|
================================================================================================================================================
SOURCE CODE FILE: __init__.py
LINES: 1
SIZE: 0.00 KB
PATH: scripts\freecad_env\Lib\site-packages\torch\utils\benchmark\utils\valgrind_wrapper\__init__.py
ENCODING: utf-8
```py
```
|
================================================================================================================================================
SOURCE CODE FILE: callgrind.h
LINES: 1
SIZE: 5.74 KB
PATH: scripts\freecad_env\Lib\site-packages\torch\utils\benchmark\utils\valgrind_wrapper\callgrind.h
ENCODING: utf-8
```h
/*
----------------------------------------------------------------
Notice that the following BSD-style license applies to this one
file (callgrind.h) only. The rest of Valgrind is licensed under the
terms of the GNU General Public License, version 2, unless
otherwise indicated. See the COPYING file in the source
distribution for details.
----------------------------------------------------------------
This file is part of callgrind, a valgrind tool for cache simulation
and call tree tracing.
Copyright (C) 2003-2017 Josef Weidendorfer. All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions
are met:
1. Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
2. The origin of this software must not be misrepresented; you must
not claim that you wrote the original software. If you use this
software in a product, an acknowledgment in the product
documentation would be appreciated but is not required.
3. Altered source versions must be plainly marked as such, and must
not be misrepresented as being the original software.
4. The name of the author may not be used to endorse or promote
products derived from this software without specific prior written
permission.
THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS
OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE
GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
----------------------------------------------------------------
Notice that the above BSD-style license applies to this one file
(callgrind.h) only. The entire rest of Valgrind is licensed under
the terms of the GNU General Public License, version 2. See the
COPYING file in the source distribution for details.
----------------------------------------------------------------
*/
#ifndef __CALLGRIND_H
#define __CALLGRIND_H
#include "valgrind.h"
/* !! ABIWARNING !! ABIWARNING !! ABIWARNING !! ABIWARNING !!
This enum comprises an ABI exported by Valgrind to programs
which use client requests. DO NOT CHANGE THE ORDER OF THESE
ENTRIES, NOR DELETE ANY -- add new ones at the end.
The identification ('C','T') for Callgrind has historical
reasons: it was called "Calltree" before. Besides, ('C','G') would
clash with cachegrind.
*/
typedef
enum {
VG_USERREQ__DUMP_STATS = VG_USERREQ_TOOL_BASE('C','T'),
VG_USERREQ__ZERO_STATS,
VG_USERREQ__TOGGLE_COLLECT,
VG_USERREQ__DUMP_STATS_AT,
VG_USERREQ__START_INSTRUMENTATION,
VG_USERREQ__STOP_INSTRUMENTATION
} Vg_CallgrindClientRequest;
/* Dump current state of cost centers, and zero them afterwards */
#define CALLGRIND_DUMP_STATS \
VALGRIND_DO_CLIENT_REQUEST_STMT(VG_USERREQ__DUMP_STATS, \
0, 0, 0, 0, 0)
/* Dump current state of cost centers, and zero them afterwards.
The argument is appended to a string stating the reason which triggered
the dump. This string is written as a description field into the
profile data dump. */
#define CALLGRIND_DUMP_STATS_AT(pos_str) \
VALGRIND_DO_CLIENT_REQUEST_STMT(VG_USERREQ__DUMP_STATS_AT, \
pos_str, 0, 0, 0, 0)
/* Zero cost centers */
#define CALLGRIND_ZERO_STATS \
VALGRIND_DO_CLIENT_REQUEST_STMT(VG_USERREQ__ZERO_STATS, \
0, 0, 0, 0, 0)
/* Toggles collection state.
The collection state specifies whether the happening of events
should be noted or if they are to be ignored. Events are noted
by increment of counters in a cost center */
#define CALLGRIND_TOGGLE_COLLECT \
VALGRIND_DO_CLIENT_REQUEST_STMT(VG_USERREQ__TOGGLE_COLLECT, \
0, 0, 0, 0, 0)
/* Start full callgrind instrumentation if not already switched on.
When cache simulation is done, it will flush the simulated cache;
this will lead to an artificial cache warmup phase afterwards with
cache misses which would not have happened in reality. */
#define CALLGRIND_START_INSTRUMENTATION \
VALGRIND_DO_CLIENT_REQUEST_STMT(VG_USERREQ__START_INSTRUMENTATION, \
0, 0, 0, 0, 0)
/* Stop full callgrind instrumentation if not already switched off.
   This flushes Valgrind's translation cache, and does no additional
   instrumentation afterwards, which effectively will run at the same
speed as the "none" tool (ie. at minimal slowdown).
Use this to bypass Callgrind aggregation for uninteresting code parts.
To start Callgrind in this mode to ignore the setup phase, use
the option "--instr-atstart=no". */
#define CALLGRIND_STOP_INSTRUMENTATION \
VALGRIND_DO_CLIENT_REQUEST_STMT(VG_USERREQ__STOP_INSTRUMENTATION, \
0, 0, 0, 0, 0)
#endif /* __CALLGRIND_H */
```

|
========================================================================================================================================================
SOURCE CODE FILE: compat_bindings.cpp
LINES: 1
SIZE: 0.83 KB
PATH: scripts\freecad_env\Lib\site-packages\torch\utils\benchmark\utils\valgrind_wrapper\compat_bindings.cpp
ENCODING: utf-8
```cpp
/* Used to collect profiles of old versions of PyTorch. */
#include <callgrind.h>
#include <pybind11/pybind11.h>
bool _valgrind_supported_platform() {
#if defined(NVALGRIND)
return false;
#else
return true;
#endif
}
void _valgrind_toggle() {
#if defined(NVALGRIND)
TORCH_CHECK(false, "Valgrind is not supported.");
#else
CALLGRIND_TOGGLE_COLLECT;
#endif
}
void _valgrind_toggle_and_dump_stats() {
#if defined(NVALGRIND)
TORCH_CHECK(false, "Valgrind is not supported.");
#else
// NB: See note in Module.cpp
CALLGRIND_TOGGLE_COLLECT;
CALLGRIND_DUMP_STATS;
#endif
}
PYBIND11_MODULE(callgrind_bindings, m) {
m.def("_valgrind_supported_platform", &_valgrind_supported_platform);
m.def("_valgrind_toggle", &_valgrind_toggle);
m.def("_valgrind_toggle_and_dump_stats", &_valgrind_dump_stats);
}
```
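As a rough illustration (mirroring the logic of `_ValgrindWrapper.__init__` in `timer_interface.py` below), the Python side prefers the bindings compiled into `torch._C` and only JIT-compiles this compatibility module when they are absent:

```py
# Hedged sketch of the binding selection performed by the valgrind wrapper.
import torch
from torch.utils.benchmark.utils import cpp_jit
if hasattr(torch._C, "_valgrind_supported_platform"):
    bindings = torch._C                       # bindings baked into the extension
else:
    bindings = cpp_jit.get_compat_bindings()  # JIT-compile compat_bindings.cpp
print(bindings._valgrind_supported_platform())
```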
|
=================================================================================================================================================================
SOURCE CODE FILE: timer_callgrind_template.cpp
LINES: 1
SIZE: 1.70 KB
PATH: scripts\freecad_env\Lib\site-packages\torch\utils\benchmark\utils\valgrind_wrapper\timer_callgrind_template.cpp
ENCODING: utf-8
```cpp
/* C++ template for Timer.collect_callgrind
This template will be consumed by `cpp_jit.py`, and will replace:
`GLOBAL_SETUP_TEMPLATE_LOCATION`,
`SETUP_TEMPLATE_LOCATION`
and
`STMT_TEMPLATE_LOCATION`
sections with user provided statements.
*/
#include <c10/util/irange.h>
#include <callgrind.h>
#include <torch/torch.h>
#include <string>
// Global setup. (e.g. #includes)
// GLOBAL_SETUP_TEMPLATE_LOCATION
#if defined(NVALGRIND)
static_assert(false);
#endif
int main(int argc, char* argv[]) {
// This file should only be called inside of `Timer`, so we can adopt a
// very simple and rigid argument parsing scheme.
TORCH_CHECK(argc == 9);
TORCH_CHECK(std::string(argv[1]) == "--number");
auto number = std::stoi(argv[2]);
TORCH_CHECK(
std::string(argv[3]) == "--number-warmup" ||
std::string(argv[3]) == "--number_warmup");
auto number_warmup = std::stoi(argv[4]);
TORCH_CHECK(std::string(argv[5]) == "--repeats");
auto repeats = std::stoi(argv[6]);
TORCH_CHECK(
std::string(argv[7]) == "--number-threads" ||
std::string(argv[7]) == "--number_threads");
auto number_threads = std::stoi(argv[8]);
torch::set_num_threads(number_threads);
// Setup
// SETUP_TEMPLATE_LOCATION
// Warmup
for (const auto i : c10::irange(number_warmup)) {
(void)i;
// STMT_TEMPLATE_LOCATION
}
// Main loop
for (const auto repeat : c10::irange(repeats)) {
(void)repeat;
CALLGRIND_TOGGLE_COLLECT;
for (const auto i : c10::irange(number)) {
(void)i;
// STMT_TEMPLATE_LOCATION
}
// NB: See note in Module.cpp
CALLGRIND_TOGGLE_COLLECT;
CALLGRIND_DUMP_STATS;
}
}
```
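For reference, a hedged sketch of the call that causes this template to be compiled and executed under Valgrind. It assumes `valgrind`, `callgrind_control`, and `callgrind_annotate` are installed and on the PATH; the statement is illustrative.

```py
# Hedged sketch: instruction counting for a C++ statement via collect_callgrind.
import timeit
from torch.utils.benchmark import Timer
t = Timer(
    stmt="torch::Tensor y = x + 1;",
    setup="auto x = torch::ones({64});",
    timer=timeit.default_timer,
    language="c++",
)
stats = t.collect_callgrind(number=10)
print(stats.counts(denoise=True))
```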
|
=======================================================================================================================================================
SOURCE CODE FILE: timer_interface.py
LINES: 21
SIZE: 37.28 KB
PATH: scripts\freecad_env\Lib\site-packages\torch\utils\benchmark\utils\valgrind_wrapper\timer_interface.py
ENCODING: utf-8
```py
"""Intermediate layer between `Timer` and `valgrind`."""
import collections
import enum
import dataclasses
import itertools as it
import os
import pickle
import re
import shutil
import subprocess
import sys
import textwrap
from typing import (
cast, Any, Callable, NamedTuple,
Optional, Union, TYPE_CHECKING)
from collections.abc import Iterator
import torch
from torch.utils.benchmark.utils import common, cpp_jit
from torch.utils.benchmark.utils._stubs import CallgrindModuleType
import operator
__all__ = ["FunctionCount", "FunctionCounts", "CallgrindStats", "CopyIfCallgrind"]
if TYPE_CHECKING:
CompletedProcessType = subprocess.CompletedProcess[str]
else:
CompletedProcessType = subprocess.CompletedProcess
class FunctionCount(NamedTuple):
# TODO(#105471): Rename the count field
count: int # type: ignore[assignment]
function: str
@dataclasses.dataclass(repr=False, eq=False, frozen=True)
class FunctionCounts:
"""Container for manipulating Callgrind results.
It supports:
1) Addition and subtraction to combine or diff results.
2) Tuple-like indexing.
3) A `denoise` function which strips CPython calls which are known to
be non-deterministic and quite noisy.
4) Two higher order methods (`filter` and `transform`) for custom
manipulation.
"""
_data: tuple[FunctionCount, ...]
inclusive: bool
truncate_rows: bool = True
# For normal use, torch._tensor_str.PRINT_OPTS.linewidth determines
# the print settings. This is simply to allow hermetic unit tests.
_linewidth: Optional[int] = None
def __iter__(self) -> Iterator[FunctionCount]:
yield from self._data
def __len__(self) -> int:
return len(self._data)
def __getitem__(self, item: Any) -> Union[FunctionCount, "FunctionCounts"]:
data: Union[FunctionCount, tuple[FunctionCount, ...]] = self._data[item]
return (
FunctionCounts(cast(tuple[FunctionCount, ...], data), self.inclusive, truncate_rows=False)
if isinstance(data, tuple) else data
)
def __repr__(self) -> str:
count_len = 0
for c, _ in self:
# Account for sign in string length.
count_len = max(count_len, len(str(c)) + int(c < 0))
lines = []
linewidth = self._linewidth or torch._tensor_str.PRINT_OPTS.linewidth
fn_str_len = max(linewidth - count_len - 4, 40)
for c, fn in self:
if len(fn) > fn_str_len:
left_len = int((fn_str_len - 5) // 2)
fn = fn[:left_len] + " ... " + fn[-(fn_str_len - left_len - 5):]
lines.append(f" {c:>{count_len}} {fn}")
if self.truncate_rows and len(lines) > 18:
lines = lines[:9] + ["...".rjust(count_len + 2)] + lines[-9:]
if not self.inclusive:
lines.extend(["", f"Total: {self.sum()}"])
return "\n".join([super().__repr__()] + lines)
def __add__(
self,
other: "FunctionCounts",
) -> "FunctionCounts":
return self._merge(other, lambda c: c)
def __sub__(
self,
other: "FunctionCounts",
) -> "FunctionCounts":
return self._merge(other, operator.neg)
def __mul__(self, other: Union[int, float]) -> "FunctionCounts":
return self._from_dict({
fn: int(c * other) for c, fn in self._data
}, self.inclusive)
def transform(self, map_fn: Callable[[str], str]) -> "FunctionCounts":
"""Apply `map_fn` to all of the function names.
This can be used to regularize function names (e.g. stripping irrelevant
parts of the file path), coalesce entries by mapping multiple functions
to the same name (in which case the counts are added together), etc.
"""
counts: collections.defaultdict[str, int] = collections.defaultdict(int)
for c, fn in self._data:
counts[map_fn(fn)] += c
return self._from_dict(counts, self.inclusive)
def filter(self, filter_fn: Callable[[str], bool]) -> "FunctionCounts":
"""Keep only the elements where `filter_fn` applied to function name returns True."""
return FunctionCounts(tuple(i for i in self if filter_fn(i.function)), self.inclusive)
def sum(self) -> int:
return sum(c for c, _ in self)
def denoise(self) -> "FunctionCounts":
"""Remove known noisy instructions.
Several instructions in the CPython interpreter are rather noisy. These
instructions involve unicode to dictionary lookups which Python uses to
map variable names. FunctionCounts is generally a content agnostic
container, however this is sufficiently important for obtaining
reliable results to warrant an exception."""
return self.filter(lambda fn: "dictobject.c:lookdict_unicode" not in fn)
def _merge(
self,
second: "FunctionCounts",
merge_fn: Callable[[int], int]
) -> "FunctionCounts":
assert self.inclusive == second.inclusive, "Cannot merge inclusive and exclusive counts."
counts: collections.defaultdict[str, int] = collections.defaultdict(int)
for c, fn in self:
counts[fn] += c
for c, fn in second:
counts[fn] += merge_fn(c)
return self._from_dict(counts, self.inclusive)
@staticmethod
def _from_dict(counts: dict[str, int], inclusive: bool) -> "FunctionCounts":
flat_counts = (FunctionCount(c, fn) for fn, c in counts.items() if c)
return FunctionCounts(tuple(sorted(flat_counts, reverse=True)), inclusive)
@dataclasses.dataclass(repr=False, eq=False, frozen=True)
class CallgrindStats:
"""Top level container for Callgrind results collected by Timer.
Manipulation is generally done using the FunctionCounts class, which is
obtained by calling `CallgrindStats.stats(...)`. Several convenience
methods are provided as well; the most significant is
`CallgrindStats.as_standardized()`.
"""
task_spec: common.TaskSpec
number_per_run: int
built_with_debug_symbols: bool
baseline_inclusive_stats: FunctionCounts
baseline_exclusive_stats: FunctionCounts
stmt_inclusive_stats: FunctionCounts
stmt_exclusive_stats: FunctionCounts
stmt_callgrind_out: Optional[str]
def __repr__(self) -> str:
base_stats = self.baseline_exclusive_stats
output = f"""
{super().__repr__()}
{self.task_spec.summarize()}
{'':>25}All{'':>10}Noisy symbols removed
Instructions: {self.counts(denoise=False):>12}{'':>15}{self.counts(denoise=True):>12}
Baseline: {base_stats.sum():>12}{'':>15}{base_stats.denoise().sum():>12}
{self.number_per_run} runs per measurement, {self.task_spec.num_threads} thread{'s' if self.task_spec.num_threads > 1 else ''}
""".strip()
if not self.built_with_debug_symbols:
output += textwrap.dedent("""
Warning: PyTorch was not built with debug symbols.
Source information may be limited. Rebuild with
REL_WITH_DEB_INFO=1 for more detailed results.""")
return output
def stats(self, inclusive: bool = False) -> FunctionCounts:
"""Returns detailed function counts.
Conceptually, the FunctionCounts returned can be thought of as a tuple
of (count, path_and_function_name) tuples.
`inclusive` matches the semantics of callgrind. If True, the counts
include instructions executed by children. `inclusive=True` is useful
for identifying hot spots in code; `inclusive=False` is useful for
reducing noise when diffing counts from two different runs. (See
CallgrindStats.delta(...) for more details)
"""
return self.stmt_inclusive_stats if inclusive else self.stmt_exclusive_stats
def counts(self, *, denoise: bool = False) -> int:
"""Returns the total number of instructions executed.
See `FunctionCounts.denoise()` for an explanation of the `denoise` arg.
"""
stats = self.stmt_exclusive_stats
return (stats.denoise() if denoise else stats).sum()
# FIXME: Once 3.7 is the minimum version, type annotate `other` per PEP 563
def delta(
self,
other: "CallgrindStats",
inclusive: bool = False,
) -> FunctionCounts:
"""Diff two sets of counts.
One common reason to collect instruction counts is to determine the
the effect that a particular change will have on the number of instructions
needed to perform some unit of work. If a change increases that number, the
next logical question is "why". This generally involves looking at what part
if the code increased in instruction count. This function automates that
process so that one can easily diff counts on both an inclusive and
exclusive basis.
"""
return self.stats(inclusive=inclusive) - other.stats(inclusive=inclusive)
def as_standardized(self) -> "CallgrindStats":
"""Strip library names and some prefixes from function strings.
        When comparing two different sets of instruction counts, one stumbling
        block can be path prefixes. Callgrind includes the full filepath
        when reporting a function (as it should). However, this can cause
        issues when diffing profiles. If a key component such as Python
        or PyTorch was built in separate locations in the two profiles, this
        can result in something resembling::
23234231 /tmp/first_build_dir/thing.c:foo(...)
9823794 /tmp/first_build_dir/thing.c:bar(...)
...
53453 .../aten/src/Aten/...:function_that_actually_changed(...)
...
-9823794 /tmp/second_build_dir/thing.c:bar(...)
-23234231 /tmp/second_build_dir/thing.c:foo(...)
Stripping prefixes can ameliorate this issue by regularizing the
strings and causing better cancellation of equivalent call sites
when diffing.
"""
def strip(stats: FunctionCounts) -> FunctionCounts:
transforms = (
# PyTorch may have been built in different locations.
(r"^.+build/\.\./", "build/../"),
(r"^.+/" + re.escape("build/aten/"), "build/aten/"),
# "Python" and "Objects" come from CPython.
(r"^.+/" + re.escape("Python/"), "Python/"),
(r"^.+/" + re.escape("Objects/"), "Objects/"),
# Strip library name. e.g. `libtorch.so`
(r"\s\[.+\]$", ""),
)
for before, after in transforms:
stats = stats.transform(lambda fn: re.sub(before, after, fn))
return stats
return CallgrindStats(
task_spec=self.task_spec,
number_per_run=self.number_per_run,
built_with_debug_symbols=self.built_with_debug_symbols,
baseline_inclusive_stats=strip(self.baseline_inclusive_stats),
baseline_exclusive_stats=strip(self.baseline_exclusive_stats),
stmt_inclusive_stats=strip(self.stmt_inclusive_stats),
stmt_exclusive_stats=strip(self.stmt_exclusive_stats),
# `as_standardized` will change symbol names, so the contents will
# no longer map directly to `callgrind.out`
stmt_callgrind_out=None,
)
class Serialization(enum.Enum):
PICKLE = 0
TORCH = 1
TORCH_JIT = 2
_GLOBALS_ALLOWED_TYPES: dict[Serialization, tuple[Any, ...]] = {
Serialization.PICKLE: (str, bytes, bool, int, float, complex),
Serialization.TORCH_JIT: (torch.jit.ScriptFunction, torch.jit.ScriptModule),
Serialization.TORCH: (torch.nn.Module,),
}
class CopyIfCallgrind:
"""Signal that a global may be replaced with a deserialized copy.
See `GlobalsBridge` for why this matters.
"""
def __init__(self, value: Any, *, setup: Optional[str] = None):
for method, supported_types in _GLOBALS_ALLOWED_TYPES.items():
if any(isinstance(value, t) for t in supported_types):
self._value: Any = value
self._setup: Optional[str] = setup
self._serialization: Serialization = method
break
else:
supported_str = "\n".join([
getattr(t, "__name__", repr(t))
                for t in it.chain(*_GLOBALS_ALLOWED_TYPES.values())])
raise ValueError(
f"Unsupported type: {type(value)}\n"
f"`collect_callgrind` restricts globals to the following types:\n"
f"{textwrap.indent(supported_str, ' ')}"
)
@property
def value(self) -> Any:
return self._value
@property
def setup(self) -> Optional[str]:
return self._setup
@property
def serialization(self) -> Serialization:
return self._serialization
@staticmethod
def unwrap_all(globals: dict[str, Any]) -> dict[str, Any]:
return {
k: (v.value if isinstance(v, CopyIfCallgrind) else v)
for k, v in globals.items()
}
class GlobalsBridge:
"""Handle the transfer of (certain) globals when collecting Callgrind statistics.
Key takeaway: Any globals passed must be wrapped in `CopyIfCallgrind` to
work with `Timer.collect_callgrind`.
Consider the following code snippet:
```
import pickle
import timeit
class Counter:
value = 0
def __call__(self):
self.value += 1
counter = Counter()
timeit.Timer("counter()", globals={"counter": counter}).timeit(10)
print(counter.value) # 10
timeit.Timer(
"counter()",
globals={"counter": pickle.loads(pickle.dumps(counter))}
).timeit(20)
print(counter.value) # Still 10
```
In the first case, `stmt` is executed using the objects in `globals`;
however, the addition of serialization and deserialization changes the
semantics and may meaningfully change behavior.
This is a practical consideration when collecting Callgrind statistics.
    Unlike `exec` based execution (which `timeit` uses under the hood), which
can share in-memory data structures with the caller, Callgrind collection
requires an entirely new process in order to run under Valgrind. This means
that any data structures used for statement execution will have to be
serialized and deserialized in the subprocess.
In order to avoid surprising semantics from (user invisible) process
boundaries, what can be passed through `globals` is severely restricted
for `Timer.collect_callgrind`. It is expected that most setup should be
achievable (albeit perhaps less ergonomically) by passing a `setup`
string.
    There are, however, exceptions. One such class is TorchScripted functions.
    Because they require a concrete file with source code, it is not possible
    to define them using a `setup` string. Another group is torch.nn.Modules,
whose construction can be complex and prohibitively cumbersome to coerce
into a `setup` string. Finally, most builtin types are sufficiently well
behaved and sufficiently common to warrant allowing as well. (e.g.
`globals={"n": 1}` is very convenient.)
Fortunately, all have well defined serialization semantics. This class
is responsible for enabling the Valgrind subprocess to use elements in
`globals` so long as they are an allowed type.
Caveats:
The user is required to acknowledge this serialization by wrapping
elements in `globals` with `CopyIfCallgrind`.
While ScriptFunction and ScriptModule are expected to save and load
quite robustly, it is up to the user to ensure that an nn.Module can
un-pickle successfully.
`torch.Tensor` and `np.ndarray` are deliberately excluded. The
serialization/deserialization process perturbs the representation of a
tensor in ways that could result in incorrect measurements. For example,
if a tensor lives in pinned CPU memory, this fact would not be preserved
by a dump, and that will in turn change the performance of certain CUDA
operations.
"""
def __init__(self, globals: dict[str, Any], data_dir: str) -> None:
self._globals: dict[str, CopyIfCallgrind] = {}
self._data_dir = data_dir
if not os.path.exists(data_dir):
os.mkdir(data_dir)
if globals.get("torch", torch) is not torch:
raise ValueError("`collect_callgrind` does not support mocking out `torch`.")
for name, value in globals.items():
if name in ("torch", "__builtins__"):
# Torch will be imported by the collection script, and
# __builtins__ is added by Timer.
continue
if not isinstance(value, CopyIfCallgrind):
raise ValueError(
"`collect_callgrind` requires that globals be wrapped in "
"`CopyIfCallgrind` so that serialization is explicit."
)
self._globals[name] = value
def construct(self) -> str:
load_lines = []
for name, wrapped_value in self._globals.items():
if wrapped_value.setup is not None:
load_lines.append(textwrap.dedent(wrapped_value.setup))
if wrapped_value.serialization == Serialization.PICKLE:
path = os.path.join(self._data_dir, f"{name}.pkl")
load_lines.append(
f"with open({repr(path)}, 'rb') as f:\n {name} = pickle.load(f)")
with open(path, "wb") as f:
pickle.dump(wrapped_value.value, f)
elif wrapped_value.serialization == Serialization.TORCH:
path = os.path.join(self._data_dir, f"{name}.pt")
# TODO: Figure out if we can use torch.serialization.add_safe_globals here
# Using weights_only=False after the change in
# https://dev-discuss.pytorch.org/t/bc-breaking-change-torch-load-is-being-flipped-to-use-weights-only-true-by-default-in-the-nightlies-after-137602/2573
load_lines.append(f"{name} = torch.load({repr(path)}, weights_only=False)")
torch.save(wrapped_value.value, path)
elif wrapped_value.serialization == Serialization.TORCH_JIT:
path = os.path.join(self._data_dir, f"{name}.pt")
load_lines.append(f"{name} = torch.jit.load({repr(path)})")
with open(path, "wb") as f:
torch.jit.save(wrapped_value.value, f) # type: ignore[no-untyped-call]
else:
raise NotImplementedError(
f"Unknown serialization method: {wrapped_value.serialization}")
return "\n".join(load_lines)
class _ValgrindWrapper:
def __init__(self) -> None:
self._bindings_module: Optional[CallgrindModuleType] = None
valgrind_symbols = (
"_valgrind_supported_platform",
"_valgrind_toggle",
"_valgrind_toggle_and_dump_stats",
)
if all(hasattr(torch._C, symbol) for symbol in valgrind_symbols):
self._supported_platform: bool = torch._C._valgrind_supported_platform()
else:
print("Callgrind bindings are not present in `torch._C`. JIT-ing bindings.")
self._bindings_module = cpp_jit.get_compat_bindings()
assert all(hasattr(self._bindings_module, symbol) for symbol in valgrind_symbols)
self._supported_platform = self._bindings_module._valgrind_supported_platform()
self._commands_available: dict[str, bool] = {}
if self._supported_platform:
# Only bother checking on supported platforms.
for cmd in ("valgrind", "callgrind_control", "callgrind_annotate"):
self._commands_available[cmd] = not subprocess.run(
["which", cmd],
capture_output=True,
check=False,
).returncode
self._build_type: Optional[str] = None
build_search = re.search("BUILD_TYPE=(.+),", torch.__config__.show()) # type: ignore[no-untyped-call]
if build_search is not None:
self._build_type = build_search.groups()[0].split(",")[0]
def _validate(self) -> None:
if not self._supported_platform:
raise OSError("Valgrind is not supported on this platform.")
missing_cmds = [cmd for cmd, available in self._commands_available.items() if not available]
if missing_cmds:
raise OSError("Missing: " + ", ".join(missing_cmds))
def collect_callgrind(
self,
task_spec: common.TaskSpec,
globals: dict[str, Any],
*,
number: int,
repeats: int,
collect_baseline: bool,
is_python: bool,
retain_out_file: bool,
) -> tuple[CallgrindStats, ...]:
"""Collect stats, and attach a reference run which can be used to filter interpreter overhead."""
self._validate()
assert is_python or not collect_baseline
*task_stats, baseline_stats = self._invoke(
task_spec=task_spec,
globals=globals,
number=number,
repeats=repeats,
collect_baseline=collect_baseline,
is_python=is_python,
retain_out_file=retain_out_file,
)
assert len(task_stats) == repeats
return tuple(
CallgrindStats(
task_spec=task_spec,
number_per_run=number,
built_with_debug_symbols=self._build_type == "RelWithDebInfo",
baseline_inclusive_stats=baseline_stats[0],
baseline_exclusive_stats=baseline_stats[1],
stmt_inclusive_stats=stmt_inclusive_stats,
stmt_exclusive_stats=stmt_exclusive_stats,
stmt_callgrind_out=out_contents,
)
for stmt_inclusive_stats, stmt_exclusive_stats, out_contents in task_stats
)
def _invoke(
self,
*,
task_spec: common.TaskSpec,
globals: dict[str, Any],
number: int,
repeats: int,
collect_baseline: bool,
is_python: bool,
retain_out_file: bool,
) -> tuple[tuple[FunctionCounts, FunctionCounts, Optional[str]], ...]:
"""Core invocation method for Callgrind collection.
Valgrind operates by effectively replacing the CPU with an emulated
version which allows it to instrument any code at the cost of severe
performance degradation. This has the practical effect that in order
to collect Callgrind statistics, a new process has to be created
running under `valgrind`. The steps for this process are:
1) Create a scratch directory.
2) Codegen a run script. (_ValgrindWrapper._construct_script)
Inside the run script:
* Validate that Python and torch match the parent process
* Validate that it is indeed running under valgrind
* Execute `setup` and warm up `stmt`
* Begin collecting stats
* Run the `stmt` loop
* Stop collecting stats
3) Parse the run results.
4) Cleanup the scratch directory.
"""
working_dir = common._make_temp_dir(prefix="callgrind")
data_dir = os.path.join(working_dir, "data")
script_file = os.path.join(working_dir, "timer_callgrind.py")
callgrind_out = os.path.join(working_dir, "callgrind.out")
error_log = os.path.join(working_dir, "error.txt")
stat_log = os.path.join(working_dir, "callgrind_stat.txt")
stdout_stderr_log = os.path.join(working_dir, "stdout_stderr.log")
def run(args: list[str], **kwargs: Any) -> tuple[CompletedProcessType, str]:
# https://thraxil.org/users/anders/posts/2008/03/13/Subprocess-Hanging-PIPE-is-your-enemy/
f_stdout_stderr = open(stdout_stderr_log, "wb")
try:
invocation = subprocess.run(
args,
stdout=f_stdout_stderr,
stderr=subprocess.STDOUT,
**kwargs,
)
with open(stdout_stderr_log) as f:
return invocation, f.read()
finally:
f_stdout_stderr.close()
try:
if is_python:
if self._bindings_module is not None:
shutil.copy(
self._bindings_module.__file__,
os.path.join(working_dir, os.path.split(self._bindings_module.__file__)[1])
)
script_file = os.path.join(working_dir, "timer_callgrind.py")
with open(script_file, "w") as f:
f.write(self._construct_script(
task_spec,
globals=GlobalsBridge(globals, data_dir),
number=number,
repeats=repeats,
collect_baseline=collect_baseline,
error_log=error_log,
stat_log=stat_log,
bindings=self._bindings_module))
run_loop_cmd = ["python", script_file]
else:
assert not collect_baseline
run_loop_exec = cpp_jit.compile_callgrind_template(
stmt=task_spec.stmt,
setup=task_spec.setup,
global_setup=task_spec.global_setup,
)
run_loop_cmd = [
run_loop_exec,
"--number", str(number),
"--number-warmup", str(min(number, 10)),
"--repeats", str(repeats),
"--number-threads", str(task_spec.num_threads),
]
valgrind_invocation, valgrind_invocation_output = run([
"valgrind",
"--tool=callgrind",
f"--callgrind-out-file={callgrind_out}",
"--dump-line=yes",
"--dump-instr=yes",
"--instr-atstart=yes",
"--collect-atstart=no",
] + run_loop_cmd)
if valgrind_invocation.returncode:
error_report = ""
if os.path.exists(error_log):
with open(error_log) as f:
error_report = f.read()
if not error_report:
error_report = "Unknown error.\n" + valgrind_invocation_output
raise OSError(f"Failed to collect callgrind profile:\n{error_report}")
def parse_output(fpath: str, inclusive: bool) -> FunctionCounts:
_annotate_invocation, annotate_invocation_output = run([
"callgrind_annotate",
f"--inclusive={'yes' if inclusive else 'no'}",
"--threshold=100",
"--show-percs=no",
fpath
], check=True)
total_pattern = re.compile(r"^([0-9,]+)\s+PROGRAM TOTALS")
begin_pattern = re.compile(r"Ir\s+file:function")
function_pattern = re.compile(r"^\s*([0-9,]+)\s+(.+:.+)$")
class ScanState(enum.Enum):
SCANNING_FOR_TOTAL = 0
SCANNING_FOR_START = 1
PARSING = 2
scan_state = ScanState.SCANNING_FOR_TOTAL
fn_counts = []
for l in annotate_invocation_output.splitlines(keepends=False):
if scan_state == ScanState.SCANNING_FOR_TOTAL:
total_match = total_pattern.match(l)
if total_match:
program_totals = int(total_match.groups()[0].replace(",", ""))
scan_state = ScanState.SCANNING_FOR_START
elif scan_state == ScanState.SCANNING_FOR_START:
if begin_pattern.match(l):
scan_state = ScanState.PARSING
else:
assert scan_state == ScanState.PARSING
fn_match = function_pattern.match(l)
if fn_match:
ir_str, file_function = fn_match.groups()
ir = int(ir_str.replace(",", ""))
if ir == program_totals: # type: ignore[possibly-undefined]
# Callgrind includes some top level red herring symbols when
# a program dumps multiple profiles.
continue
fn_counts.append(FunctionCount(ir, file_function))
elif re.match(r"-+", l):
# Ignore heading separator lines.
continue
else:
break
assert scan_state == ScanState.PARSING, f"Failed to parse {fpath}"
return FunctionCounts(tuple(sorted(fn_counts, reverse=True)), inclusive=inclusive)
def read_results(i: int) -> tuple[FunctionCounts, FunctionCounts, Optional[str]]:
if i == repeats and not collect_baseline:
# Null baseline.
return (
FunctionCounts((), inclusive=True),
FunctionCounts((), inclusive=False),
None,
)
fpath = f"{callgrind_out}.{i + 1}" # Callgrind one-indexes files.
callgrind_out_contents: Optional[str] = None
if retain_out_file:
with open(fpath) as f:
callgrind_out_contents = f.read()
return (
parse_output(fpath, inclusive=True),
parse_output(fpath, inclusive=False),
callgrind_out_contents
)
return tuple(read_results(i) for i in range(repeats + 1))
finally:
shutil.rmtree(working_dir)
@staticmethod
def _construct_script(
task_spec: common.TaskSpec,
globals: GlobalsBridge,
*,
number: int,
repeats: int,
collect_baseline: bool,
error_log: str,
stat_log: str,
bindings: Optional[CallgrindModuleType],
) -> str:
def block_stmt(stmt: str, indent: int = 0) -> str:
"""Partially unroll benchmark loop.
The naive template looks something like:
"for _ in range({number}): {stmt}"
However a loop in Python is surprisingly expensive, and significantly
increases the number of background Python instructions. So instead we
partially unroll the loops, with a block size of 100 chosen to keep
the instruction overhead from `range` low while also not ballooning
the size of the generated file.
"""
block_size = 100
loop_count = number // block_size
if loop_count == 1:
# There is no point in having `for _ in range(1): ...` rather
                # than just `...`, and this lets us shave a few background
# instructions.
loop_count = 0
remainder = number - block_size * loop_count
blocked_stmt = ""
if loop_count:
unrolled_stmts = textwrap.indent("\n".join([stmt] * block_size), " " * 4)
blocked_stmt += f"for _ in range({loop_count}):\n{unrolled_stmts}\n"
if remainder:
blocked_stmt += "\n".join([stmt] * remainder)
return textwrap.indent(blocked_stmt, " " * indent)
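        # For example (a hedged illustration, not part of the original source),
        # block_stmt("x += 1", indent=0) with number=250 would emit roughly:
        #     for _ in range(2):
        #         x += 1
        #         ... (100 copies per block)
        #     x += 1
        #     ... (50 remaining copies)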
pass_baseline = (
"callgrind_bindings._valgrind_toggle()\n"
f"{block_stmt('pass')}\n"
"callgrind_bindings._valgrind_toggle_and_dump_stats()"
)
return textwrap.dedent(r"""
import gc
import os
import pickle
import subprocess
import sys
import time
# Mitigate https://github.com/pytorch/pytorch/issues/37377
# which can sometimes cause the subprocess call to fail.
import numpy as np
import torch
torch.set_num_threads({num_threads})
{bindings_import}
PID = os.getpid()
def log_failure(msg):
with open({error_log_repr}, "wt") as f:
f.write(msg)
sys.exit(1)
def check_result(completed_process):
if completed_process.returncode:
log_failure(f"Command failed: {{' '.join(completed_process.args)}}")
return completed_process
# =============================================================================
# == Check that subprocess matches parent =====================================
# =============================================================================
if os.path.realpath(sys.executable) != "{parent_interpreter}":
log_failure(
"Interpreter mismatch:\n"
f" {{os.path.realpath(sys.executable)}}\n vs.\n {parent_interpreter}"
)
if torch.__file__ != "{torch_file}":
log_failure(
"PyTorch does not match expected file:\n"
f" {{torch.__file__}}\n vs.\n {torch_file}"
)
# =============================================================================
# == User specified setup =====================================================
# =============================================================================
# Load serialized globals
{load_globals}
# User setup str
{setup}
for _ in range({warmup_number}):
{indented_stmt}
# =============================================================================
# == Callgrind management =====================================================
# =============================================================================
with open("{stat_log}", "wb") as stat_file:
# If many instances of callgrind are running at once, the output of
# `callgrind_control` may exceed 16kb which would cause `subprocess.PIPE`
# to deadlock. So instead we use a file.
callgrind_stat = check_result(subprocess.run(
["callgrind_control", "--stat"],
stdout=stat_file,
stderr=subprocess.STDOUT,
))
with open("{stat_log}", "rt") as stat_file:
stat_lines = stat_file.read().splitlines()
if f"PID {{PID}}: python {{__file__}}" not in stat_lines:
log_failure("Process does not appear to be running callgrind.")
gc.collect()
time.sleep(0.01)
# =============================================================================
# == User code block ==========================================================
# =============================================================================
for _ in range({repeats}):
callgrind_bindings._valgrind_toggle()
{blocked_stmt}
callgrind_bindings._valgrind_toggle_and_dump_stats()
gc.collect()
{baseline}
""").strip().format(
indented_stmt=textwrap.indent(task_spec.stmt, " " * 4),
blocked_stmt=block_stmt(task_spec.stmt, indent=4),
baseline=(pass_baseline if collect_baseline else ""),
number=number,
repeats=repeats,
load_globals=globals.construct(),
setup=task_spec.setup,
warmup_number=min(number, 10),
num_threads=task_spec.num_threads,
error_log_repr=repr(error_log),
stat_log=stat_log,
parent_interpreter=os.path.realpath(sys.executable),
torch_file=torch.__file__,
bindings_import=(
"import torch._C as callgrind_bindings" if bindings is None
else f"import {bindings.__name__} as callgrind_bindings"),
)
CALLGRIND_SINGLETON: Optional[_ValgrindWrapper] = None
def wrapper_singleton() -> _ValgrindWrapper:
global CALLGRIND_SINGLETON
if CALLGRIND_SINGLETON is None:
CALLGRIND_SINGLETON = _ValgrindWrapper()
return CALLGRIND_SINGLETON
```
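To tie the pieces together, a hedged end-to-end sketch of the Python-side workflow this module supports (again assuming the Valgrind toolchain is installed): globals must be wrapped in `CopyIfCallgrind`, and results are typically standardized and denoised before inspection or diffing.

```py
# Hedged sketch of the Python-side Callgrind workflow; values are illustrative.
from torch.utils.benchmark import Timer
from torch.utils.benchmark.utils.valgrind_wrapper.timer_interface import CopyIfCallgrind
t = Timer(
    stmt="y = x + n",
    setup="x = torch.ones((8, 8))",
    globals={"n": CopyIfCallgrind(1)},    # int is an allowed (picklable) type
)
stats = t.collect_callgrind(number=10)
counts = stats.as_standardized().stats(inclusive=False).denoise()
print(counts[:10])                        # top entries by instruction count
# Diffing two runs (e.g. before/after a change) uses CallgrindStats.delta:
# delta = stats_after.delta(stats_before)
```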
|
===============================================================================================================================================
SOURCE CODE FILE: valgrind.h
LINES: 1657
SIZE: 419.74 KB
PATH: scripts\freecad_env\Lib\site-packages\torch\utils\benchmark\utils\valgrind_wrapper\valgrind.h
ENCODING: utf-8
```h
/* -*- c -*-
----------------------------------------------------------------
Notice that the following BSD-style license applies to this one
file (valgrind.h) only. The rest of Valgrind is licensed under the
terms of the GNU General Public License, version 2, unless
otherwise indicated. See the COPYING file in the source
distribution for details.
----------------------------------------------------------------
This file is part of Valgrind, a dynamic binary instrumentation
framework.
Copyright (C) 2000-2017 Julian Seward. All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions
are met:
1. Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
2. The origin of this software must not be misrepresented; you must
not claim that you wrote the original software. If you use this
software in a product, an acknowledgment in the product
documentation would be appreciated but is not required.
3. Altered source versions must be plainly marked as such, and must
not be misrepresented as being the original software.
4. The name of the author may not be used to endorse or promote
products derived from this software without specific prior written
permission.
THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS
OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE
GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
----------------------------------------------------------------
Notice that the above BSD-style license applies to this one file
(valgrind.h) only. The entire rest of Valgrind is licensed under
the terms of the GNU General Public License, version 2. See the
COPYING file in the source distribution for details.
----------------------------------------------------------------
*/
/* This file is for inclusion into client (your!) code.
You can use these macros to manipulate and query Valgrind's
execution inside your own programs.
The resulting executables will still run without Valgrind, just a
little bit more slowly than they otherwise would, but otherwise
unchanged. When not running on valgrind, each client request
consumes very few (eg. 7) instructions, so the resulting performance
loss is negligible unless you plan to execute client requests
millions of times per second. Nevertheless, if that is still a
problem, you can compile with the NVALGRIND symbol defined (gcc
-DNVALGRIND) so that client requests are not even compiled in. */
#ifndef __VALGRIND_H
#define __VALGRIND_H
/* ------------------------------------------------------------------ */
/* VERSION NUMBER OF VALGRIND */
/* ------------------------------------------------------------------ */
/* Specify Valgrind's version number, so that user code can
conditionally compile based on our version number. Note that these
were introduced at version 3.6 and so do not exist in version 3.5
or earlier. The recommended way to use them to check for "version
X.Y or later" is (eg)
#if defined(__VALGRIND_MAJOR__) && defined(__VALGRIND_MINOR__) \
&& (__VALGRIND_MAJOR__ > 3 \
|| (__VALGRIND_MAJOR__ == 3 && __VALGRIND_MINOR__ >= 6))
*/
#define __VALGRIND_MAJOR__ 3
#define __VALGRIND_MINOR__ 17
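/* Editor's illustrative sketch (not part of the original header): a client
   translation unit could gate newer functionality on the version macros
   above, falling back when they are absent (pre-3.6 copies of this header
   do not define them).  MY_HAVE_MODERN_VALGRIND_H is a placeholder name,
   not a Valgrind API. */
#if 0
#if defined(__VALGRIND_MAJOR__) && defined(__VALGRIND_MINOR__) \
    && (__VALGRIND_MAJOR__ > 3 \
        || (__VALGRIND_MAJOR__ == 3 && __VALGRIND_MINOR__ >= 6))
  /* Header is 3.6 or later; version-dependent requests may be used. */
# define MY_HAVE_MODERN_VALGRIND_H 1
#else
# define MY_HAVE_MODERN_VALGRIND_H 0
#endif
#endif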
#include <stdarg.h>
/* Nb: this file might be included in a file compiled with -ansi. So
we can't use C++ style "//" comments nor the "asm" keyword (instead
use "__asm__"). */
/* Derive some tags indicating what the target platform is. Note
that in this file we're using the compiler's CPP symbols for
identifying architectures, which are different to the ones we use
within the rest of Valgrind. Note, __powerpc__ is active for both
32 and 64-bit PPC, whereas __powerpc64__ is only active for the
latter (on Linux, that is).
Misc note: how to find out what's predefined in gcc by default:
gcc -Wp,-dM somefile.c
*/
#undef PLAT_x86_darwin
#undef PLAT_amd64_darwin
#undef PLAT_x86_win32
#undef PLAT_amd64_win64
#undef PLAT_x86_linux
#undef PLAT_amd64_linux
#undef PLAT_ppc32_linux
#undef PLAT_ppc64be_linux
#undef PLAT_ppc64le_linux
#undef PLAT_arm_linux
#undef PLAT_arm64_linux
#undef PLAT_s390x_linux
#undef PLAT_mips32_linux
#undef PLAT_mips64_linux
#undef PLAT_nanomips_linux
#undef PLAT_x86_solaris
#undef PLAT_amd64_solaris
#if defined(__APPLE__) && defined(__i386__)
# define PLAT_x86_darwin 1
#elif defined(__APPLE__) && defined(__x86_64__)
# define PLAT_amd64_darwin 1
#elif (defined(__MINGW32__) && defined(__i386__)) \
|| defined(__CYGWIN32__) \
|| (defined(_WIN32) && defined(_M_IX86))
# define PLAT_x86_win32 1
#elif (defined(__MINGW32__) && defined(__x86_64__)) \
|| (defined(_WIN32) && defined(_M_X64))
/* __MINGW32__ and _WIN32 are defined in 64 bit mode as well. */
# define PLAT_amd64_win64 1
#elif defined(__linux__) && defined(__i386__)
# define PLAT_x86_linux 1
#elif defined(__linux__) && defined(__x86_64__) && !defined(__ILP32__)
# define PLAT_amd64_linux 1
#elif defined(__linux__) && defined(__powerpc__) && !defined(__powerpc64__)
# define PLAT_ppc32_linux 1
#elif defined(__linux__) && defined(__powerpc__) && defined(__powerpc64__) && _CALL_ELF != 2
/* Big Endian uses ELF version 1 */
# define PLAT_ppc64be_linux 1
#elif defined(__linux__) && defined(__powerpc__) && defined(__powerpc64__) && _CALL_ELF == 2
/* Little Endian uses ELF version 2 */
# define PLAT_ppc64le_linux 1
#elif defined(__linux__) && defined(__arm__) && !defined(__aarch64__)
# define PLAT_arm_linux 1
#elif defined(__linux__) && defined(__aarch64__) && !defined(__arm__)
# define PLAT_arm64_linux 1
#elif defined(__linux__) && defined(__s390__) && defined(__s390x__)
# define PLAT_s390x_linux 1
#elif defined(__linux__) && defined(__mips__) && (__mips==64)
# define PLAT_mips64_linux 1
#elif defined(__linux__) && defined(__mips__) && (__mips==32)
# define PLAT_mips32_linux 1
#elif defined(__linux__) && defined(__nanomips__)
# define PLAT_nanomips_linux 1
#elif defined(__sun) && defined(__i386__)
# define PLAT_x86_solaris 1
#elif defined(__sun) && defined(__x86_64__)
# define PLAT_amd64_solaris 1
#else
/* If we're not compiling for our target platform, don't generate
any inline asms. */
# if !defined(NVALGRIND)
# define NVALGRIND 1
# endif
#endif
/* ------------------------------------------------------------------ */
/* ARCHITECTURE SPECIFICS for SPECIAL INSTRUCTIONS. There is nothing */
/* in here of use to end-users -- skip to the next section. */
/* ------------------------------------------------------------------ */
/*
* VALGRIND_DO_CLIENT_REQUEST(): a statement that invokes a Valgrind client
* request. Accepts both pointers and integers as arguments.
*
* VALGRIND_DO_CLIENT_REQUEST_STMT(): a statement that invokes a Valgrind
* client request that does not return a value.
* VALGRIND_DO_CLIENT_REQUEST_EXPR(): a C expression that invokes a Valgrind
* client request and whose value equals the client request result. Accepts
* both pointers and integers as arguments. Note that such calls are not
* necessarily pure functions -- they may have side effects.
*/
#define VALGRIND_DO_CLIENT_REQUEST(_zzq_rlval, _zzq_default, \
_zzq_request, _zzq_arg1, _zzq_arg2, \
_zzq_arg3, _zzq_arg4, _zzq_arg5) \
do { (_zzq_rlval) = VALGRIND_DO_CLIENT_REQUEST_EXPR((_zzq_default), \
(_zzq_request), (_zzq_arg1), (_zzq_arg2), \
(_zzq_arg3), (_zzq_arg4), (_zzq_arg5)); } while (0)
#define VALGRIND_DO_CLIENT_REQUEST_STMT(_zzq_request, _zzq_arg1, \
_zzq_arg2, _zzq_arg3, _zzq_arg4, _zzq_arg5) \
do { (void) VALGRIND_DO_CLIENT_REQUEST_EXPR(0, \
(_zzq_request), (_zzq_arg1), (_zzq_arg2), \
(_zzq_arg3), (_zzq_arg4), (_zzq_arg5)); } while (0)
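/* Editor's usage sketch (not part of the original header).  MY_TOOL_REQUEST
   below is a placeholder: real request codes come from the VG_USERREQ__*
   enumerations defined further down in this header and in the tool-specific
   headers (memcheck.h, callgrind.h, ...).  When the program runs natively,
   or when NVALGRIND is defined, the EXPR form simply yields the default
   value passed as its first argument. */
#if 0
#define MY_TOOL_REQUEST 0x1001  /* hypothetical request code */

static int query_tool(void)
{
   /* Yields 0 (the default) when not running under Valgrind. */
   return (int) VALGRIND_DO_CLIENT_REQUEST_EXPR(0 /* default */,
                                                MY_TOOL_REQUEST,
                                                0, 0, 0, 0, 0);
}

static void notify_tool(void* addr, unsigned long len)
{
   /* Fire-and-forget request: no result is needed. */
   VALGRIND_DO_CLIENT_REQUEST_STMT(MY_TOOL_REQUEST, addr, len, 0, 0, 0);
}
#endif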
#if defined(NVALGRIND)
/* Define NVALGRIND to completely remove the Valgrind magic sequence
from the compiled code (analogous to NDEBUG's effects on
assert()) */
#define VALGRIND_DO_CLIENT_REQUEST_EXPR( \
_zzq_default, _zzq_request, \
_zzq_arg1, _zzq_arg2, _zzq_arg3, _zzq_arg4, _zzq_arg5) \
(_zzq_default)
#else /* ! NVALGRIND */
/* The following defines the magic code sequences which the JITter
spots and handles magically. Don't look too closely at them as
they will rot your brain.
The assembly code sequences for all architectures is in this one
file. This is because this file must be stand-alone, and we don't
want to have multiple files.
For VALGRIND_DO_CLIENT_REQUEST, we must ensure that the default
value gets put in the return slot, so that everything works when
this is executed not under Valgrind. Args are passed in a memory
block, and so there's no intrinsic limit to the number that could
be passed, but it's currently five.
The macro args are:
_zzq_rlval result lvalue
_zzq_default default value (result returned when running on real CPU)
_zzq_request request code
_zzq_arg1..5 request params
The other two macros are used to support function wrapping, and are
a lot simpler. VALGRIND_GET_NR_CONTEXT returns the value of the
guest's NRADDR pseudo-register and whatever other information is
   needed to safely make the original call from the wrapper: on
ppc64-linux, the R2 value at the divert point is also needed. This
information is abstracted into a user-visible type, OrigFn.
VALGRIND_CALL_NOREDIR_* behaves the same as the following on the
guest, but guarantees that the branch instruction will not be
redirected: x86: call *%eax, amd64: call *%rax, ppc32/ppc64:
branch-and-link-to-r11. VALGRIND_CALL_NOREDIR is just text, not a
complete inline asm, since it needs to be combined with more magic
inline asm stuff to be useful.
*/
/* ----------------- x86-{linux,darwin,solaris} ---------------- */
#if defined(PLAT_x86_linux) || defined(PLAT_x86_darwin) \
|| (defined(PLAT_x86_win32) && defined(__GNUC__)) \
|| defined(PLAT_x86_solaris)
typedef
struct {
unsigned int nraddr; /* where's the code? */
}
OrigFn;
#define __SPECIAL_INSTRUCTION_PREAMBLE \
"roll $3, %%edi ; roll $13, %%edi\n\t" \
"roll $29, %%edi ; roll $19, %%edi\n\t"
#define VALGRIND_DO_CLIENT_REQUEST_EXPR( \
_zzq_default, _zzq_request, \
_zzq_arg1, _zzq_arg2, _zzq_arg3, _zzq_arg4, _zzq_arg5) \
__extension__ \
({volatile unsigned int _zzq_args[6]; \
volatile unsigned int _zzq_result; \
_zzq_args[0] = (unsigned int)(_zzq_request); \
_zzq_args[1] = (unsigned int)(_zzq_arg1); \
_zzq_args[2] = (unsigned int)(_zzq_arg2); \
_zzq_args[3] = (unsigned int)(_zzq_arg3); \
_zzq_args[4] = (unsigned int)(_zzq_arg4); \
_zzq_args[5] = (unsigned int)(_zzq_arg5); \
__asm__ volatile(__SPECIAL_INSTRUCTION_PREAMBLE \
/* %EDX = client_request ( %EAX ) */ \
"xchgl %%ebx,%%ebx" \
: "=d" (_zzq_result) \
: "a" (&_zzq_args[0]), "0" (_zzq_default) \
: "cc", "memory" \
); \
_zzq_result; \
})
#define VALGRIND_GET_NR_CONTEXT(_zzq_rlval) \
{ volatile OrigFn* _zzq_orig = &(_zzq_rlval); \
volatile unsigned int __addr; \
__asm__ volatile(__SPECIAL_INSTRUCTION_PREAMBLE \
/* %EAX = guest_NRADDR */ \
"xchgl %%ecx,%%ecx" \
: "=a" (__addr) \
: \
: "cc", "memory" \
); \
_zzq_orig->nraddr = __addr; \
}
#define VALGRIND_CALL_NOREDIR_EAX \
__SPECIAL_INSTRUCTION_PREAMBLE \
/* call-noredir *%EAX */ \
"xchgl %%edx,%%edx\n\t"
#define VALGRIND_VEX_INJECT_IR() \
do { \
__asm__ volatile(__SPECIAL_INSTRUCTION_PREAMBLE \
"xchgl %%edi,%%edi\n\t" \
: : : "cc", "memory" \
); \
} while (0)
#endif /* PLAT_x86_linux || PLAT_x86_darwin || (PLAT_x86_win32 && __GNUC__)
|| PLAT_x86_solaris */
/* ------------------------- x86-Win32 ------------------------- */
#if defined(PLAT_x86_win32) && !defined(__GNUC__)
typedef
struct {
unsigned int nraddr; /* where's the code? */
}
OrigFn;
#if defined(_MSC_VER)
#define __SPECIAL_INSTRUCTION_PREAMBLE \
__asm rol edi, 3 __asm rol edi, 13 \
__asm rol edi, 29 __asm rol edi, 19
#define VALGRIND_DO_CLIENT_REQUEST_EXPR( \
_zzq_default, _zzq_request, \
_zzq_arg1, _zzq_arg2, _zzq_arg3, _zzq_arg4, _zzq_arg5) \
valgrind_do_client_request_expr((uintptr_t)(_zzq_default), \
(uintptr_t)(_zzq_request), (uintptr_t)(_zzq_arg1), \
(uintptr_t)(_zzq_arg2), (uintptr_t)(_zzq_arg3), \
(uintptr_t)(_zzq_arg4), (uintptr_t)(_zzq_arg5))
static __inline uintptr_t
valgrind_do_client_request_expr(uintptr_t _zzq_default, uintptr_t _zzq_request,
uintptr_t _zzq_arg1, uintptr_t _zzq_arg2,
uintptr_t _zzq_arg3, uintptr_t _zzq_arg4,
uintptr_t _zzq_arg5)
{
volatile uintptr_t _zzq_args[6];
volatile unsigned int _zzq_result;
_zzq_args[0] = (uintptr_t)(_zzq_request);
_zzq_args[1] = (uintptr_t)(_zzq_arg1);
_zzq_args[2] = (uintptr_t)(_zzq_arg2);
_zzq_args[3] = (uintptr_t)(_zzq_arg3);
_zzq_args[4] = (uintptr_t)(_zzq_arg4);
_zzq_args[5] = (uintptr_t)(_zzq_arg5);
__asm { __asm lea eax, _zzq_args __asm mov edx, _zzq_default
__SPECIAL_INSTRUCTION_PREAMBLE
/* %EDX = client_request ( %EAX ) */
__asm xchg ebx,ebx
__asm mov _zzq_result, edx
}
return _zzq_result;
}
#define VALGRIND_GET_NR_CONTEXT(_zzq_rlval) \
{ volatile OrigFn* _zzq_orig = &(_zzq_rlval); \
volatile unsigned int __addr; \
__asm { __SPECIAL_INSTRUCTION_PREAMBLE \
/* %EAX = guest_NRADDR */ \
__asm xchg ecx,ecx \
__asm mov __addr, eax \
} \
_zzq_orig->nraddr = __addr; \
}
#define VALGRIND_CALL_NOREDIR_EAX ERROR
#define VALGRIND_VEX_INJECT_IR() \
do { \
__asm { __SPECIAL_INSTRUCTION_PREAMBLE \
__asm xchg edi,edi \
} \
} while (0)
#else
#error Unsupported compiler.
#endif
#endif /* PLAT_x86_win32 */
/* ----------------- amd64-{linux,darwin,solaris} --------------- */
#if defined(PLAT_amd64_linux) || defined(PLAT_amd64_darwin) \
|| defined(PLAT_amd64_solaris) \
|| (defined(PLAT_amd64_win64) && defined(__GNUC__))
typedef
struct {
unsigned long int nraddr; /* where's the code? */
}
OrigFn;
#define __SPECIAL_INSTRUCTION_PREAMBLE \
"rolq $3, %%rdi ; rolq $13, %%rdi\n\t" \
"rolq $61, %%rdi ; rolq $51, %%rdi\n\t"
#define VALGRIND_DO_CLIENT_REQUEST_EXPR( \
_zzq_default, _zzq_request, \
_zzq_arg1, _zzq_arg2, _zzq_arg3, _zzq_arg4, _zzq_arg5) \
__extension__ \
({ volatile unsigned long int _zzq_args[6]; \
volatile unsigned long int _zzq_result; \
_zzq_args[0] = (unsigned long int)(_zzq_request); \
_zzq_args[1] = (unsigned long int)(_zzq_arg1); \
_zzq_args[2] = (unsigned long int)(_zzq_arg2); \
_zzq_args[3] = (unsigned long int)(_zzq_arg3); \
_zzq_args[4] = (unsigned long int)(_zzq_arg4); \
_zzq_args[5] = (unsigned long int)(_zzq_arg5); \
__asm__ volatile(__SPECIAL_INSTRUCTION_PREAMBLE \
/* %RDX = client_request ( %RAX ) */ \
"xchgq %%rbx,%%rbx" \
: "=d" (_zzq_result) \
: "a" (&_zzq_args[0]), "0" (_zzq_default) \
: "cc", "memory" \
); \
_zzq_result; \
})
#define VALGRIND_GET_NR_CONTEXT(_zzq_rlval) \
{ volatile OrigFn* _zzq_orig = &(_zzq_rlval); \
volatile unsigned long int __addr; \
__asm__ volatile(__SPECIAL_INSTRUCTION_PREAMBLE \
/* %RAX = guest_NRADDR */ \
"xchgq %%rcx,%%rcx" \
: "=a" (__addr) \
: \
: "cc", "memory" \
); \
_zzq_orig->nraddr = __addr; \
}
#define VALGRIND_CALL_NOREDIR_RAX \
__SPECIAL_INSTRUCTION_PREAMBLE \
/* call-noredir *%RAX */ \
"xchgq %%rdx,%%rdx\n\t"
#define VALGRIND_VEX_INJECT_IR() \
do { \
__asm__ volatile(__SPECIAL_INSTRUCTION_PREAMBLE \
"xchgq %%rdi,%%rdi\n\t" \
: : : "cc", "memory" \
); \
} while (0)
#endif /* PLAT_amd64_linux || PLAT_amd64_darwin || PLAT_amd64_solaris
          || (PLAT_amd64_win64 && __GNUC__) */
/* ------------------------- amd64-Win64 ------------------------- */
#if defined(PLAT_amd64_win64) && !defined(__GNUC__)
#error Unsupported compiler.
#endif /* PLAT_amd64_win64 */
/* ------------------------ ppc32-linux ------------------------ */
#if defined(PLAT_ppc32_linux)
typedef
struct {
unsigned int nraddr; /* where's the code? */
}
OrigFn;
#define __SPECIAL_INSTRUCTION_PREAMBLE \
"rlwinm 0,0,3,0,31 ; rlwinm 0,0,13,0,31\n\t" \
"rlwinm 0,0,29,0,31 ; rlwinm 0,0,19,0,31\n\t"
#define VALGRIND_DO_CLIENT_REQUEST_EXPR( \
_zzq_default, _zzq_request, \
_zzq_arg1, _zzq_arg2, _zzq_arg3, _zzq_arg4, _zzq_arg5) \
\
__extension__ \
({ unsigned int _zzq_args[6]; \
unsigned int _zzq_result; \
unsigned int* _zzq_ptr; \
_zzq_args[0] = (unsigned int)(_zzq_request); \
_zzq_args[1] = (unsigned int)(_zzq_arg1); \
_zzq_args[2] = (unsigned int)(_zzq_arg2); \
_zzq_args[3] = (unsigned int)(_zzq_arg3); \
_zzq_args[4] = (unsigned int)(_zzq_arg4); \
_zzq_args[5] = (unsigned int)(_zzq_arg5); \
_zzq_ptr = _zzq_args; \
__asm__ volatile("mr 3,%1\n\t" /*default*/ \
"mr 4,%2\n\t" /*ptr*/ \
__SPECIAL_INSTRUCTION_PREAMBLE \
/* %R3 = client_request ( %R4 ) */ \
"or 1,1,1\n\t" \
"mr %0,3" /*result*/ \
: "=b" (_zzq_result) \
: "b" (_zzq_default), "b" (_zzq_ptr) \
: "cc", "memory", "r3", "r4"); \
_zzq_result; \
})
#define VALGRIND_GET_NR_CONTEXT(_zzq_rlval) \
{ volatile OrigFn* _zzq_orig = &(_zzq_rlval); \
unsigned int __addr; \
__asm__ volatile(__SPECIAL_INSTRUCTION_PREAMBLE \
/* %R3 = guest_NRADDR */ \
"or 2,2,2\n\t" \
"mr %0,3" \
: "=b" (__addr) \
: \
: "cc", "memory", "r3" \
); \
_zzq_orig->nraddr = __addr; \
}
#define VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11 \
__SPECIAL_INSTRUCTION_PREAMBLE \
/* branch-and-link-to-noredir *%R11 */ \
"or 3,3,3\n\t"
#define VALGRIND_VEX_INJECT_IR() \
do { \
__asm__ volatile(__SPECIAL_INSTRUCTION_PREAMBLE \
"or 5,5,5\n\t" \
); \
} while (0)
#endif /* PLAT_ppc32_linux */
/* ------------------------ ppc64-linux ------------------------ */
#if defined(PLAT_ppc64be_linux)
typedef
struct {
unsigned long int nraddr; /* where's the code? */
unsigned long int r2; /* what tocptr do we need? */
}
OrigFn;
#define __SPECIAL_INSTRUCTION_PREAMBLE \
"rotldi 0,0,3 ; rotldi 0,0,13\n\t" \
"rotldi 0,0,61 ; rotldi 0,0,51\n\t"
#define VALGRIND_DO_CLIENT_REQUEST_EXPR( \
_zzq_default, _zzq_request, \
_zzq_arg1, _zzq_arg2, _zzq_arg3, _zzq_arg4, _zzq_arg5) \
\
__extension__ \
({ unsigned long int _zzq_args[6]; \
unsigned long int _zzq_result; \
unsigned long int* _zzq_ptr; \
_zzq_args[0] = (unsigned long int)(_zzq_request); \
_zzq_args[1] = (unsigned long int)(_zzq_arg1); \
_zzq_args[2] = (unsigned long int)(_zzq_arg2); \
_zzq_args[3] = (unsigned long int)(_zzq_arg3); \
_zzq_args[4] = (unsigned long int)(_zzq_arg4); \
_zzq_args[5] = (unsigned long int)(_zzq_arg5); \
_zzq_ptr = _zzq_args; \
__asm__ volatile("mr 3,%1\n\t" /*default*/ \
"mr 4,%2\n\t" /*ptr*/ \
__SPECIAL_INSTRUCTION_PREAMBLE \
/* %R3 = client_request ( %R4 ) */ \
"or 1,1,1\n\t" \
"mr %0,3" /*result*/ \
: "=b" (_zzq_result) \
: "b" (_zzq_default), "b" (_zzq_ptr) \
: "cc", "memory", "r3", "r4"); \
_zzq_result; \
})
#define VALGRIND_GET_NR_CONTEXT(_zzq_rlval) \
{ volatile OrigFn* _zzq_orig = &(_zzq_rlval); \
unsigned long int __addr; \
__asm__ volatile(__SPECIAL_INSTRUCTION_PREAMBLE \
/* %R3 = guest_NRADDR */ \
"or 2,2,2\n\t" \
"mr %0,3" \
: "=b" (__addr) \
: \
: "cc", "memory", "r3" \
); \
_zzq_orig->nraddr = __addr; \
__asm__ volatile(__SPECIAL_INSTRUCTION_PREAMBLE \
/* %R3 = guest_NRADDR_GPR2 */ \
"or 4,4,4\n\t" \
"mr %0,3" \
: "=b" (__addr) \
: \
: "cc", "memory", "r3" \
); \
_zzq_orig->r2 = __addr; \
}
#define VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11 \
__SPECIAL_INSTRUCTION_PREAMBLE \
/* branch-and-link-to-noredir *%R11 */ \
"or 3,3,3\n\t"
#define VALGRIND_VEX_INJECT_IR() \
do { \
__asm__ volatile(__SPECIAL_INSTRUCTION_PREAMBLE \
"or 5,5,5\n\t" \
); \
} while (0)
#endif /* PLAT_ppc64be_linux */
#if defined(PLAT_ppc64le_linux)
typedef
struct {
unsigned long int nraddr; /* where's the code? */
unsigned long int r2; /* what tocptr do we need? */
}
OrigFn;
#define __SPECIAL_INSTRUCTION_PREAMBLE \
"rotldi 0,0,3 ; rotldi 0,0,13\n\t" \
"rotldi 0,0,61 ; rotldi 0,0,51\n\t"
#define VALGRIND_DO_CLIENT_REQUEST_EXPR( \
_zzq_default, _zzq_request, \
_zzq_arg1, _zzq_arg2, _zzq_arg3, _zzq_arg4, _zzq_arg5) \
\
__extension__ \
({ unsigned long int _zzq_args[6]; \
unsigned long int _zzq_result; \
unsigned long int* _zzq_ptr; \
_zzq_args[0] = (unsigned long int)(_zzq_request); \
_zzq_args[1] = (unsigned long int)(_zzq_arg1); \
_zzq_args[2] = (unsigned long int)(_zzq_arg2); \
_zzq_args[3] = (unsigned long int)(_zzq_arg3); \
_zzq_args[4] = (unsigned long int)(_zzq_arg4); \
_zzq_args[5] = (unsigned long int)(_zzq_arg5); \
_zzq_ptr = _zzq_args; \
__asm__ volatile("mr 3,%1\n\t" /*default*/ \
"mr 4,%2\n\t" /*ptr*/ \
__SPECIAL_INSTRUCTION_PREAMBLE \
/* %R3 = client_request ( %R4 ) */ \
"or 1,1,1\n\t" \
"mr %0,3" /*result*/ \
: "=b" (_zzq_result) \
: "b" (_zzq_default), "b" (_zzq_ptr) \
: "cc", "memory", "r3", "r4"); \
_zzq_result; \
})
#define VALGRIND_GET_NR_CONTEXT(_zzq_rlval) \
{ volatile OrigFn* _zzq_orig = &(_zzq_rlval); \
unsigned long int __addr; \
__asm__ volatile(__SPECIAL_INSTRUCTION_PREAMBLE \
/* %R3 = guest_NRADDR */ \
"or 2,2,2\n\t" \
"mr %0,3" \
: "=b" (__addr) \
: \
: "cc", "memory", "r3" \
); \
_zzq_orig->nraddr = __addr; \
__asm__ volatile(__SPECIAL_INSTRUCTION_PREAMBLE \
/* %R3 = guest_NRADDR_GPR2 */ \
"or 4,4,4\n\t" \
"mr %0,3" \
: "=b" (__addr) \
: \
: "cc", "memory", "r3" \
); \
_zzq_orig->r2 = __addr; \
}
#define VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R12 \
__SPECIAL_INSTRUCTION_PREAMBLE \
/* branch-and-link-to-noredir *%R12 */ \
"or 3,3,3\n\t"
#define VALGRIND_VEX_INJECT_IR() \
do { \
__asm__ volatile(__SPECIAL_INSTRUCTION_PREAMBLE \
"or 5,5,5\n\t" \
); \
} while (0)
#endif /* PLAT_ppc64le_linux */
/* ------------------------- arm-linux ------------------------- */
#if defined(PLAT_arm_linux)
typedef
struct {
unsigned int nraddr; /* where's the code? */
}
OrigFn;
#define __SPECIAL_INSTRUCTION_PREAMBLE \
"mov r12, r12, ror #3 ; mov r12, r12, ror #13 \n\t" \
"mov r12, r12, ror #29 ; mov r12, r12, ror #19 \n\t"
#define VALGRIND_DO_CLIENT_REQUEST_EXPR( \
_zzq_default, _zzq_request, \
_zzq_arg1, _zzq_arg2, _zzq_arg3, _zzq_arg4, _zzq_arg5) \
\
__extension__ \
({volatile unsigned int _zzq_args[6]; \
volatile unsigned int _zzq_result; \
_zzq_args[0] = (unsigned int)(_zzq_request); \
_zzq_args[1] = (unsigned int)(_zzq_arg1); \
_zzq_args[2] = (unsigned int)(_zzq_arg2); \
_zzq_args[3] = (unsigned int)(_zzq_arg3); \
_zzq_args[4] = (unsigned int)(_zzq_arg4); \
_zzq_args[5] = (unsigned int)(_zzq_arg5); \
__asm__ volatile("mov r3, %1\n\t" /*default*/ \
"mov r4, %2\n\t" /*ptr*/ \
__SPECIAL_INSTRUCTION_PREAMBLE \
/* R3 = client_request ( R4 ) */ \
"orr r10, r10, r10\n\t" \
"mov %0, r3" /*result*/ \
: "=r" (_zzq_result) \
: "r" (_zzq_default), "r" (&_zzq_args[0]) \
: "cc","memory", "r3", "r4"); \
_zzq_result; \
})
#define VALGRIND_GET_NR_CONTEXT(_zzq_rlval) \
{ volatile OrigFn* _zzq_orig = &(_zzq_rlval); \
unsigned int __addr; \
__asm__ volatile(__SPECIAL_INSTRUCTION_PREAMBLE \
/* R3 = guest_NRADDR */ \
"orr r11, r11, r11\n\t" \
"mov %0, r3" \
: "=r" (__addr) \
: \
: "cc", "memory", "r3" \
); \
_zzq_orig->nraddr = __addr; \
}
#define VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R4 \
__SPECIAL_INSTRUCTION_PREAMBLE \
/* branch-and-link-to-noredir *%R4 */ \
"orr r12, r12, r12\n\t"
#define VALGRIND_VEX_INJECT_IR() \
do { \
__asm__ volatile(__SPECIAL_INSTRUCTION_PREAMBLE \
"orr r9, r9, r9\n\t" \
: : : "cc", "memory" \
); \
} while (0)
#endif /* PLAT_arm_linux */
/* ------------------------ arm64-linux ------------------------- */
#if defined(PLAT_arm64_linux)
typedef
struct {
unsigned long int nraddr; /* where's the code? */
}
OrigFn;
#define __SPECIAL_INSTRUCTION_PREAMBLE \
"ror x12, x12, #3 ; ror x12, x12, #13 \n\t" \
"ror x12, x12, #51 ; ror x12, x12, #61 \n\t"
#define VALGRIND_DO_CLIENT_REQUEST_EXPR( \
_zzq_default, _zzq_request, \
_zzq_arg1, _zzq_arg2, _zzq_arg3, _zzq_arg4, _zzq_arg5) \
\
__extension__ \
({volatile unsigned long int _zzq_args[6]; \
volatile unsigned long int _zzq_result; \
_zzq_args[0] = (unsigned long int)(_zzq_request); \
_zzq_args[1] = (unsigned long int)(_zzq_arg1); \
_zzq_args[2] = (unsigned long int)(_zzq_arg2); \
_zzq_args[3] = (unsigned long int)(_zzq_arg3); \
_zzq_args[4] = (unsigned long int)(_zzq_arg4); \
_zzq_args[5] = (unsigned long int)(_zzq_arg5); \
__asm__ volatile("mov x3, %1\n\t" /*default*/ \
"mov x4, %2\n\t" /*ptr*/ \
__SPECIAL_INSTRUCTION_PREAMBLE \
/* X3 = client_request ( X4 ) */ \
"orr x10, x10, x10\n\t" \
"mov %0, x3" /*result*/ \
: "=r" (_zzq_result) \
: "r" ((unsigned long int)(_zzq_default)), \
"r" (&_zzq_args[0]) \
: "cc","memory", "x3", "x4"); \
_zzq_result; \
})
#define VALGRIND_GET_NR_CONTEXT(_zzq_rlval) \
{ volatile OrigFn* _zzq_orig = &(_zzq_rlval); \
unsigned long int __addr; \
__asm__ volatile(__SPECIAL_INSTRUCTION_PREAMBLE \
/* X3 = guest_NRADDR */ \
"orr x11, x11, x11\n\t" \
"mov %0, x3" \
: "=r" (__addr) \
: \
: "cc", "memory", "x3" \
); \
_zzq_orig->nraddr = __addr; \
}
#define VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_X8 \
__SPECIAL_INSTRUCTION_PREAMBLE \
/* branch-and-link-to-noredir X8 */ \
"orr x12, x12, x12\n\t"
#define VALGRIND_VEX_INJECT_IR() \
do { \
__asm__ volatile(__SPECIAL_INSTRUCTION_PREAMBLE \
"orr x9, x9, x9\n\t" \
: : : "cc", "memory" \
); \
} while (0)
#endif /* PLAT_arm64_linux */
/* ------------------------ s390x-linux ------------------------ */
#if defined(PLAT_s390x_linux)
typedef
struct {
unsigned long int nraddr; /* where's the code? */
}
OrigFn;
/* __SPECIAL_INSTRUCTION_PREAMBLE will be used to identify Valgrind specific
* code. This detection is implemented in platform specific toIR.c
* (e.g. VEX/priv/guest_s390_decoder.c).
*/
#define __SPECIAL_INSTRUCTION_PREAMBLE \
"lr 15,15\n\t" \
"lr 1,1\n\t" \
"lr 2,2\n\t" \
"lr 3,3\n\t"
#define __CLIENT_REQUEST_CODE "lr 2,2\n\t"
#define __GET_NR_CONTEXT_CODE "lr 3,3\n\t"
#define __CALL_NO_REDIR_CODE "lr 4,4\n\t"
#define __VEX_INJECT_IR_CODE "lr 5,5\n\t"
#define VALGRIND_DO_CLIENT_REQUEST_EXPR( \
_zzq_default, _zzq_request, \
_zzq_arg1, _zzq_arg2, _zzq_arg3, _zzq_arg4, _zzq_arg5) \
__extension__ \
({volatile unsigned long int _zzq_args[6]; \
volatile unsigned long int _zzq_result; \
_zzq_args[0] = (unsigned long int)(_zzq_request); \
_zzq_args[1] = (unsigned long int)(_zzq_arg1); \
_zzq_args[2] = (unsigned long int)(_zzq_arg2); \
_zzq_args[3] = (unsigned long int)(_zzq_arg3); \
_zzq_args[4] = (unsigned long int)(_zzq_arg4); \
_zzq_args[5] = (unsigned long int)(_zzq_arg5); \
__asm__ volatile(/* r2 = args */ \
"lgr 2,%1\n\t" \
/* r3 = default */ \
"lgr 3,%2\n\t" \
__SPECIAL_INSTRUCTION_PREAMBLE \
__CLIENT_REQUEST_CODE \
/* results = r3 */ \
"lgr %0, 3\n\t" \
: "=d" (_zzq_result) \
: "a" (&_zzq_args[0]), "0" (_zzq_default) \
: "cc", "2", "3", "memory" \
); \
_zzq_result; \
})
#define VALGRIND_GET_NR_CONTEXT(_zzq_rlval) \
{ volatile OrigFn* _zzq_orig = &(_zzq_rlval); \
volatile unsigned long int __addr; \
__asm__ volatile(__SPECIAL_INSTRUCTION_PREAMBLE \
__GET_NR_CONTEXT_CODE \
"lgr %0, 3\n\t" \
: "=a" (__addr) \
: \
: "cc", "3", "memory" \
); \
_zzq_orig->nraddr = __addr; \
}
#define VALGRIND_CALL_NOREDIR_R1 \
__SPECIAL_INSTRUCTION_PREAMBLE \
__CALL_NO_REDIR_CODE
#define VALGRIND_VEX_INJECT_IR() \
do { \
__asm__ volatile(__SPECIAL_INSTRUCTION_PREAMBLE \
__VEX_INJECT_IR_CODE); \
} while (0)
#endif /* PLAT_s390x_linux */
/* ------------------------- mips32-linux ---------------- */
#if defined(PLAT_mips32_linux)
typedef
struct {
unsigned int nraddr; /* where's the code? */
}
OrigFn;
/* .word 0x342
* .word 0x742
* .word 0xC2
* .word 0x4C2*/
#define __SPECIAL_INSTRUCTION_PREAMBLE \
"srl $0, $0, 13\n\t" \
"srl $0, $0, 29\n\t" \
"srl $0, $0, 3\n\t" \
"srl $0, $0, 19\n\t"
#define VALGRIND_DO_CLIENT_REQUEST_EXPR( \
_zzq_default, _zzq_request, \
_zzq_arg1, _zzq_arg2, _zzq_arg3, _zzq_arg4, _zzq_arg5) \
__extension__ \
({ volatile unsigned int _zzq_args[6]; \
volatile unsigned int _zzq_result; \
_zzq_args[0] = (unsigned int)(_zzq_request); \
_zzq_args[1] = (unsigned int)(_zzq_arg1); \
_zzq_args[2] = (unsigned int)(_zzq_arg2); \
_zzq_args[3] = (unsigned int)(_zzq_arg3); \
_zzq_args[4] = (unsigned int)(_zzq_arg4); \
_zzq_args[5] = (unsigned int)(_zzq_arg5); \
__asm__ volatile("move $11, %1\n\t" /*default*/ \
"move $12, %2\n\t" /*ptr*/ \
__SPECIAL_INSTRUCTION_PREAMBLE \
/* T3 = client_request ( T4 ) */ \
"or $13, $13, $13\n\t" \
"move %0, $11\n\t" /*result*/ \
: "=r" (_zzq_result) \
: "r" (_zzq_default), "r" (&_zzq_args[0]) \
: "$11", "$12", "memory"); \
_zzq_result; \
})
#define VALGRIND_GET_NR_CONTEXT(_zzq_rlval) \
{ volatile OrigFn* _zzq_orig = &(_zzq_rlval); \
volatile unsigned int __addr; \
__asm__ volatile(__SPECIAL_INSTRUCTION_PREAMBLE \
/* %t9 = guest_NRADDR */ \
"or $14, $14, $14\n\t" \
"move %0, $11" /*result*/ \
: "=r" (__addr) \
: \
: "$11" \
); \
_zzq_orig->nraddr = __addr; \
}
#define VALGRIND_CALL_NOREDIR_T9 \
__SPECIAL_INSTRUCTION_PREAMBLE \
/* call-noredir *%t9 */ \
"or $15, $15, $15\n\t"
#define VALGRIND_VEX_INJECT_IR() \
do { \
__asm__ volatile(__SPECIAL_INSTRUCTION_PREAMBLE \
"or $11, $11, $11\n\t" \
); \
} while (0)
#endif /* PLAT_mips32_linux */
/* ------------------------- mips64-linux ---------------- */
#if defined(PLAT_mips64_linux)
typedef
struct {
unsigned long nraddr; /* where's the code? */
}
OrigFn;
/* dsll $0,$0, 3
* dsll $0,$0, 13
* dsll $0,$0, 29
* dsll $0,$0, 19*/
#define __SPECIAL_INSTRUCTION_PREAMBLE \
"dsll $0,$0, 3 ; dsll $0,$0,13\n\t" \
"dsll $0,$0,29 ; dsll $0,$0,19\n\t"
#define VALGRIND_DO_CLIENT_REQUEST_EXPR( \
_zzq_default, _zzq_request, \
_zzq_arg1, _zzq_arg2, _zzq_arg3, _zzq_arg4, _zzq_arg5) \
__extension__ \
({ volatile unsigned long int _zzq_args[6]; \
volatile unsigned long int _zzq_result; \
_zzq_args[0] = (unsigned long int)(_zzq_request); \
_zzq_args[1] = (unsigned long int)(_zzq_arg1); \
_zzq_args[2] = (unsigned long int)(_zzq_arg2); \
_zzq_args[3] = (unsigned long int)(_zzq_arg3); \
_zzq_args[4] = (unsigned long int)(_zzq_arg4); \
_zzq_args[5] = (unsigned long int)(_zzq_arg5); \
__asm__ volatile("move $11, %1\n\t" /*default*/ \
"move $12, %2\n\t" /*ptr*/ \
__SPECIAL_INSTRUCTION_PREAMBLE \
/* $11 = client_request ( $12 ) */ \
"or $13, $13, $13\n\t" \
"move %0, $11\n\t" /*result*/ \
: "=r" (_zzq_result) \
: "r" (_zzq_default), "r" (&_zzq_args[0]) \
: "$11", "$12", "memory"); \
_zzq_result; \
})
#define VALGRIND_GET_NR_CONTEXT(_zzq_rlval) \
{ volatile OrigFn* _zzq_orig = &(_zzq_rlval); \
volatile unsigned long int __addr; \
__asm__ volatile(__SPECIAL_INSTRUCTION_PREAMBLE \
/* $11 = guest_NRADDR */ \
"or $14, $14, $14\n\t" \
"move %0, $11" /*result*/ \
: "=r" (__addr) \
: \
: "$11"); \
_zzq_orig->nraddr = __addr; \
}
#define VALGRIND_CALL_NOREDIR_T9 \
__SPECIAL_INSTRUCTION_PREAMBLE \
/* call-noredir $25 */ \
"or $15, $15, $15\n\t"
#define VALGRIND_VEX_INJECT_IR() \
do { \
__asm__ volatile(__SPECIAL_INSTRUCTION_PREAMBLE \
"or $11, $11, $11\n\t" \
); \
} while (0)
#endif /* PLAT_mips64_linux */
#if defined(PLAT_nanomips_linux)
typedef
struct {
unsigned int nraddr; /* where's the code? */
}
OrigFn;
/*
8000 c04d srl zero, zero, 13
8000 c05d srl zero, zero, 29
8000 c043 srl zero, zero, 3
8000 c053 srl zero, zero, 19
*/
#define __SPECIAL_INSTRUCTION_PREAMBLE "srl[32] $zero, $zero, 13 \n\t" \
"srl[32] $zero, $zero, 29 \n\t" \
"srl[32] $zero, $zero, 3 \n\t" \
"srl[32] $zero, $zero, 19 \n\t"
#define VALGRIND_DO_CLIENT_REQUEST_EXPR( \
_zzq_default, _zzq_request, \
_zzq_arg1, _zzq_arg2, _zzq_arg3, _zzq_arg4, _zzq_arg5) \
__extension__ \
({ volatile unsigned int _zzq_args[6]; \
volatile unsigned int _zzq_result; \
_zzq_args[0] = (unsigned int)(_zzq_request); \
_zzq_args[1] = (unsigned int)(_zzq_arg1); \
_zzq_args[2] = (unsigned int)(_zzq_arg2); \
_zzq_args[3] = (unsigned int)(_zzq_arg3); \
_zzq_args[4] = (unsigned int)(_zzq_arg4); \
_zzq_args[5] = (unsigned int)(_zzq_arg5); \
__asm__ volatile("move $a7, %1\n\t" /* default */ \
"move $t0, %2\n\t" /* ptr */ \
__SPECIAL_INSTRUCTION_PREAMBLE \
/* $a7 = client_request( $t0 ) */ \
"or[32] $t0, $t0, $t0\n\t" \
"move %0, $a7\n\t" /* result */ \
: "=r" (_zzq_result) \
: "r" (_zzq_default), "r" (&_zzq_args[0]) \
: "$a7", "$t0", "memory"); \
_zzq_result; \
})
#define VALGRIND_GET_NR_CONTEXT(_zzq_rlval) \
{ volatile OrigFn* _zzq_orig = &(_zzq_rlval); \
volatile unsigned long int __addr; \
__asm__ volatile(__SPECIAL_INSTRUCTION_PREAMBLE \
/* $a7 = guest_NRADDR */ \
"or[32] $t1, $t1, $t1\n\t" \
"move %0, $a7" /*result*/ \
: "=r" (__addr) \
: \
: "$a7"); \
_zzq_orig->nraddr = __addr; \
}
#define VALGRIND_CALL_NOREDIR_T9 \
__SPECIAL_INSTRUCTION_PREAMBLE \
/* call-noredir $25 */ \
"or[32] $t2, $t2, $t2\n\t"
#define VALGRIND_VEX_INJECT_IR() \
do { \
__asm__ volatile(__SPECIAL_INSTRUCTION_PREAMBLE \
"or[32] $t3, $t3, $t3\n\t" \
); \
} while (0)
#endif
/* Insert assembly code for other platforms here... */
#endif /* NVALGRIND */
/* ------------------------------------------------------------------ */
/* PLATFORM SPECIFICS for FUNCTION WRAPPING. This is all very */
/* ugly. It's the least-worst tradeoff I can think of. */
/* ------------------------------------------------------------------ */
/* This section defines magic (a.k.a appalling-hack) macros for doing
guaranteed-no-redirection macros, so as to get from function
wrappers to the functions they are wrapping. The whole point is to
construct standard call sequences, but to do the call itself with a
special no-redirect call pseudo-instruction that the JIT
understands and handles specially. This section is long and
repetitious, and I can't see a way to make it shorter.
The naming scheme is as follows:
CALL_FN_{W,v}_{v,W,WW,WWW,WWWW,5W,6W,7W,etc}
'W' stands for "word" and 'v' for "void". Hence there are
different macros for calling arity 0, 1, 2, 3, 4, etc, functions,
and for each, the possibility of returning a word-typed result, or
no result.
*/
/* Use these to write the name of your wrapper. NOTE: duplicates
VG_WRAP_FUNCTION_Z{U,Z} in pub_tool_redir.h. NOTE also: inserts
   the default behaviour equivalence class tag "0000" into the name.
See pub_tool_redir.h for details -- normally you don't need to
think about this, though. */
/* Use an extra level of macroisation so as to ensure the soname/fnname
args are fully macro-expanded before pasting them together. */
#define VG_CONCAT4(_aa,_bb,_cc,_dd) _aa##_bb##_cc##_dd
#define I_WRAP_SONAME_FNNAME_ZU(soname,fnname) \
VG_CONCAT4(_vgw00000ZU_,soname,_,fnname)
#define I_WRAP_SONAME_FNNAME_ZZ(soname,fnname) \
VG_CONCAT4(_vgw00000ZZ_,soname,_,fnname)
/* Use this macro from within a wrapper function to collect the
context (address and possibly other info) of the original function.
Once you have that you can then use it in one of the CALL_FN_
macros. The type of the argument _lval is OrigFn. */
#define VALGRIND_GET_ORIG_FN(_lval) VALGRIND_GET_NR_CONTEXT(_lval)
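/* Editor's wrapping sketch (not part of the original header), modelled on
   the example in the Valgrind user manual.  It wraps a function foo() in an
   object with an empty soname (the "NONE" tag); the soname/fnname encoding
   rules themselves live in pub_tool_redir.h.  foo() and the printf calls
   are illustrative only. */
#if 0
#include <stdio.h>

int I_WRAP_SONAME_FNNAME_ZU(NONE, foo)(int x, int y)
{
   int    result;
   OrigFn fn;
   VALGRIND_GET_ORIG_FN(fn);           /* capture the original foo */
   printf("foo's wrapper: args %d %d\n", x, y);
   CALL_FN_W_WW(result, fn, x, y);     /* call onwards to the real foo */
   printf("foo's wrapper: result %d\n", result);
   return result;
}
#endif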
/* Also provide end-user facilities for function replacement, rather
than wrapping. A replacement function differs from a wrapper in
that it has no way to get hold of the original function being
called, and hence no way to call onwards to it. In a replacement
function, VALGRIND_GET_ORIG_FN always returns zero. */
#define I_REPLACE_SONAME_FNNAME_ZU(soname,fnname) \
VG_CONCAT4(_vgr00000ZU_,soname,_,fnname)
#define I_REPLACE_SONAME_FNNAME_ZZ(soname,fnname) \
VG_CONCAT4(_vgr00000ZZ_,soname,_,fnname)
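/* Editor's replacement sketch (not part of the original header).  A
   replacement, unlike a wrapper, cannot reach the original function:
   VALGRIND_GET_ORIG_FN would yield zero here, so the body must stand on
   its own.  bar() is an illustrative name only. */
#if 0
int I_REPLACE_SONAME_FNNAME_ZU(NONE, bar)(int x)
{
   /* Runs instead of the real bar() whenever the client is under Valgrind. */
   return x + 1;
}
#endif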
/* Derivatives of the main macros below, for calling functions
returning void. */
#define CALL_FN_v_v(fnptr) \
do { volatile unsigned long _junk; \
CALL_FN_W_v(_junk,fnptr); } while (0)
#define CALL_FN_v_W(fnptr, arg1) \
do { volatile unsigned long _junk; \
CALL_FN_W_W(_junk,fnptr,arg1); } while (0)
#define CALL_FN_v_WW(fnptr, arg1,arg2) \
do { volatile unsigned long _junk; \
CALL_FN_W_WW(_junk,fnptr,arg1,arg2); } while (0)
#define CALL_FN_v_WWW(fnptr, arg1,arg2,arg3) \
do { volatile unsigned long _junk; \
CALL_FN_W_WWW(_junk,fnptr,arg1,arg2,arg3); } while (0)
#define CALL_FN_v_WWWW(fnptr, arg1,arg2,arg3,arg4) \
do { volatile unsigned long _junk; \
CALL_FN_W_WWWW(_junk,fnptr,arg1,arg2,arg3,arg4); } while (0)
#define CALL_FN_v_5W(fnptr, arg1,arg2,arg3,arg4,arg5) \
do { volatile unsigned long _junk; \
CALL_FN_W_5W(_junk,fnptr,arg1,arg2,arg3,arg4,arg5); } while (0)
#define CALL_FN_v_6W(fnptr, arg1,arg2,arg3,arg4,arg5,arg6) \
do { volatile unsigned long _junk; \
CALL_FN_W_6W(_junk,fnptr,arg1,arg2,arg3,arg4,arg5,arg6); } while (0)
#define CALL_FN_v_7W(fnptr, arg1,arg2,arg3,arg4,arg5,arg6,arg7) \
do { volatile unsigned long _junk; \
CALL_FN_W_7W(_junk,fnptr,arg1,arg2,arg3,arg4,arg5,arg6,arg7); } while (0)
/* ----------------- x86-{linux,darwin,solaris} ---------------- */
#if defined(PLAT_x86_linux) || defined(PLAT_x86_darwin) \
|| defined(PLAT_x86_solaris)
/* These regs are trashed by the hidden call. No need to mention eax
as gcc can already see that, plus causes gcc to bomb. */
#define __CALLER_SAVED_REGS /*"eax"*/ "ecx", "edx"
/* Macros to save and align the stack before making a function
call and restore it afterwards as gcc may not keep the stack
pointer aligned if it doesn't realise calls are being made
to other functions. */
#define VALGRIND_ALIGN_STACK \
"movl %%esp,%%edi\n\t" \
"andl $0xfffffff0,%%esp\n\t"
#define VALGRIND_RESTORE_STACK \
"movl %%edi,%%esp\n\t"
/* These CALL_FN_ macros assume that on x86-linux, sizeof(unsigned
long) == 4. */
#define CALL_FN_W_v(lval, orig) \
do { \
volatile OrigFn _orig = (orig); \
volatile unsigned long _argvec[1]; \
volatile unsigned long _res; \
_argvec[0] = (unsigned long)_orig.nraddr; \
__asm__ volatile( \
VALGRIND_ALIGN_STACK \
"movl (%%eax), %%eax\n\t" /* target->%eax */ \
VALGRIND_CALL_NOREDIR_EAX \
VALGRIND_RESTORE_STACK \
: /*out*/ "=a" (_res) \
: /*in*/ "a" (&_argvec[0]) \
: /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "edi" \
); \
lval = (__typeof__(lval)) _res; \
} while (0)
#define CALL_FN_W_W(lval, orig, arg1) \
do { \
volatile OrigFn _orig = (orig); \
volatile unsigned long _argvec[2]; \
volatile unsigned long _res; \
_argvec[0] = (unsigned long)_orig.nraddr; \
_argvec[1] = (unsigned long)(arg1); \
__asm__ volatile( \
VALGRIND_ALIGN_STACK \
"subl $12, %%esp\n\t" \
"pushl 4(%%eax)\n\t" \
"movl (%%eax), %%eax\n\t" /* target->%eax */ \
VALGRIND_CALL_NOREDIR_EAX \
VALGRIND_RESTORE_STACK \
: /*out*/ "=a" (_res) \
: /*in*/ "a" (&_argvec[0]) \
: /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "edi" \
); \
lval = (__typeof__(lval)) _res; \
} while (0)
#define CALL_FN_W_WW(lval, orig, arg1,arg2) \
do { \
volatile OrigFn _orig = (orig); \
volatile unsigned long _argvec[3]; \
volatile unsigned long _res; \
_argvec[0] = (unsigned long)_orig.nraddr; \
_argvec[1] = (unsigned long)(arg1); \
_argvec[2] = (unsigned long)(arg2); \
__asm__ volatile( \
VALGRIND_ALIGN_STACK \
"subl $8, %%esp\n\t" \
"pushl 8(%%eax)\n\t" \
"pushl 4(%%eax)\n\t" \
"movl (%%eax), %%eax\n\t" /* target->%eax */ \
VALGRIND_CALL_NOREDIR_EAX \
VALGRIND_RESTORE_STACK \
: /*out*/ "=a" (_res) \
: /*in*/ "a" (&_argvec[0]) \
: /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "edi" \
); \
lval = (__typeof__(lval)) _res; \
} while (0)
#define CALL_FN_W_WWW(lval, orig, arg1,arg2,arg3) \
do { \
volatile OrigFn _orig = (orig); \
volatile unsigned long _argvec[4]; \
volatile unsigned long _res; \
_argvec[0] = (unsigned long)_orig.nraddr; \
_argvec[1] = (unsigned long)(arg1); \
_argvec[2] = (unsigned long)(arg2); \
_argvec[3] = (unsigned long)(arg3); \
__asm__ volatile( \
VALGRIND_ALIGN_STACK \
"subl $4, %%esp\n\t" \
"pushl 12(%%eax)\n\t" \
"pushl 8(%%eax)\n\t" \
"pushl 4(%%eax)\n\t" \
"movl (%%eax), %%eax\n\t" /* target->%eax */ \
VALGRIND_CALL_NOREDIR_EAX \
VALGRIND_RESTORE_STACK \
: /*out*/ "=a" (_res) \
: /*in*/ "a" (&_argvec[0]) \
: /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "edi" \
); \
lval = (__typeof__(lval)) _res; \
} while (0)
#define CALL_FN_W_WWWW(lval, orig, arg1,arg2,arg3,arg4) \
do { \
volatile OrigFn _orig = (orig); \
volatile unsigned long _argvec[5]; \
volatile unsigned long _res; \
_argvec[0] = (unsigned long)_orig.nraddr; \
_argvec[1] = (unsigned long)(arg1); \
_argvec[2] = (unsigned long)(arg2); \
_argvec[3] = (unsigned long)(arg3); \
_argvec[4] = (unsigned long)(arg4); \
__asm__ volatile( \
VALGRIND_ALIGN_STACK \
"pushl 16(%%eax)\n\t" \
"pushl 12(%%eax)\n\t" \
"pushl 8(%%eax)\n\t" \
"pushl 4(%%eax)\n\t" \
"movl (%%eax), %%eax\n\t" /* target->%eax */ \
VALGRIND_CALL_NOREDIR_EAX \
VALGRIND_RESTORE_STACK \
: /*out*/ "=a" (_res) \
: /*in*/ "a" (&_argvec[0]) \
: /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "edi" \
); \
lval = (__typeof__(lval)) _res; \
} while (0)
#define CALL_FN_W_5W(lval, orig, arg1,arg2,arg3,arg4,arg5) \
do { \
volatile OrigFn _orig = (orig); \
volatile unsigned long _argvec[6]; \
volatile unsigned long _res; \
_argvec[0] = (unsigned long)_orig.nraddr; \
_argvec[1] = (unsigned long)(arg1); \
_argvec[2] = (unsigned long)(arg2); \
_argvec[3] = (unsigned long)(arg3); \
_argvec[4] = (unsigned long)(arg4); \
_argvec[5] = (unsigned long)(arg5); \
__asm__ volatile( \
VALGRIND_ALIGN_STACK \
"subl $12, %%esp\n\t" \
"pushl 20(%%eax)\n\t" \
"pushl 16(%%eax)\n\t" \
"pushl 12(%%eax)\n\t" \
"pushl 8(%%eax)\n\t" \
"pushl 4(%%eax)\n\t" \
"movl (%%eax), %%eax\n\t" /* target->%eax */ \
VALGRIND_CALL_NOREDIR_EAX \
VALGRIND_RESTORE_STACK \
: /*out*/ "=a" (_res) \
: /*in*/ "a" (&_argvec[0]) \
: /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "edi" \
); \
lval = (__typeof__(lval)) _res; \
} while (0)
#define CALL_FN_W_6W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6) \
do { \
volatile OrigFn _orig = (orig); \
volatile unsigned long _argvec[7]; \
volatile unsigned long _res; \
_argvec[0] = (unsigned long)_orig.nraddr; \
_argvec[1] = (unsigned long)(arg1); \
_argvec[2] = (unsigned long)(arg2); \
_argvec[3] = (unsigned long)(arg3); \
_argvec[4] = (unsigned long)(arg4); \
_argvec[5] = (unsigned long)(arg5); \
_argvec[6] = (unsigned long)(arg6); \
__asm__ volatile( \
VALGRIND_ALIGN_STACK \
"subl $8, %%esp\n\t" \
"pushl 24(%%eax)\n\t" \
"pushl 20(%%eax)\n\t" \
"pushl 16(%%eax)\n\t" \
"pushl 12(%%eax)\n\t" \
"pushl 8(%%eax)\n\t" \
"pushl 4(%%eax)\n\t" \
"movl (%%eax), %%eax\n\t" /* target->%eax */ \
VALGRIND_CALL_NOREDIR_EAX \
VALGRIND_RESTORE_STACK \
: /*out*/ "=a" (_res) \
: /*in*/ "a" (&_argvec[0]) \
: /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "edi" \
); \
lval = (__typeof__(lval)) _res; \
} while (0)
#define CALL_FN_W_7W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6, \
arg7) \
do { \
volatile OrigFn _orig = (orig); \
volatile unsigned long _argvec[8]; \
volatile unsigned long _res; \
_argvec[0] = (unsigned long)_orig.nraddr; \
_argvec[1] = (unsigned long)(arg1); \
_argvec[2] = (unsigned long)(arg2); \
_argvec[3] = (unsigned long)(arg3); \
_argvec[4] = (unsigned long)(arg4); \
_argvec[5] = (unsigned long)(arg5); \
_argvec[6] = (unsigned long)(arg6); \
_argvec[7] = (unsigned long)(arg7); \
__asm__ volatile( \
VALGRIND_ALIGN_STACK \
"subl $4, %%esp\n\t" \
"pushl 28(%%eax)\n\t" \
"pushl 24(%%eax)\n\t" \
"pushl 20(%%eax)\n\t" \
"pushl 16(%%eax)\n\t" \
"pushl 12(%%eax)\n\t" \
"pushl 8(%%eax)\n\t" \
"pushl 4(%%eax)\n\t" \
"movl (%%eax), %%eax\n\t" /* target->%eax */ \
VALGRIND_CALL_NOREDIR_EAX \
VALGRIND_RESTORE_STACK \
: /*out*/ "=a" (_res) \
: /*in*/ "a" (&_argvec[0]) \
: /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "edi" \
); \
lval = (__typeof__(lval)) _res; \
} while (0)
#define CALL_FN_W_8W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6, \
arg7,arg8) \
do { \
volatile OrigFn _orig = (orig); \
volatile unsigned long _argvec[9]; \
volatile unsigned long _res; \
_argvec[0] = (unsigned long)_orig.nraddr; \
_argvec[1] = (unsigned long)(arg1); \
_argvec[2] = (unsigned long)(arg2); \
_argvec[3] = (unsigned long)(arg3); \
_argvec[4] = (unsigned long)(arg4); \
_argvec[5] = (unsigned long)(arg5); \
_argvec[6] = (unsigned long)(arg6); \
_argvec[7] = (unsigned long)(arg7); \
_argvec[8] = (unsigned long)(arg8); \
__asm__ volatile( \
VALGRIND_ALIGN_STACK \
"pushl 32(%%eax)\n\t" \
"pushl 28(%%eax)\n\t" \
"pushl 24(%%eax)\n\t" \
"pushl 20(%%eax)\n\t" \
"pushl 16(%%eax)\n\t" \
"pushl 12(%%eax)\n\t" \
"pushl 8(%%eax)\n\t" \
"pushl 4(%%eax)\n\t" \
"movl (%%eax), %%eax\n\t" /* target->%eax */ \
VALGRIND_CALL_NOREDIR_EAX \
VALGRIND_RESTORE_STACK \
: /*out*/ "=a" (_res) \
: /*in*/ "a" (&_argvec[0]) \
: /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "edi" \
); \
lval = (__typeof__(lval)) _res; \
} while (0)
#define CALL_FN_W_9W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6, \
arg7,arg8,arg9) \
do { \
volatile OrigFn _orig = (orig); \
volatile unsigned long _argvec[10]; \
volatile unsigned long _res; \
_argvec[0] = (unsigned long)_orig.nraddr; \
_argvec[1] = (unsigned long)(arg1); \
_argvec[2] = (unsigned long)(arg2); \
_argvec[3] = (unsigned long)(arg3); \
_argvec[4] = (unsigned long)(arg4); \
_argvec[5] = (unsigned long)(arg5); \
_argvec[6] = (unsigned long)(arg6); \
_argvec[7] = (unsigned long)(arg7); \
_argvec[8] = (unsigned long)(arg8); \
_argvec[9] = (unsigned long)(arg9); \
__asm__ volatile( \
VALGRIND_ALIGN_STACK \
"subl $12, %%esp\n\t" \
"pushl 36(%%eax)\n\t" \
"pushl 32(%%eax)\n\t" \
"pushl 28(%%eax)\n\t" \
"pushl 24(%%eax)\n\t" \
"pushl 20(%%eax)\n\t" \
"pushl 16(%%eax)\n\t" \
"pushl 12(%%eax)\n\t" \
"pushl 8(%%eax)\n\t" \
"pushl 4(%%eax)\n\t" \
"movl (%%eax), %%eax\n\t" /* target->%eax */ \
VALGRIND_CALL_NOREDIR_EAX \
VALGRIND_RESTORE_STACK \
: /*out*/ "=a" (_res) \
: /*in*/ "a" (&_argvec[0]) \
: /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "edi" \
); \
lval = (__typeof__(lval)) _res; \
} while (0)
#define CALL_FN_W_10W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6, \
arg7,arg8,arg9,arg10) \
do { \
volatile OrigFn _orig = (orig); \
volatile unsigned long _argvec[11]; \
volatile unsigned long _res; \
_argvec[0] = (unsigned long)_orig.nraddr; \
_argvec[1] = (unsigned long)(arg1); \
_argvec[2] = (unsigned long)(arg2); \
_argvec[3] = (unsigned long)(arg3); \
_argvec[4] = (unsigned long)(arg4); \
_argvec[5] = (unsigned long)(arg5); \
_argvec[6] = (unsigned long)(arg6); \
_argvec[7] = (unsigned long)(arg7); \
_argvec[8] = (unsigned long)(arg8); \
_argvec[9] = (unsigned long)(arg9); \
_argvec[10] = (unsigned long)(arg10); \
__asm__ volatile( \
VALGRIND_ALIGN_STACK \
"subl $8, %%esp\n\t" \
"pushl 40(%%eax)\n\t" \
"pushl 36(%%eax)\n\t" \
"pushl 32(%%eax)\n\t" \
"pushl 28(%%eax)\n\t" \
"pushl 24(%%eax)\n\t" \
"pushl 20(%%eax)\n\t" \
"pushl 16(%%eax)\n\t" \
"pushl 12(%%eax)\n\t" \
"pushl 8(%%eax)\n\t" \
"pushl 4(%%eax)\n\t" \
"movl (%%eax), %%eax\n\t" /* target->%eax */ \
VALGRIND_CALL_NOREDIR_EAX \
VALGRIND_RESTORE_STACK \
: /*out*/ "=a" (_res) \
: /*in*/ "a" (&_argvec[0]) \
: /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "edi" \
); \
lval = (__typeof__(lval)) _res; \
} while (0)
#define CALL_FN_W_11W(lval, orig, arg1,arg2,arg3,arg4,arg5, \
arg6,arg7,arg8,arg9,arg10, \
arg11) \
do { \
volatile OrigFn _orig = (orig); \
volatile unsigned long _argvec[12]; \
volatile unsigned long _res; \
_argvec[0] = (unsigned long)_orig.nraddr; \
_argvec[1] = (unsigned long)(arg1); \
_argvec[2] = (unsigned long)(arg2); \
_argvec[3] = (unsigned long)(arg3); \
_argvec[4] = (unsigned long)(arg4); \
_argvec[5] = (unsigned long)(arg5); \
_argvec[6] = (unsigned long)(arg6); \
_argvec[7] = (unsigned long)(arg7); \
_argvec[8] = (unsigned long)(arg8); \
_argvec[9] = (unsigned long)(arg9); \
_argvec[10] = (unsigned long)(arg10); \
_argvec[11] = (unsigned long)(arg11); \
__asm__ volatile( \
VALGRIND_ALIGN_STACK \
"subl $4, %%esp\n\t" \
"pushl 44(%%eax)\n\t" \
"pushl 40(%%eax)\n\t" \
"pushl 36(%%eax)\n\t" \
"pushl 32(%%eax)\n\t" \
"pushl 28(%%eax)\n\t" \
"pushl 24(%%eax)\n\t" \
"pushl 20(%%eax)\n\t" \
"pushl 16(%%eax)\n\t" \
"pushl 12(%%eax)\n\t" \
"pushl 8(%%eax)\n\t" \
"pushl 4(%%eax)\n\t" \
"movl (%%eax), %%eax\n\t" /* target->%eax */ \
VALGRIND_CALL_NOREDIR_EAX \
VALGRIND_RESTORE_STACK \
: /*out*/ "=a" (_res) \
: /*in*/ "a" (&_argvec[0]) \
: /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "edi" \
); \
lval = (__typeof__(lval)) _res; \
} while (0)
#define CALL_FN_W_12W(lval, orig, arg1,arg2,arg3,arg4,arg5, \
arg6,arg7,arg8,arg9,arg10, \
arg11,arg12) \
do { \
volatile OrigFn _orig = (orig); \
volatile unsigned long _argvec[13]; \
volatile unsigned long _res; \
_argvec[0] = (unsigned long)_orig.nraddr; \
_argvec[1] = (unsigned long)(arg1); \
_argvec[2] = (unsigned long)(arg2); \
_argvec[3] = (unsigned long)(arg3); \
_argvec[4] = (unsigned long)(arg4); \
_argvec[5] = (unsigned long)(arg5); \
_argvec[6] = (unsigned long)(arg6); \
_argvec[7] = (unsigned long)(arg7); \
_argvec[8] = (unsigned long)(arg8); \
_argvec[9] = (unsigned long)(arg9); \
_argvec[10] = (unsigned long)(arg10); \
_argvec[11] = (unsigned long)(arg11); \
_argvec[12] = (unsigned long)(arg12); \
__asm__ volatile( \
VALGRIND_ALIGN_STACK \
"pushl 48(%%eax)\n\t" \
"pushl 44(%%eax)\n\t" \
"pushl 40(%%eax)\n\t" \
"pushl 36(%%eax)\n\t" \
"pushl 32(%%eax)\n\t" \
"pushl 28(%%eax)\n\t" \
"pushl 24(%%eax)\n\t" \
"pushl 20(%%eax)\n\t" \
"pushl 16(%%eax)\n\t" \
"pushl 12(%%eax)\n\t" \
"pushl 8(%%eax)\n\t" \
"pushl 4(%%eax)\n\t" \
"movl (%%eax), %%eax\n\t" /* target->%eax */ \
VALGRIND_CALL_NOREDIR_EAX \
VALGRIND_RESTORE_STACK \
: /*out*/ "=a" (_res) \
: /*in*/ "a" (&_argvec[0]) \
: /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "edi" \
); \
lval = (__typeof__(lval)) _res; \
} while (0)
#endif /* PLAT_x86_linux || PLAT_x86_darwin || PLAT_x86_solaris */
/* ---------------- amd64-{linux,darwin,solaris} --------------- */
#if defined(PLAT_amd64_linux) || defined(PLAT_amd64_darwin) \
|| defined(PLAT_amd64_solaris)
/* ARGREGS: rdi rsi rdx rcx r8 r9 (the rest on stack in R-to-L order) */
/* These regs are trashed by the hidden call. */
#define __CALLER_SAVED_REGS /*"rax",*/ "rcx", "rdx", "rsi", \
"rdi", "r8", "r9", "r10", "r11"
/* This is all pretty complex. It's so as to make stack unwinding
work reliably. See bug 243270. The basic problem is the sub and
   add of 128 to %rsp in all of the following macros. If gcc believes
the CFA is in %rsp, then unwinding may fail, because what's at the
CFA is not what gcc "expected" when it constructs the CFIs for the
places where the macros are instantiated.
But we can't just add a CFI annotation to increase the CFA offset
by 128, to match the sub of 128 from %rsp, because we don't know
whether gcc has chosen %rsp as the CFA at that point, or whether it
has chosen some other register (eg, %rbp). In the latter case,
adding a CFI annotation to change the CFA offset is simply wrong.
So the solution is to get hold of the CFA using
__builtin_dwarf_cfa(), put it in a known register, and add a
CFI annotation to say what the register is. We choose %rbp for
this (perhaps perversely), because:
(1) %rbp is already subject to unwinding. If a new register was
chosen then the unwinder would have to unwind it in all stack
traces, which is expensive, and
(2) %rbp is already subject to precise exception updates in the
JIT. If a new register was chosen, we'd have to have precise
exceptions for it too, which reduces performance of the
generated code.
However .. one extra complication. We can't just whack the result
of __builtin_dwarf_cfa() into %rbp and then add %rbp to the
list of trashed registers at the end of the inline assembly
fragments; gcc won't allow %rbp to appear in that list. Hence
instead we need to stash %rbp in %r15 for the duration of the asm,
and say that %r15 is trashed instead. gcc seems happy to go with
that.
Oh .. and this all needs to be conditionalised so that it is
unchanged from before this commit, when compiled with older gccs
that don't support __builtin_dwarf_cfa. Furthermore, since
this header file is freestanding, it has to be independent of
config.h, and so the following conditionalisation cannot depend on
configure time checks.
Although it's not clear from
'defined(__GNUC__) && defined(__GCC_HAVE_DWARF2_CFI_ASM)',
this expression excludes Darwin.
.cfi directives in Darwin assembly appear to be completely
different and I haven't investigated how they work.
For even more entertainment value, note we have to use the
completely undocumented __builtin_dwarf_cfa(), which appears to
really compute the CFA, whereas __builtin_frame_address(0) claims
to but actually doesn't. See
https://bugs.kde.org/show_bug.cgi?id=243270#c47
*/
#if defined(__GNUC__) && defined(__GCC_HAVE_DWARF2_CFI_ASM)
# define __FRAME_POINTER \
,"r"(__builtin_dwarf_cfa())
# define VALGRIND_CFI_PROLOGUE \
"movq %%rbp, %%r15\n\t" \
"movq %2, %%rbp\n\t" \
".cfi_remember_state\n\t" \
".cfi_def_cfa rbp, 0\n\t"
# define VALGRIND_CFI_EPILOGUE \
"movq %%r15, %%rbp\n\t" \
".cfi_restore_state\n\t"
#else
# define __FRAME_POINTER
# define VALGRIND_CFI_PROLOGUE
# define VALGRIND_CFI_EPILOGUE
#endif
/* Macros to save and align the stack before making a function
call and restore it afterwards as gcc may not keep the stack
pointer aligned if it doesn't realise calls are being made
to other functions. */
#define VALGRIND_ALIGN_STACK \
"movq %%rsp,%%r14\n\t" \
"andq $0xfffffffffffffff0,%%rsp\n\t"
#define VALGRIND_RESTORE_STACK \
"movq %%r14,%%rsp\n\t"
/* These CALL_FN_ macros assume that on amd64-linux, sizeof(unsigned
long) == 8. */
/* NB 9 Sept 07. There is a nasty kludge here in all these CALL_FN_
macros. In order not to trash the stack redzone, we need to drop
%rsp by 128 before the hidden call, and restore afterwards. The
   nastiness is that it is only by luck that the stack still appears
to be unwindable during the hidden call - since then the behaviour
of any routine using this macro does not match what the CFI data
says. Sigh.
Why is this important? Imagine that a wrapper has a stack
allocated local, and passes to the hidden call, a pointer to it.
Because gcc does not know about the hidden call, it may allocate
that local in the redzone. Unfortunately the hidden call may then
trash it before it comes to use it. So we must step clear of the
redzone, for the duration of the hidden call, to make it safe.
Probably the same problem afflicts the other redzone-style ABIs too
(ppc64-linux); but for those, the stack is
self describing (none of this CFI nonsense) so at least messing
with the stack pointer doesn't give a danger of non-unwindable
stack. */
#define CALL_FN_W_v(lval, orig) \
do { \
volatile OrigFn _orig = (orig); \
volatile unsigned long _argvec[1]; \
volatile unsigned long _res; \
_argvec[0] = (unsigned long)_orig.nraddr; \
__asm__ volatile( \
VALGRIND_CFI_PROLOGUE \
VALGRIND_ALIGN_STACK \
"subq $128,%%rsp\n\t" \
"movq (%%rax), %%rax\n\t" /* target->%rax */ \
VALGRIND_CALL_NOREDIR_RAX \
VALGRIND_RESTORE_STACK \
VALGRIND_CFI_EPILOGUE \
: /*out*/ "=a" (_res) \
: /*in*/ "a" (&_argvec[0]) __FRAME_POINTER \
: /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r14", "r15" \
); \
lval = (__typeof__(lval)) _res; \
} while (0)
#define CALL_FN_W_W(lval, orig, arg1) \
do { \
volatile OrigFn _orig = (orig); \
volatile unsigned long _argvec[2]; \
volatile unsigned long _res; \
_argvec[0] = (unsigned long)_orig.nraddr; \
_argvec[1] = (unsigned long)(arg1); \
__asm__ volatile( \
VALGRIND_CFI_PROLOGUE \
VALGRIND_ALIGN_STACK \
"subq $128,%%rsp\n\t" \
"movq 8(%%rax), %%rdi\n\t" \
"movq (%%rax), %%rax\n\t" /* target->%rax */ \
VALGRIND_CALL_NOREDIR_RAX \
VALGRIND_RESTORE_STACK \
VALGRIND_CFI_EPILOGUE \
: /*out*/ "=a" (_res) \
: /*in*/ "a" (&_argvec[0]) __FRAME_POINTER \
: /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r14", "r15" \
); \
lval = (__typeof__(lval)) _res; \
} while (0)
#define CALL_FN_W_WW(lval, orig, arg1,arg2) \
do { \
volatile OrigFn _orig = (orig); \
volatile unsigned long _argvec[3]; \
volatile unsigned long _res; \
_argvec[0] = (unsigned long)_orig.nraddr; \
_argvec[1] = (unsigned long)(arg1); \
_argvec[2] = (unsigned long)(arg2); \
__asm__ volatile( \
VALGRIND_CFI_PROLOGUE \
VALGRIND_ALIGN_STACK \
"subq $128,%%rsp\n\t" \
"movq 16(%%rax), %%rsi\n\t" \
"movq 8(%%rax), %%rdi\n\t" \
"movq (%%rax), %%rax\n\t" /* target->%rax */ \
VALGRIND_CALL_NOREDIR_RAX \
VALGRIND_RESTORE_STACK \
VALGRIND_CFI_EPILOGUE \
: /*out*/ "=a" (_res) \
: /*in*/ "a" (&_argvec[0]) __FRAME_POINTER \
: /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r14", "r15" \
); \
lval = (__typeof__(lval)) _res; \
} while (0)
#define CALL_FN_W_WWW(lval, orig, arg1,arg2,arg3) \
do { \
volatile OrigFn _orig = (orig); \
volatile unsigned long _argvec[4]; \
volatile unsigned long _res; \
_argvec[0] = (unsigned long)_orig.nraddr; \
_argvec[1] = (unsigned long)(arg1); \
_argvec[2] = (unsigned long)(arg2); \
_argvec[3] = (unsigned long)(arg3); \
__asm__ volatile( \
VALGRIND_CFI_PROLOGUE \
VALGRIND_ALIGN_STACK \
"subq $128,%%rsp\n\t" \
"movq 24(%%rax), %%rdx\n\t" \
"movq 16(%%rax), %%rsi\n\t" \
"movq 8(%%rax), %%rdi\n\t" \
"movq (%%rax), %%rax\n\t" /* target->%rax */ \
VALGRIND_CALL_NOREDIR_RAX \
VALGRIND_RESTORE_STACK \
VALGRIND_CFI_EPILOGUE \
: /*out*/ "=a" (_res) \
: /*in*/ "a" (&_argvec[0]) __FRAME_POINTER \
: /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r14", "r15" \
); \
lval = (__typeof__(lval)) _res; \
} while (0)
#define CALL_FN_W_WWWW(lval, orig, arg1,arg2,arg3,arg4) \
do { \
volatile OrigFn _orig = (orig); \
volatile unsigned long _argvec[5]; \
volatile unsigned long _res; \
_argvec[0] = (unsigned long)_orig.nraddr; \
_argvec[1] = (unsigned long)(arg1); \
_argvec[2] = (unsigned long)(arg2); \
_argvec[3] = (unsigned long)(arg3); \
_argvec[4] = (unsigned long)(arg4); \
__asm__ volatile( \
VALGRIND_CFI_PROLOGUE \
VALGRIND_ALIGN_STACK \
"subq $128,%%rsp\n\t" \
"movq 32(%%rax), %%rcx\n\t" \
"movq 24(%%rax), %%rdx\n\t" \
"movq 16(%%rax), %%rsi\n\t" \
"movq 8(%%rax), %%rdi\n\t" \
"movq (%%rax), %%rax\n\t" /* target->%rax */ \
VALGRIND_CALL_NOREDIR_RAX \
VALGRIND_RESTORE_STACK \
VALGRIND_CFI_EPILOGUE \
: /*out*/ "=a" (_res) \
: /*in*/ "a" (&_argvec[0]) __FRAME_POINTER \
: /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r14", "r15" \
); \
lval = (__typeof__(lval)) _res; \
} while (0)
#define CALL_FN_W_5W(lval, orig, arg1,arg2,arg3,arg4,arg5) \
do { \
volatile OrigFn _orig = (orig); \
volatile unsigned long _argvec[6]; \
volatile unsigned long _res; \
_argvec[0] = (unsigned long)_orig.nraddr; \
_argvec[1] = (unsigned long)(arg1); \
_argvec[2] = (unsigned long)(arg2); \
_argvec[3] = (unsigned long)(arg3); \
_argvec[4] = (unsigned long)(arg4); \
_argvec[5] = (unsigned long)(arg5); \
__asm__ volatile( \
VALGRIND_CFI_PROLOGUE \
VALGRIND_ALIGN_STACK \
"subq $128,%%rsp\n\t" \
"movq 40(%%rax), %%r8\n\t" \
"movq 32(%%rax), %%rcx\n\t" \
"movq 24(%%rax), %%rdx\n\t" \
"movq 16(%%rax), %%rsi\n\t" \
"movq 8(%%rax), %%rdi\n\t" \
"movq (%%rax), %%rax\n\t" /* target->%rax */ \
VALGRIND_CALL_NOREDIR_RAX \
VALGRIND_RESTORE_STACK \
VALGRIND_CFI_EPILOGUE \
: /*out*/ "=a" (_res) \
: /*in*/ "a" (&_argvec[0]) __FRAME_POINTER \
: /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r14", "r15" \
); \
lval = (__typeof__(lval)) _res; \
} while (0)
#define CALL_FN_W_6W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6) \
do { \
volatile OrigFn _orig = (orig); \
volatile unsigned long _argvec[7]; \
volatile unsigned long _res; \
_argvec[0] = (unsigned long)_orig.nraddr; \
_argvec[1] = (unsigned long)(arg1); \
_argvec[2] = (unsigned long)(arg2); \
_argvec[3] = (unsigned long)(arg3); \
_argvec[4] = (unsigned long)(arg4); \
_argvec[5] = (unsigned long)(arg5); \
_argvec[6] = (unsigned long)(arg6); \
__asm__ volatile( \
VALGRIND_CFI_PROLOGUE \
VALGRIND_ALIGN_STACK \
"subq $128,%%rsp\n\t" \
"movq 48(%%rax), %%r9\n\t" \
"movq 40(%%rax), %%r8\n\t" \
"movq 32(%%rax), %%rcx\n\t" \
"movq 24(%%rax), %%rdx\n\t" \
"movq 16(%%rax), %%rsi\n\t" \
"movq 8(%%rax), %%rdi\n\t" \
"movq (%%rax), %%rax\n\t" /* target->%rax */ \
VALGRIND_CALL_NOREDIR_RAX \
VALGRIND_RESTORE_STACK \
VALGRIND_CFI_EPILOGUE \
: /*out*/ "=a" (_res) \
: /*in*/ "a" (&_argvec[0]) __FRAME_POINTER \
: /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r14", "r15" \
); \
lval = (__typeof__(lval)) _res; \
} while (0)
#define CALL_FN_W_7W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6, \
arg7) \
do { \
volatile OrigFn _orig = (orig); \
volatile unsigned long _argvec[8]; \
volatile unsigned long _res; \
_argvec[0] = (unsigned long)_orig.nraddr; \
_argvec[1] = (unsigned long)(arg1); \
_argvec[2] = (unsigned long)(arg2); \
_argvec[3] = (unsigned long)(arg3); \
_argvec[4] = (unsigned long)(arg4); \
_argvec[5] = (unsigned long)(arg5); \
_argvec[6] = (unsigned long)(arg6); \
_argvec[7] = (unsigned long)(arg7); \
__asm__ volatile( \
VALGRIND_CFI_PROLOGUE \
VALGRIND_ALIGN_STACK \
"subq $136,%%rsp\n\t" \
"pushq 56(%%rax)\n\t" \
"movq 48(%%rax), %%r9\n\t" \
"movq 40(%%rax), %%r8\n\t" \
"movq 32(%%rax), %%rcx\n\t" \
"movq 24(%%rax), %%rdx\n\t" \
"movq 16(%%rax), %%rsi\n\t" \
"movq 8(%%rax), %%rdi\n\t" \
"movq (%%rax), %%rax\n\t" /* target->%rax */ \
VALGRIND_CALL_NOREDIR_RAX \
VALGRIND_RESTORE_STACK \
VALGRIND_CFI_EPILOGUE \
: /*out*/ "=a" (_res) \
: /*in*/ "a" (&_argvec[0]) __FRAME_POINTER \
: /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r14", "r15" \
); \
lval = (__typeof__(lval)) _res; \
} while (0)
#define CALL_FN_W_8W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6, \
arg7,arg8) \
do { \
volatile OrigFn _orig = (orig); \
volatile unsigned long _argvec[9]; \
volatile unsigned long _res; \
_argvec[0] = (unsigned long)_orig.nraddr; \
_argvec[1] = (unsigned long)(arg1); \
_argvec[2] = (unsigned long)(arg2); \
_argvec[3] = (unsigned long)(arg3); \
_argvec[4] = (unsigned long)(arg4); \
_argvec[5] = (unsigned long)(arg5); \
_argvec[6] = (unsigned long)(arg6); \
_argvec[7] = (unsigned long)(arg7); \
_argvec[8] = (unsigned long)(arg8); \
__asm__ volatile( \
VALGRIND_CFI_PROLOGUE \
VALGRIND_ALIGN_STACK \
"subq $128,%%rsp\n\t" \
"pushq 64(%%rax)\n\t" \
"pushq 56(%%rax)\n\t" \
"movq 48(%%rax), %%r9\n\t" \
"movq 40(%%rax), %%r8\n\t" \
"movq 32(%%rax), %%rcx\n\t" \
"movq 24(%%rax), %%rdx\n\t" \
"movq 16(%%rax), %%rsi\n\t" \
"movq 8(%%rax), %%rdi\n\t" \
"movq (%%rax), %%rax\n\t" /* target->%rax */ \
VALGRIND_CALL_NOREDIR_RAX \
VALGRIND_RESTORE_STACK \
VALGRIND_CFI_EPILOGUE \
: /*out*/ "=a" (_res) \
: /*in*/ "a" (&_argvec[0]) __FRAME_POINTER \
: /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r14", "r15" \
); \
lval = (__typeof__(lval)) _res; \
} while (0)
#define CALL_FN_W_9W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6, \
arg7,arg8,arg9) \
do { \
volatile OrigFn _orig = (orig); \
volatile unsigned long _argvec[10]; \
volatile unsigned long _res; \
_argvec[0] = (unsigned long)_orig.nraddr; \
_argvec[1] = (unsigned long)(arg1); \
_argvec[2] = (unsigned long)(arg2); \
_argvec[3] = (unsigned long)(arg3); \
_argvec[4] = (unsigned long)(arg4); \
_argvec[5] = (unsigned long)(arg5); \
_argvec[6] = (unsigned long)(arg6); \
_argvec[7] = (unsigned long)(arg7); \
_argvec[8] = (unsigned long)(arg8); \
_argvec[9] = (unsigned long)(arg9); \
__asm__ volatile( \
VALGRIND_CFI_PROLOGUE \
VALGRIND_ALIGN_STACK \
"subq $136,%%rsp\n\t" \
"pushq 72(%%rax)\n\t" \
"pushq 64(%%rax)\n\t" \
"pushq 56(%%rax)\n\t" \
"movq 48(%%rax), %%r9\n\t" \
"movq 40(%%rax), %%r8\n\t" \
"movq 32(%%rax), %%rcx\n\t" \
"movq 24(%%rax), %%rdx\n\t" \
"movq 16(%%rax), %%rsi\n\t" \
"movq 8(%%rax), %%rdi\n\t" \
"movq (%%rax), %%rax\n\t" /* target->%rax */ \
VALGRIND_CALL_NOREDIR_RAX \
VALGRIND_RESTORE_STACK \
VALGRIND_CFI_EPILOGUE \
: /*out*/ "=a" (_res) \
: /*in*/ "a" (&_argvec[0]) __FRAME_POINTER \
: /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r14", "r15" \
); \
lval = (__typeof__(lval)) _res; \
} while (0)
#define CALL_FN_W_10W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6, \
arg7,arg8,arg9,arg10) \
do { \
volatile OrigFn _orig = (orig); \
volatile unsigned long _argvec[11]; \
volatile unsigned long _res; \
_argvec[0] = (unsigned long)_orig.nraddr; \
_argvec[1] = (unsigned long)(arg1); \
_argvec[2] = (unsigned long)(arg2); \
_argvec[3] = (unsigned long)(arg3); \
_argvec[4] = (unsigned long)(arg4); \
_argvec[5] = (unsigned long)(arg5); \
_argvec[6] = (unsigned long)(arg6); \
_argvec[7] = (unsigned long)(arg7); \
_argvec[8] = (unsigned long)(arg8); \
_argvec[9] = (unsigned long)(arg9); \
_argvec[10] = (unsigned long)(arg10); \
__asm__ volatile( \
VALGRIND_CFI_PROLOGUE \
VALGRIND_ALIGN_STACK \
"subq $128,%%rsp\n\t" \
"pushq 80(%%rax)\n\t" \
"pushq 72(%%rax)\n\t" \
"pushq 64(%%rax)\n\t" \
"pushq 56(%%rax)\n\t" \
"movq 48(%%rax), %%r9\n\t" \
"movq 40(%%rax), %%r8\n\t" \
"movq 32(%%rax), %%rcx\n\t" \
"movq 24(%%rax), %%rdx\n\t" \
"movq 16(%%rax), %%rsi\n\t" \
"movq 8(%%rax), %%rdi\n\t" \
"movq (%%rax), %%rax\n\t" /* target->%rax */ \
VALGRIND_CALL_NOREDIR_RAX \
VALGRIND_RESTORE_STACK \
VALGRIND_CFI_EPILOGUE \
: /*out*/ "=a" (_res) \
: /*in*/ "a" (&_argvec[0]) __FRAME_POINTER \
: /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r14", "r15" \
); \
lval = (__typeof__(lval)) _res; \
} while (0)
#define CALL_FN_W_11W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6, \
arg7,arg8,arg9,arg10,arg11) \
do { \
volatile OrigFn _orig = (orig); \
volatile unsigned long _argvec[12]; \
volatile unsigned long _res; \
_argvec[0] = (unsigned long)_orig.nraddr; \
_argvec[1] = (unsigned long)(arg1); \
_argvec[2] = (unsigned long)(arg2); \
_argvec[3] = (unsigned long)(arg3); \
_argvec[4] = (unsigned long)(arg4); \
_argvec[5] = (unsigned long)(arg5); \
_argvec[6] = (unsigned long)(arg6); \
_argvec[7] = (unsigned long)(arg7); \
_argvec[8] = (unsigned long)(arg8); \
_argvec[9] = (unsigned long)(arg9); \
_argvec[10] = (unsigned long)(arg10); \
_argvec[11] = (unsigned long)(arg11); \
__asm__ volatile( \
VALGRIND_CFI_PROLOGUE \
VALGRIND_ALIGN_STACK \
"subq $136,%%rsp\n\t" \
"pushq 88(%%rax)\n\t" \
"pushq 80(%%rax)\n\t" \
"pushq 72(%%rax)\n\t" \
"pushq 64(%%rax)\n\t" \
"pushq 56(%%rax)\n\t" \
"movq 48(%%rax), %%r9\n\t" \
"movq 40(%%rax), %%r8\n\t" \
"movq 32(%%rax), %%rcx\n\t" \
"movq 24(%%rax), %%rdx\n\t" \
"movq 16(%%rax), %%rsi\n\t" \
"movq 8(%%rax), %%rdi\n\t" \
"movq (%%rax), %%rax\n\t" /* target->%rax */ \
VALGRIND_CALL_NOREDIR_RAX \
VALGRIND_RESTORE_STACK \
VALGRIND_CFI_EPILOGUE \
: /*out*/ "=a" (_res) \
: /*in*/ "a" (&_argvec[0]) __FRAME_POINTER \
: /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r14", "r15" \
); \
lval = (__typeof__(lval)) _res; \
} while (0)
#define CALL_FN_W_12W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6, \
arg7,arg8,arg9,arg10,arg11,arg12) \
do { \
volatile OrigFn _orig = (orig); \
volatile unsigned long _argvec[13]; \
volatile unsigned long _res; \
_argvec[0] = (unsigned long)_orig.nraddr; \
_argvec[1] = (unsigned long)(arg1); \
_argvec[2] = (unsigned long)(arg2); \
_argvec[3] = (unsigned long)(arg3); \
_argvec[4] = (unsigned long)(arg4); \
_argvec[5] = (unsigned long)(arg5); \
_argvec[6] = (unsigned long)(arg6); \
_argvec[7] = (unsigned long)(arg7); \
_argvec[8] = (unsigned long)(arg8); \
_argvec[9] = (unsigned long)(arg9); \
_argvec[10] = (unsigned long)(arg10); \
_argvec[11] = (unsigned long)(arg11); \
_argvec[12] = (unsigned long)(arg12); \
__asm__ volatile( \
VALGRIND_CFI_PROLOGUE \
VALGRIND_ALIGN_STACK \
"subq $128,%%rsp\n\t" \
"pushq 96(%%rax)\n\t" \
"pushq 88(%%rax)\n\t" \
"pushq 80(%%rax)\n\t" \
"pushq 72(%%rax)\n\t" \
"pushq 64(%%rax)\n\t" \
"pushq 56(%%rax)\n\t" \
"movq 48(%%rax), %%r9\n\t" \
"movq 40(%%rax), %%r8\n\t" \
"movq 32(%%rax), %%rcx\n\t" \
"movq 24(%%rax), %%rdx\n\t" \
"movq 16(%%rax), %%rsi\n\t" \
"movq 8(%%rax), %%rdi\n\t" \
"movq (%%rax), %%rax\n\t" /* target->%rax */ \
VALGRIND_CALL_NOREDIR_RAX \
VALGRIND_RESTORE_STACK \
VALGRIND_CFI_EPILOGUE \
: /*out*/ "=a" (_res) \
: /*in*/ "a" (&_argvec[0]) __FRAME_POINTER \
: /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r14", "r15" \
); \
lval = (__typeof__(lval)) _res; \
} while (0)
#endif /* PLAT_amd64_linux || PLAT_amd64_darwin || PLAT_amd64_solaris */
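/* A note on usage: the per-platform CALL_FN_W_* families (defined in the
   sections above and below) are intended to be used from inside a function
   wrapper, together with OrigFn and VALGRIND_GET_ORIG_FN.  An illustrative
   sketch only; 'foo' is a hypothetical two-argument function in the main
   executable, and the Z-encoded wrapper name follows the
   I_WRAP_SONAME_FNNAME_ZU convention described in the Valgrind manual:

      int I_WRAP_SONAME_FNNAME_ZU(NONE, foo) ( int x, int y )
      {
         int    result;
         OrigFn fn;
         VALGRIND_GET_ORIG_FN(fn);
         // ... pre-call instrumentation goes here ...
         CALL_FN_W_WW(result, fn, x, y);    // calls the real foo(x, y)
         // ... post-call instrumentation goes here ...
         return result;
      }
*/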
/* ------------------------ ppc32-linux ------------------------ */
#if defined(PLAT_ppc32_linux)
/* This is useful for finding out about the on-stack stuff:
extern int f9 ( int,int,int,int,int,int,int,int,int );
extern int f10 ( int,int,int,int,int,int,int,int,int,int );
extern int f11 ( int,int,int,int,int,int,int,int,int,int,int );
extern int f12 ( int,int,int,int,int,int,int,int,int,int,int,int );
int g9 ( void ) {
return f9(11,22,33,44,55,66,77,88,99);
}
int g10 ( void ) {
return f10(11,22,33,44,55,66,77,88,99,110);
}
int g11 ( void ) {
return f11(11,22,33,44,55,66,77,88,99,110,121);
}
int g12 ( void ) {
return f12(11,22,33,44,55,66,77,88,99,110,121,132);
}
*/
/* ARGREGS: r3 r4 r5 r6 r7 r8 r9 r10 (the rest on stack somewhere) */
/* These regs are trashed by the hidden call. */
#define __CALLER_SAVED_REGS \
"lr", "ctr", "xer", \
"cr0", "cr1", "cr2", "cr3", "cr4", "cr5", "cr6", "cr7", \
"r0", "r2", "r3", "r4", "r5", "r6", "r7", "r8", "r9", "r10", \
"r11", "r12", "r13"
/* Macros to save and align the stack before making a function
call and restore it afterwards as gcc may not keep the stack
pointer aligned if it doesn't realise calls are being made
to other functions. */
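/* Concretely, "rlwinm 1,1,0,0,27" below keeps bits 0..27 of r1 and clears
   the low four bits, rounding the stack pointer down to a 16-byte boundary;
   the original value is kept in r28 and put back by VALGRIND_RESTORE_STACK. */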
#define VALGRIND_ALIGN_STACK \
"mr 28,1\n\t" \
"rlwinm 1,1,0,0,27\n\t"
#define VALGRIND_RESTORE_STACK \
"mr 1,28\n\t"
/* These CALL_FN_ macros assume that on ppc32-linux,
sizeof(unsigned long) == 4. */
#define CALL_FN_W_v(lval, orig) \
do { \
volatile OrigFn _orig = (orig); \
volatile unsigned long _argvec[1]; \
volatile unsigned long _res; \
_argvec[0] = (unsigned long)_orig.nraddr; \
__asm__ volatile( \
VALGRIND_ALIGN_STACK \
"mr 11,%1\n\t" \
"lwz 11,0(11)\n\t" /* target->r11 */ \
VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11 \
VALGRIND_RESTORE_STACK \
"mr %0,3" \
: /*out*/ "=r" (_res) \
: /*in*/ "r" (&_argvec[0]) \
: /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r28" \
); \
lval = (__typeof__(lval)) _res; \
} while (0)
#define CALL_FN_W_W(lval, orig, arg1) \
do { \
volatile OrigFn _orig = (orig); \
volatile unsigned long _argvec[2]; \
volatile unsigned long _res; \
_argvec[0] = (unsigned long)_orig.nraddr; \
_argvec[1] = (unsigned long)arg1; \
__asm__ volatile( \
VALGRIND_ALIGN_STACK \
"mr 11,%1\n\t" \
"lwz 3,4(11)\n\t" /* arg1->r3 */ \
"lwz 11,0(11)\n\t" /* target->r11 */ \
VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11 \
VALGRIND_RESTORE_STACK \
"mr %0,3" \
: /*out*/ "=r" (_res) \
: /*in*/ "r" (&_argvec[0]) \
: /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r28" \
); \
lval = (__typeof__(lval)) _res; \
} while (0)
#define CALL_FN_W_WW(lval, orig, arg1,arg2) \
do { \
volatile OrigFn _orig = (orig); \
volatile unsigned long _argvec[3]; \
volatile unsigned long _res; \
_argvec[0] = (unsigned long)_orig.nraddr; \
_argvec[1] = (unsigned long)arg1; \
_argvec[2] = (unsigned long)arg2; \
__asm__ volatile( \
VALGRIND_ALIGN_STACK \
"mr 11,%1\n\t" \
"lwz 3,4(11)\n\t" /* arg1->r3 */ \
"lwz 4,8(11)\n\t" \
"lwz 11,0(11)\n\t" /* target->r11 */ \
VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11 \
VALGRIND_RESTORE_STACK \
"mr %0,3" \
: /*out*/ "=r" (_res) \
: /*in*/ "r" (&_argvec[0]) \
: /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r28" \
); \
lval = (__typeof__(lval)) _res; \
} while (0)
#define CALL_FN_W_WWW(lval, orig, arg1,arg2,arg3) \
do { \
volatile OrigFn _orig = (orig); \
volatile unsigned long _argvec[4]; \
volatile unsigned long _res; \
_argvec[0] = (unsigned long)_orig.nraddr; \
_argvec[1] = (unsigned long)arg1; \
_argvec[2] = (unsigned long)arg2; \
_argvec[3] = (unsigned long)arg3; \
__asm__ volatile( \
VALGRIND_ALIGN_STACK \
"mr 11,%1\n\t" \
"lwz 3,4(11)\n\t" /* arg1->r3 */ \
"lwz 4,8(11)\n\t" \
"lwz 5,12(11)\n\t" \
"lwz 11,0(11)\n\t" /* target->r11 */ \
VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11 \
VALGRIND_RESTORE_STACK \
"mr %0,3" \
: /*out*/ "=r" (_res) \
: /*in*/ "r" (&_argvec[0]) \
: /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r28" \
); \
lval = (__typeof__(lval)) _res; \
} while (0)
#define CALL_FN_W_WWWW(lval, orig, arg1,arg2,arg3,arg4) \
do { \
volatile OrigFn _orig = (orig); \
volatile unsigned long _argvec[5]; \
volatile unsigned long _res; \
_argvec[0] = (unsigned long)_orig.nraddr; \
_argvec[1] = (unsigned long)arg1; \
_argvec[2] = (unsigned long)arg2; \
_argvec[3] = (unsigned long)arg3; \
_argvec[4] = (unsigned long)arg4; \
__asm__ volatile( \
VALGRIND_ALIGN_STACK \
"mr 11,%1\n\t" \
"lwz 3,4(11)\n\t" /* arg1->r3 */ \
"lwz 4,8(11)\n\t" \
"lwz 5,12(11)\n\t" \
"lwz 6,16(11)\n\t" /* arg4->r6 */ \
"lwz 11,0(11)\n\t" /* target->r11 */ \
VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11 \
VALGRIND_RESTORE_STACK \
"mr %0,3" \
: /*out*/ "=r" (_res) \
: /*in*/ "r" (&_argvec[0]) \
: /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r28" \
); \
lval = (__typeof__(lval)) _res; \
} while (0)
#define CALL_FN_W_5W(lval, orig, arg1,arg2,arg3,arg4,arg5) \
do { \
volatile OrigFn _orig = (orig); \
volatile unsigned long _argvec[6]; \
volatile unsigned long _res; \
_argvec[0] = (unsigned long)_orig.nraddr; \
_argvec[1] = (unsigned long)arg1; \
_argvec[2] = (unsigned long)arg2; \
_argvec[3] = (unsigned long)arg3; \
_argvec[4] = (unsigned long)arg4; \
_argvec[5] = (unsigned long)arg5; \
__asm__ volatile( \
VALGRIND_ALIGN_STACK \
"mr 11,%1\n\t" \
"lwz 3,4(11)\n\t" /* arg1->r3 */ \
"lwz 4,8(11)\n\t" \
"lwz 5,12(11)\n\t" \
"lwz 6,16(11)\n\t" /* arg4->r6 */ \
"lwz 7,20(11)\n\t" \
"lwz 11,0(11)\n\t" /* target->r11 */ \
VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11 \
VALGRIND_RESTORE_STACK \
"mr %0,3" \
: /*out*/ "=r" (_res) \
: /*in*/ "r" (&_argvec[0]) \
: /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r28" \
); \
lval = (__typeof__(lval)) _res; \
} while (0)
#define CALL_FN_W_6W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6) \
do { \
volatile OrigFn _orig = (orig); \
volatile unsigned long _argvec[7]; \
volatile unsigned long _res; \
_argvec[0] = (unsigned long)_orig.nraddr; \
_argvec[1] = (unsigned long)arg1; \
_argvec[2] = (unsigned long)arg2; \
_argvec[3] = (unsigned long)arg3; \
_argvec[4] = (unsigned long)arg4; \
_argvec[5] = (unsigned long)arg5; \
_argvec[6] = (unsigned long)arg6; \
__asm__ volatile( \
VALGRIND_ALIGN_STACK \
"mr 11,%1\n\t" \
"lwz 3,4(11)\n\t" /* arg1->r3 */ \
"lwz 4,8(11)\n\t" \
"lwz 5,12(11)\n\t" \
"lwz 6,16(11)\n\t" /* arg4->r6 */ \
"lwz 7,20(11)\n\t" \
"lwz 8,24(11)\n\t" \
"lwz 11,0(11)\n\t" /* target->r11 */ \
VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11 \
VALGRIND_RESTORE_STACK \
"mr %0,3" \
: /*out*/ "=r" (_res) \
: /*in*/ "r" (&_argvec[0]) \
: /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r28" \
); \
lval = (__typeof__(lval)) _res; \
} while (0)
#define CALL_FN_W_7W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6, \
arg7) \
do { \
volatile OrigFn _orig = (orig); \
volatile unsigned long _argvec[8]; \
volatile unsigned long _res; \
_argvec[0] = (unsigned long)_orig.nraddr; \
_argvec[1] = (unsigned long)arg1; \
_argvec[2] = (unsigned long)arg2; \
_argvec[3] = (unsigned long)arg3; \
_argvec[4] = (unsigned long)arg4; \
_argvec[5] = (unsigned long)arg5; \
_argvec[6] = (unsigned long)arg6; \
_argvec[7] = (unsigned long)arg7; \
__asm__ volatile( \
VALGRIND_ALIGN_STACK \
"mr 11,%1\n\t" \
"lwz 3,4(11)\n\t" /* arg1->r3 */ \
"lwz 4,8(11)\n\t" \
"lwz 5,12(11)\n\t" \
"lwz 6,16(11)\n\t" /* arg4->r6 */ \
"lwz 7,20(11)\n\t" \
"lwz 8,24(11)\n\t" \
"lwz 9,28(11)\n\t" \
"lwz 11,0(11)\n\t" /* target->r11 */ \
VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11 \
VALGRIND_RESTORE_STACK \
"mr %0,3" \
: /*out*/ "=r" (_res) \
: /*in*/ "r" (&_argvec[0]) \
: /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r28" \
); \
lval = (__typeof__(lval)) _res; \
} while (0)
#define CALL_FN_W_8W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6, \
arg7,arg8) \
do { \
volatile OrigFn _orig = (orig); \
volatile unsigned long _argvec[9]; \
volatile unsigned long _res; \
_argvec[0] = (unsigned long)_orig.nraddr; \
_argvec[1] = (unsigned long)arg1; \
_argvec[2] = (unsigned long)arg2; \
_argvec[3] = (unsigned long)arg3; \
_argvec[4] = (unsigned long)arg4; \
_argvec[5] = (unsigned long)arg5; \
_argvec[6] = (unsigned long)arg6; \
_argvec[7] = (unsigned long)arg7; \
_argvec[8] = (unsigned long)arg8; \
__asm__ volatile( \
VALGRIND_ALIGN_STACK \
"mr 11,%1\n\t" \
"lwz 3,4(11)\n\t" /* arg1->r3 */ \
"lwz 4,8(11)\n\t" \
"lwz 5,12(11)\n\t" \
"lwz 6,16(11)\n\t" /* arg4->r6 */ \
"lwz 7,20(11)\n\t" \
"lwz 8,24(11)\n\t" \
"lwz 9,28(11)\n\t" \
"lwz 10,32(11)\n\t" /* arg8->r10 */ \
"lwz 11,0(11)\n\t" /* target->r11 */ \
VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11 \
VALGRIND_RESTORE_STACK \
"mr %0,3" \
: /*out*/ "=r" (_res) \
: /*in*/ "r" (&_argvec[0]) \
: /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r28" \
); \
lval = (__typeof__(lval)) _res; \
} while (0)
#define CALL_FN_W_9W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6, \
arg7,arg8,arg9) \
do { \
volatile OrigFn _orig = (orig); \
volatile unsigned long _argvec[10]; \
volatile unsigned long _res; \
_argvec[0] = (unsigned long)_orig.nraddr; \
_argvec[1] = (unsigned long)arg1; \
_argvec[2] = (unsigned long)arg2; \
_argvec[3] = (unsigned long)arg3; \
_argvec[4] = (unsigned long)arg4; \
_argvec[5] = (unsigned long)arg5; \
_argvec[6] = (unsigned long)arg6; \
_argvec[7] = (unsigned long)arg7; \
_argvec[8] = (unsigned long)arg8; \
_argvec[9] = (unsigned long)arg9; \
__asm__ volatile( \
VALGRIND_ALIGN_STACK \
"mr 11,%1\n\t" \
"addi 1,1,-16\n\t" \
/* arg9 */ \
"lwz 3,36(11)\n\t" \
"stw 3,8(1)\n\t" \
/* args1-8 */ \
"lwz 3,4(11)\n\t" /* arg1->r3 */ \
"lwz 4,8(11)\n\t" \
"lwz 5,12(11)\n\t" \
"lwz 6,16(11)\n\t" /* arg4->r6 */ \
"lwz 7,20(11)\n\t" \
"lwz 8,24(11)\n\t" \
"lwz 9,28(11)\n\t" \
"lwz 10,32(11)\n\t" /* arg8->r10 */ \
"lwz 11,0(11)\n\t" /* target->r11 */ \
VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11 \
VALGRIND_RESTORE_STACK \
"mr %0,3" \
: /*out*/ "=r" (_res) \
: /*in*/ "r" (&_argvec[0]) \
: /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r28" \
); \
lval = (__typeof__(lval)) _res; \
} while (0)
#define CALL_FN_W_10W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6, \
arg7,arg8,arg9,arg10) \
do { \
volatile OrigFn _orig = (orig); \
volatile unsigned long _argvec[11]; \
volatile unsigned long _res; \
_argvec[0] = (unsigned long)_orig.nraddr; \
_argvec[1] = (unsigned long)arg1; \
_argvec[2] = (unsigned long)arg2; \
_argvec[3] = (unsigned long)arg3; \
_argvec[4] = (unsigned long)arg4; \
_argvec[5] = (unsigned long)arg5; \
_argvec[6] = (unsigned long)arg6; \
_argvec[7] = (unsigned long)arg7; \
_argvec[8] = (unsigned long)arg8; \
_argvec[9] = (unsigned long)arg9; \
_argvec[10] = (unsigned long)arg10; \
__asm__ volatile( \
VALGRIND_ALIGN_STACK \
"mr 11,%1\n\t" \
"addi 1,1,-16\n\t" \
/* arg10 */ \
"lwz 3,40(11)\n\t" \
"stw 3,12(1)\n\t" \
/* arg9 */ \
"lwz 3,36(11)\n\t" \
"stw 3,8(1)\n\t" \
/* args1-8 */ \
"lwz 3,4(11)\n\t" /* arg1->r3 */ \
"lwz 4,8(11)\n\t" \
"lwz 5,12(11)\n\t" \
"lwz 6,16(11)\n\t" /* arg4->r6 */ \
"lwz 7,20(11)\n\t" \
"lwz 8,24(11)\n\t" \
"lwz 9,28(11)\n\t" \
"lwz 10,32(11)\n\t" /* arg8->r10 */ \
"lwz 11,0(11)\n\t" /* target->r11 */ \
VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11 \
VALGRIND_RESTORE_STACK \
"mr %0,3" \
: /*out*/ "=r" (_res) \
: /*in*/ "r" (&_argvec[0]) \
: /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r28" \
); \
lval = (__typeof__(lval)) _res; \
} while (0)
#define CALL_FN_W_11W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6, \
arg7,arg8,arg9,arg10,arg11) \
do { \
volatile OrigFn _orig = (orig); \
volatile unsigned long _argvec[12]; \
volatile unsigned long _res; \
_argvec[0] = (unsigned long)_orig.nraddr; \
_argvec[1] = (unsigned long)arg1; \
_argvec[2] = (unsigned long)arg2; \
_argvec[3] = (unsigned long)arg3; \
_argvec[4] = (unsigned long)arg4; \
_argvec[5] = (unsigned long)arg5; \
_argvec[6] = (unsigned long)arg6; \
_argvec[7] = (unsigned long)arg7; \
_argvec[8] = (unsigned long)arg8; \
_argvec[9] = (unsigned long)arg9; \
_argvec[10] = (unsigned long)arg10; \
_argvec[11] = (unsigned long)arg11; \
__asm__ volatile( \
VALGRIND_ALIGN_STACK \
"mr 11,%1\n\t" \
"addi 1,1,-32\n\t" \
/* arg11 */ \
"lwz 3,44(11)\n\t" \
"stw 3,16(1)\n\t" \
/* arg10 */ \
"lwz 3,40(11)\n\t" \
"stw 3,12(1)\n\t" \
/* arg9 */ \
"lwz 3,36(11)\n\t" \
"stw 3,8(1)\n\t" \
/* args1-8 */ \
"lwz 3,4(11)\n\t" /* arg1->r3 */ \
"lwz 4,8(11)\n\t" \
"lwz 5,12(11)\n\t" \
"lwz 6,16(11)\n\t" /* arg4->r6 */ \
"lwz 7,20(11)\n\t" \
"lwz 8,24(11)\n\t" \
"lwz 9,28(11)\n\t" \
"lwz 10,32(11)\n\t" /* arg8->r10 */ \
"lwz 11,0(11)\n\t" /* target->r11 */ \
VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11 \
VALGRIND_RESTORE_STACK \
"mr %0,3" \
: /*out*/ "=r" (_res) \
: /*in*/ "r" (&_argvec[0]) \
: /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r28" \
); \
lval = (__typeof__(lval)) _res; \
} while (0)
#define CALL_FN_W_12W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6, \
arg7,arg8,arg9,arg10,arg11,arg12) \
do { \
volatile OrigFn _orig = (orig); \
volatile unsigned long _argvec[13]; \
volatile unsigned long _res; \
_argvec[0] = (unsigned long)_orig.nraddr; \
_argvec[1] = (unsigned long)arg1; \
_argvec[2] = (unsigned long)arg2; \
_argvec[3] = (unsigned long)arg3; \
_argvec[4] = (unsigned long)arg4; \
_argvec[5] = (unsigned long)arg5; \
_argvec[6] = (unsigned long)arg6; \
_argvec[7] = (unsigned long)arg7; \
_argvec[8] = (unsigned long)arg8; \
_argvec[9] = (unsigned long)arg9; \
_argvec[10] = (unsigned long)arg10; \
_argvec[11] = (unsigned long)arg11; \
_argvec[12] = (unsigned long)arg12; \
__asm__ volatile( \
VALGRIND_ALIGN_STACK \
"mr 11,%1\n\t" \
"addi 1,1,-32\n\t" \
/* arg12 */ \
"lwz 3,48(11)\n\t" \
"stw 3,20(1)\n\t" \
/* arg11 */ \
"lwz 3,44(11)\n\t" \
"stw 3,16(1)\n\t" \
/* arg10 */ \
"lwz 3,40(11)\n\t" \
"stw 3,12(1)\n\t" \
/* arg9 */ \
"lwz 3,36(11)\n\t" \
"stw 3,8(1)\n\t" \
/* args1-8 */ \
"lwz 3,4(11)\n\t" /* arg1->r3 */ \
"lwz 4,8(11)\n\t" \
"lwz 5,12(11)\n\t" \
"lwz 6,16(11)\n\t" /* arg4->r6 */ \
"lwz 7,20(11)\n\t" \
"lwz 8,24(11)\n\t" \
"lwz 9,28(11)\n\t" \
"lwz 10,32(11)\n\t" /* arg8->r10 */ \
"lwz 11,0(11)\n\t" /* target->r11 */ \
VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11 \
VALGRIND_RESTORE_STACK \
"mr %0,3" \
: /*out*/ "=r" (_res) \
: /*in*/ "r" (&_argvec[0]) \
: /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r28" \
); \
lval = (__typeof__(lval)) _res; \
} while (0)
#endif /* PLAT_ppc32_linux */
/* ------------------------ ppc64-linux ------------------------ */
#if defined(PLAT_ppc64be_linux)
/* ARGREGS: r3 r4 r5 r6 r7 r8 r9 r10 (the rest on stack somewhere) */
/* These regs are trashed by the hidden call. */
#define __CALLER_SAVED_REGS \
"lr", "ctr", "xer", \
"cr0", "cr1", "cr2", "cr3", "cr4", "cr5", "cr6", "cr7", \
"r0", "r3", "r4", "r5", "r6", "r7", "r8", "r9", "r10", \
"r11", "r12", "r13"
/* Macros to save and align the stack before making a function
call and restore it afterwards as gcc may not keep the stack
pointer aligned if it doesn't realise calls are being made
to other functions. */
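/* Here the alignment is done with "rldicr 1,1,0,59", which clears bits
   60..63 of r1 and so rounds the stack pointer down to a 16-byte boundary;
   r28 preserves the original value. */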
#define VALGRIND_ALIGN_STACK \
"mr 28,1\n\t" \
"rldicr 1,1,0,59\n\t"
#define VALGRIND_RESTORE_STACK \
"mr 1,28\n\t"
/* These CALL_FN_ macros assume that on ppc64-linux, sizeof(unsigned
long) == 8. */
#define CALL_FN_W_v(lval, orig) \
do { \
volatile OrigFn _orig = (orig); \
volatile unsigned long _argvec[3+0]; \
volatile unsigned long _res; \
/* _argvec[0] holds current r2 across the call */ \
_argvec[1] = (unsigned long)_orig.r2; \
_argvec[2] = (unsigned long)_orig.nraddr; \
__asm__ volatile( \
VALGRIND_ALIGN_STACK \
"mr 11,%1\n\t" \
"std 2,-16(11)\n\t" /* save tocptr */ \
"ld 2,-8(11)\n\t" /* use nraddr's tocptr */ \
"ld 11, 0(11)\n\t" /* target->r11 */ \
VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11 \
"mr 11,%1\n\t" \
"mr %0,3\n\t" \
"ld 2,-16(11)\n\t" /* restore tocptr */ \
VALGRIND_RESTORE_STACK \
: /*out*/ "=r" (_res) \
: /*in*/ "r" (&_argvec[2]) \
: /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r28" \
); \
lval = (__typeof__(lval)) _res; \
} while (0)
#define CALL_FN_W_W(lval, orig, arg1) \
do { \
volatile OrigFn _orig = (orig); \
volatile unsigned long _argvec[3+1]; \
volatile unsigned long _res; \
/* _argvec[0] holds current r2 across the call */ \
_argvec[1] = (unsigned long)_orig.r2; \
_argvec[2] = (unsigned long)_orig.nraddr; \
_argvec[2+1] = (unsigned long)arg1; \
__asm__ volatile( \
VALGRIND_ALIGN_STACK \
"mr 11,%1\n\t" \
"std 2,-16(11)\n\t" /* save tocptr */ \
"ld 2,-8(11)\n\t" /* use nraddr's tocptr */ \
"ld 3, 8(11)\n\t" /* arg1->r3 */ \
"ld 11, 0(11)\n\t" /* target->r11 */ \
VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11 \
"mr 11,%1\n\t" \
"mr %0,3\n\t" \
"ld 2,-16(11)\n\t" /* restore tocptr */ \
VALGRIND_RESTORE_STACK \
: /*out*/ "=r" (_res) \
: /*in*/ "r" (&_argvec[2]) \
: /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r28" \
); \
lval = (__typeof__(lval)) _res; \
} while (0)
#define CALL_FN_W_WW(lval, orig, arg1,arg2) \
do { \
volatile OrigFn _orig = (orig); \
volatile unsigned long _argvec[3+2]; \
volatile unsigned long _res; \
/* _argvec[0] holds current r2 across the call */ \
_argvec[1] = (unsigned long)_orig.r2; \
_argvec[2] = (unsigned long)_orig.nraddr; \
_argvec[2+1] = (unsigned long)arg1; \
_argvec[2+2] = (unsigned long)arg2; \
__asm__ volatile( \
VALGRIND_ALIGN_STACK \
"mr 11,%1\n\t" \
"std 2,-16(11)\n\t" /* save tocptr */ \
"ld 2,-8(11)\n\t" /* use nraddr's tocptr */ \
"ld 3, 8(11)\n\t" /* arg1->r3 */ \
"ld 4, 16(11)\n\t" /* arg2->r4 */ \
"ld 11, 0(11)\n\t" /* target->r11 */ \
VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11 \
"mr 11,%1\n\t" \
"mr %0,3\n\t" \
"ld 2,-16(11)\n\t" /* restore tocptr */ \
VALGRIND_RESTORE_STACK \
: /*out*/ "=r" (_res) \
: /*in*/ "r" (&_argvec[2]) \
: /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r28" \
); \
lval = (__typeof__(lval)) _res; \
} while (0)
#define CALL_FN_W_WWW(lval, orig, arg1,arg2,arg3) \
do { \
volatile OrigFn _orig = (orig); \
volatile unsigned long _argvec[3+3]; \
volatile unsigned long _res; \
/* _argvec[0] holds current r2 across the call */ \
_argvec[1] = (unsigned long)_orig.r2; \
_argvec[2] = (unsigned long)_orig.nraddr; \
_argvec[2+1] = (unsigned long)arg1; \
_argvec[2+2] = (unsigned long)arg2; \
_argvec[2+3] = (unsigned long)arg3; \
__asm__ volatile( \
VALGRIND_ALIGN_STACK \
"mr 11,%1\n\t" \
"std 2,-16(11)\n\t" /* save tocptr */ \
"ld 2,-8(11)\n\t" /* use nraddr's tocptr */ \
"ld 3, 8(11)\n\t" /* arg1->r3 */ \
"ld 4, 16(11)\n\t" /* arg2->r4 */ \
"ld 5, 24(11)\n\t" /* arg3->r5 */ \
"ld 11, 0(11)\n\t" /* target->r11 */ \
VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11 \
"mr 11,%1\n\t" \
"mr %0,3\n\t" \
"ld 2,-16(11)\n\t" /* restore tocptr */ \
VALGRIND_RESTORE_STACK \
: /*out*/ "=r" (_res) \
: /*in*/ "r" (&_argvec[2]) \
: /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r28" \
); \
lval = (__typeof__(lval)) _res; \
} while (0)
#define CALL_FN_W_WWWW(lval, orig, arg1,arg2,arg3,arg4) \
do { \
volatile OrigFn _orig = (orig); \
volatile unsigned long _argvec[3+4]; \
volatile unsigned long _res; \
/* _argvec[0] holds current r2 across the call */ \
_argvec[1] = (unsigned long)_orig.r2; \
_argvec[2] = (unsigned long)_orig.nraddr; \
_argvec[2+1] = (unsigned long)arg1; \
_argvec[2+2] = (unsigned long)arg2; \
_argvec[2+3] = (unsigned long)arg3; \
_argvec[2+4] = (unsigned long)arg4; \
__asm__ volatile( \
VALGRIND_ALIGN_STACK \
"mr 11,%1\n\t" \
"std 2,-16(11)\n\t" /* save tocptr */ \
"ld 2,-8(11)\n\t" /* use nraddr's tocptr */ \
"ld 3, 8(11)\n\t" /* arg1->r3 */ \
"ld 4, 16(11)\n\t" /* arg2->r4 */ \
"ld 5, 24(11)\n\t" /* arg3->r5 */ \
"ld 6, 32(11)\n\t" /* arg4->r6 */ \
"ld 11, 0(11)\n\t" /* target->r11 */ \
VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11 \
"mr 11,%1\n\t" \
"mr %0,3\n\t" \
"ld 2,-16(11)\n\t" /* restore tocptr */ \
VALGRIND_RESTORE_STACK \
: /*out*/ "=r" (_res) \
: /*in*/ "r" (&_argvec[2]) \
: /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r28" \
); \
lval = (__typeof__(lval)) _res; \
} while (0)
#define CALL_FN_W_5W(lval, orig, arg1,arg2,arg3,arg4,arg5) \
do { \
volatile OrigFn _orig = (orig); \
volatile unsigned long _argvec[3+5]; \
volatile unsigned long _res; \
/* _argvec[0] holds current r2 across the call */ \
_argvec[1] = (unsigned long)_orig.r2; \
_argvec[2] = (unsigned long)_orig.nraddr; \
_argvec[2+1] = (unsigned long)arg1; \
_argvec[2+2] = (unsigned long)arg2; \
_argvec[2+3] = (unsigned long)arg3; \
_argvec[2+4] = (unsigned long)arg4; \
_argvec[2+5] = (unsigned long)arg5; \
__asm__ volatile( \
VALGRIND_ALIGN_STACK \
"mr 11,%1\n\t" \
"std 2,-16(11)\n\t" /* save tocptr */ \
"ld 2,-8(11)\n\t" /* use nraddr's tocptr */ \
"ld 3, 8(11)\n\t" /* arg1->r3 */ \
"ld 4, 16(11)\n\t" /* arg2->r4 */ \
"ld 5, 24(11)\n\t" /* arg3->r5 */ \
"ld 6, 32(11)\n\t" /* arg4->r6 */ \
"ld 7, 40(11)\n\t" /* arg5->r7 */ \
"ld 11, 0(11)\n\t" /* target->r11 */ \
VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11 \
"mr 11,%1\n\t" \
"mr %0,3\n\t" \
"ld 2,-16(11)\n\t" /* restore tocptr */ \
VALGRIND_RESTORE_STACK \
: /*out*/ "=r" (_res) \
: /*in*/ "r" (&_argvec[2]) \
: /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r28" \
); \
lval = (__typeof__(lval)) _res; \
} while (0)
#define CALL_FN_W_6W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6) \
do { \
volatile OrigFn _orig = (orig); \
volatile unsigned long _argvec[3+6]; \
volatile unsigned long _res; \
/* _argvec[0] holds current r2 across the call */ \
_argvec[1] = (unsigned long)_orig.r2; \
_argvec[2] = (unsigned long)_orig.nraddr; \
_argvec[2+1] = (unsigned long)arg1; \
_argvec[2+2] = (unsigned long)arg2; \
_argvec[2+3] = (unsigned long)arg3; \
_argvec[2+4] = (unsigned long)arg4; \
_argvec[2+5] = (unsigned long)arg5; \
_argvec[2+6] = (unsigned long)arg6; \
__asm__ volatile( \
VALGRIND_ALIGN_STACK \
"mr 11,%1\n\t" \
"std 2,-16(11)\n\t" /* save tocptr */ \
"ld 2,-8(11)\n\t" /* use nraddr's tocptr */ \
"ld 3, 8(11)\n\t" /* arg1->r3 */ \
"ld 4, 16(11)\n\t" /* arg2->r4 */ \
"ld 5, 24(11)\n\t" /* arg3->r5 */ \
"ld 6, 32(11)\n\t" /* arg4->r6 */ \
"ld 7, 40(11)\n\t" /* arg5->r7 */ \
"ld 8, 48(11)\n\t" /* arg6->r8 */ \
"ld 11, 0(11)\n\t" /* target->r11 */ \
VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11 \
"mr 11,%1\n\t" \
"mr %0,3\n\t" \
"ld 2,-16(11)\n\t" /* restore tocptr */ \
VALGRIND_RESTORE_STACK \
: /*out*/ "=r" (_res) \
: /*in*/ "r" (&_argvec[2]) \
: /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r28" \
); \
lval = (__typeof__(lval)) _res; \
} while (0)
#define CALL_FN_W_7W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6, \
arg7) \
do { \
volatile OrigFn _orig = (orig); \
volatile unsigned long _argvec[3+7]; \
volatile unsigned long _res; \
/* _argvec[0] holds current r2 across the call */ \
_argvec[1] = (unsigned long)_orig.r2; \
_argvec[2] = (unsigned long)_orig.nraddr; \
_argvec[2+1] = (unsigned long)arg1; \
_argvec[2+2] = (unsigned long)arg2; \
_argvec[2+3] = (unsigned long)arg3; \
_argvec[2+4] = (unsigned long)arg4; \
_argvec[2+5] = (unsigned long)arg5; \
_argvec[2+6] = (unsigned long)arg6; \
_argvec[2+7] = (unsigned long)arg7; \
__asm__ volatile( \
VALGRIND_ALIGN_STACK \
"mr 11,%1\n\t" \
"std 2,-16(11)\n\t" /* save tocptr */ \
"ld 2,-8(11)\n\t" /* use nraddr's tocptr */ \
"ld 3, 8(11)\n\t" /* arg1->r3 */ \
"ld 4, 16(11)\n\t" /* arg2->r4 */ \
"ld 5, 24(11)\n\t" /* arg3->r5 */ \
"ld 6, 32(11)\n\t" /* arg4->r6 */ \
"ld 7, 40(11)\n\t" /* arg5->r7 */ \
"ld 8, 48(11)\n\t" /* arg6->r8 */ \
"ld 9, 56(11)\n\t" /* arg7->r9 */ \
"ld 11, 0(11)\n\t" /* target->r11 */ \
VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11 \
"mr 11,%1\n\t" \
"mr %0,3\n\t" \
"ld 2,-16(11)\n\t" /* restore tocptr */ \
VALGRIND_RESTORE_STACK \
: /*out*/ "=r" (_res) \
: /*in*/ "r" (&_argvec[2]) \
: /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r28" \
); \
lval = (__typeof__(lval)) _res; \
} while (0)
#define CALL_FN_W_8W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6, \
arg7,arg8) \
do { \
volatile OrigFn _orig = (orig); \
volatile unsigned long _argvec[3+8]; \
volatile unsigned long _res; \
/* _argvec[0] holds current r2 across the call */ \
_argvec[1] = (unsigned long)_orig.r2; \
_argvec[2] = (unsigned long)_orig.nraddr; \
_argvec[2+1] = (unsigned long)arg1; \
_argvec[2+2] = (unsigned long)arg2; \
_argvec[2+3] = (unsigned long)arg3; \
_argvec[2+4] = (unsigned long)arg4; \
_argvec[2+5] = (unsigned long)arg5; \
_argvec[2+6] = (unsigned long)arg6; \
_argvec[2+7] = (unsigned long)arg7; \
_argvec[2+8] = (unsigned long)arg8; \
__asm__ volatile( \
VALGRIND_ALIGN_STACK \
"mr 11,%1\n\t" \
"std 2,-16(11)\n\t" /* save tocptr */ \
"ld 2,-8(11)\n\t" /* use nraddr's tocptr */ \
"ld 3, 8(11)\n\t" /* arg1->r3 */ \
"ld 4, 16(11)\n\t" /* arg2->r4 */ \
"ld 5, 24(11)\n\t" /* arg3->r5 */ \
"ld 6, 32(11)\n\t" /* arg4->r6 */ \
"ld 7, 40(11)\n\t" /* arg5->r7 */ \
"ld 8, 48(11)\n\t" /* arg6->r8 */ \
"ld 9, 56(11)\n\t" /* arg7->r9 */ \
"ld 10, 64(11)\n\t" /* arg8->r10 */ \
"ld 11, 0(11)\n\t" /* target->r11 */ \
VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11 \
"mr 11,%1\n\t" \
"mr %0,3\n\t" \
"ld 2,-16(11)\n\t" /* restore tocptr */ \
VALGRIND_RESTORE_STACK \
: /*out*/ "=r" (_res) \
: /*in*/ "r" (&_argvec[2]) \
: /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r28" \
); \
lval = (__typeof__(lval)) _res; \
} while (0)
#define CALL_FN_W_9W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6, \
arg7,arg8,arg9) \
do { \
volatile OrigFn _orig = (orig); \
volatile unsigned long _argvec[3+9]; \
volatile unsigned long _res; \
/* _argvec[0] holds current r2 across the call */ \
_argvec[1] = (unsigned long)_orig.r2; \
_argvec[2] = (unsigned long)_orig.nraddr; \
_argvec[2+1] = (unsigned long)arg1; \
_argvec[2+2] = (unsigned long)arg2; \
_argvec[2+3] = (unsigned long)arg3; \
_argvec[2+4] = (unsigned long)arg4; \
_argvec[2+5] = (unsigned long)arg5; \
_argvec[2+6] = (unsigned long)arg6; \
_argvec[2+7] = (unsigned long)arg7; \
_argvec[2+8] = (unsigned long)arg8; \
_argvec[2+9] = (unsigned long)arg9; \
__asm__ volatile( \
VALGRIND_ALIGN_STACK \
"mr 11,%1\n\t" \
"std 2,-16(11)\n\t" /* save tocptr */ \
"ld 2,-8(11)\n\t" /* use nraddr's tocptr */ \
"addi 1,1,-128\n\t" /* expand stack frame */ \
/* arg9 */ \
"ld 3,72(11)\n\t" \
"std 3,112(1)\n\t" \
/* args1-8 */ \
"ld 3, 8(11)\n\t" /* arg1->r3 */ \
"ld 4, 16(11)\n\t" /* arg2->r4 */ \
"ld 5, 24(11)\n\t" /* arg3->r5 */ \
"ld 6, 32(11)\n\t" /* arg4->r6 */ \
"ld 7, 40(11)\n\t" /* arg5->r7 */ \
"ld 8, 48(11)\n\t" /* arg6->r8 */ \
"ld 9, 56(11)\n\t" /* arg7->r9 */ \
"ld 10, 64(11)\n\t" /* arg8->r10 */ \
"ld 11, 0(11)\n\t" /* target->r11 */ \
VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11 \
"mr 11,%1\n\t" \
"mr %0,3\n\t" \
"ld 2,-16(11)\n\t" /* restore tocptr */ \
VALGRIND_RESTORE_STACK \
: /*out*/ "=r" (_res) \
: /*in*/ "r" (&_argvec[2]) \
: /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r28" \
); \
lval = (__typeof__(lval)) _res; \
} while (0)
#define CALL_FN_W_10W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6, \
arg7,arg8,arg9,arg10) \
do { \
volatile OrigFn _orig = (orig); \
volatile unsigned long _argvec[3+10]; \
volatile unsigned long _res; \
/* _argvec[0] holds current r2 across the call */ \
_argvec[1] = (unsigned long)_orig.r2; \
_argvec[2] = (unsigned long)_orig.nraddr; \
_argvec[2+1] = (unsigned long)arg1; \
_argvec[2+2] = (unsigned long)arg2; \
_argvec[2+3] = (unsigned long)arg3; \
_argvec[2+4] = (unsigned long)arg4; \
_argvec[2+5] = (unsigned long)arg5; \
_argvec[2+6] = (unsigned long)arg6; \
_argvec[2+7] = (unsigned long)arg7; \
_argvec[2+8] = (unsigned long)arg8; \
_argvec[2+9] = (unsigned long)arg9; \
_argvec[2+10] = (unsigned long)arg10; \
__asm__ volatile( \
VALGRIND_ALIGN_STACK \
"mr 11,%1\n\t" \
"std 2,-16(11)\n\t" /* save tocptr */ \
"ld 2,-8(11)\n\t" /* use nraddr's tocptr */ \
"addi 1,1,-128\n\t" /* expand stack frame */ \
/* arg10 */ \
"ld 3,80(11)\n\t" \
"std 3,120(1)\n\t" \
/* arg9 */ \
"ld 3,72(11)\n\t" \
"std 3,112(1)\n\t" \
/* args1-8 */ \
"ld 3, 8(11)\n\t" /* arg1->r3 */ \
"ld 4, 16(11)\n\t" /* arg2->r4 */ \
"ld 5, 24(11)\n\t" /* arg3->r5 */ \
"ld 6, 32(11)\n\t" /* arg4->r6 */ \
"ld 7, 40(11)\n\t" /* arg5->r7 */ \
"ld 8, 48(11)\n\t" /* arg6->r8 */ \
"ld 9, 56(11)\n\t" /* arg7->r9 */ \
"ld 10, 64(11)\n\t" /* arg8->r10 */ \
"ld 11, 0(11)\n\t" /* target->r11 */ \
VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11 \
"mr 11,%1\n\t" \
"mr %0,3\n\t" \
"ld 2,-16(11)\n\t" /* restore tocptr */ \
VALGRIND_RESTORE_STACK \
: /*out*/ "=r" (_res) \
: /*in*/ "r" (&_argvec[2]) \
: /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r28" \
); \
lval = (__typeof__(lval)) _res; \
} while (0)
#define CALL_FN_W_11W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6, \
arg7,arg8,arg9,arg10,arg11) \
do { \
volatile OrigFn _orig = (orig); \
volatile unsigned long _argvec[3+11]; \
volatile unsigned long _res; \
/* _argvec[0] holds current r2 across the call */ \
_argvec[1] = (unsigned long)_orig.r2; \
_argvec[2] = (unsigned long)_orig.nraddr; \
_argvec[2+1] = (unsigned long)arg1; \
_argvec[2+2] = (unsigned long)arg2; \
_argvec[2+3] = (unsigned long)arg3; \
_argvec[2+4] = (unsigned long)arg4; \
_argvec[2+5] = (unsigned long)arg5; \
_argvec[2+6] = (unsigned long)arg6; \
_argvec[2+7] = (unsigned long)arg7; \
_argvec[2+8] = (unsigned long)arg8; \
_argvec[2+9] = (unsigned long)arg9; \
_argvec[2+10] = (unsigned long)arg10; \
_argvec[2+11] = (unsigned long)arg11; \
__asm__ volatile( \
VALGRIND_ALIGN_STACK \
"mr 11,%1\n\t" \
"std 2,-16(11)\n\t" /* save tocptr */ \
"ld 2,-8(11)\n\t" /* use nraddr's tocptr */ \
"addi 1,1,-144\n\t" /* expand stack frame */ \
/* arg11 */ \
"ld 3,88(11)\n\t" \
"std 3,128(1)\n\t" \
/* arg10 */ \
"ld 3,80(11)\n\t" \
"std 3,120(1)\n\t" \
/* arg9 */ \
"ld 3,72(11)\n\t" \
"std 3,112(1)\n\t" \
/* args1-8 */ \
"ld 3, 8(11)\n\t" /* arg1->r3 */ \
"ld 4, 16(11)\n\t" /* arg2->r4 */ \
"ld 5, 24(11)\n\t" /* arg3->r5 */ \
"ld 6, 32(11)\n\t" /* arg4->r6 */ \
"ld 7, 40(11)\n\t" /* arg5->r7 */ \
"ld 8, 48(11)\n\t" /* arg6->r8 */ \
"ld 9, 56(11)\n\t" /* arg7->r9 */ \
"ld 10, 64(11)\n\t" /* arg8->r10 */ \
"ld 11, 0(11)\n\t" /* target->r11 */ \
VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11 \
"mr 11,%1\n\t" \
"mr %0,3\n\t" \
"ld 2,-16(11)\n\t" /* restore tocptr */ \
VALGRIND_RESTORE_STACK \
: /*out*/ "=r" (_res) \
: /*in*/ "r" (&_argvec[2]) \
: /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r28" \
); \
lval = (__typeof__(lval)) _res; \
} while (0)
#define CALL_FN_W_12W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6, \
arg7,arg8,arg9,arg10,arg11,arg12) \
do { \
volatile OrigFn _orig = (orig); \
volatile unsigned long _argvec[3+12]; \
volatile unsigned long _res; \
/* _argvec[0] holds current r2 across the call */ \
_argvec[1] = (unsigned long)_orig.r2; \
_argvec[2] = (unsigned long)_orig.nraddr; \
_argvec[2+1] = (unsigned long)arg1; \
_argvec[2+2] = (unsigned long)arg2; \
_argvec[2+3] = (unsigned long)arg3; \
_argvec[2+4] = (unsigned long)arg4; \
_argvec[2+5] = (unsigned long)arg5; \
_argvec[2+6] = (unsigned long)arg6; \
_argvec[2+7] = (unsigned long)arg7; \
_argvec[2+8] = (unsigned long)arg8; \
_argvec[2+9] = (unsigned long)arg9; \
_argvec[2+10] = (unsigned long)arg10; \
_argvec[2+11] = (unsigned long)arg11; \
_argvec[2+12] = (unsigned long)arg12; \
__asm__ volatile( \
VALGRIND_ALIGN_STACK \
"mr 11,%1\n\t" \
"std 2,-16(11)\n\t" /* save tocptr */ \
"ld 2,-8(11)\n\t" /* use nraddr's tocptr */ \
"addi 1,1,-144\n\t" /* expand stack frame */ \
/* arg12 */ \
"ld 3,96(11)\n\t" \
"std 3,136(1)\n\t" \
/* arg11 */ \
"ld 3,88(11)\n\t" \
"std 3,128(1)\n\t" \
/* arg10 */ \
"ld 3,80(11)\n\t" \
"std 3,120(1)\n\t" \
/* arg9 */ \
"ld 3,72(11)\n\t" \
"std 3,112(1)\n\t" \
/* args1-8 */ \
"ld 3, 8(11)\n\t" /* arg1->r3 */ \
"ld 4, 16(11)\n\t" /* arg2->r4 */ \
"ld 5, 24(11)\n\t" /* arg3->r5 */ \
"ld 6, 32(11)\n\t" /* arg4->r6 */ \
"ld 7, 40(11)\n\t" /* arg5->r7 */ \
"ld 8, 48(11)\n\t" /* arg6->r8 */ \
"ld 9, 56(11)\n\t" /* arg7->r9 */ \
"ld 10, 64(11)\n\t" /* arg8->r10 */ \
"ld 11, 0(11)\n\t" /* target->r11 */ \
VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11 \
"mr 11,%1\n\t" \
"mr %0,3\n\t" \
"ld 2,-16(11)\n\t" /* restore tocptr */ \
VALGRIND_RESTORE_STACK \
: /*out*/ "=r" (_res) \
: /*in*/ "r" (&_argvec[2]) \
: /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r28" \
); \
lval = (__typeof__(lval)) _res; \
} while (0)
#endif /* PLAT_ppc64be_linux */
/* ------------------------- ppc64le-linux ----------------------- */
#if defined(PLAT_ppc64le_linux)
/* ARGREGS: r3 r4 r5 r6 r7 r8 r9 r10 (the rest on stack somewhere) */
/* These regs are trashed by the hidden call. */
#define __CALLER_SAVED_REGS \
"lr", "ctr", "xer", \
"cr0", "cr1", "cr2", "cr3", "cr4", "cr5", "cr6", "cr7", \
"r0", "r3", "r4", "r5", "r6", "r7", "r8", "r9", "r10", \
"r11", "r12", "r13"
/* Macros to save and align the stack before making a function
call and restore it afterwards as gcc may not keep the stack
pointer aligned if it doesn't realise calls are being made
to other functions. */
#define VALGRIND_ALIGN_STACK \
"mr 28,1\n\t" \
"rldicr 1,1,0,59\n\t"
#define VALGRIND_RESTORE_STACK \
"mr 1,28\n\t"
/* These CALL_FN_ macros assume that on ppc64-linux, sizeof(unsigned
long) == 8. */
#define CALL_FN_W_v(lval, orig) \
do { \
volatile OrigFn _orig = (orig); \
volatile unsigned long _argvec[3+0]; \
volatile unsigned long _res; \
/* _argvec[0] holds current r2 across the call */ \
_argvec[1] = (unsigned long)_orig.r2; \
_argvec[2] = (unsigned long)_orig.nraddr; \
__asm__ volatile( \
VALGRIND_ALIGN_STACK \
"mr 12,%1\n\t" \
"std 2,-16(12)\n\t" /* save tocptr */ \
"ld 2,-8(12)\n\t" /* use nraddr's tocptr */ \
"ld 12, 0(12)\n\t" /* target->r12 */ \
VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R12 \
"mr 12,%1\n\t" \
"mr %0,3\n\t" \
"ld 2,-16(12)\n\t" /* restore tocptr */ \
VALGRIND_RESTORE_STACK \
: /*out*/ "=r" (_res) \
: /*in*/ "r" (&_argvec[2]) \
: /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r28" \
); \
lval = (__typeof__(lval)) _res; \
} while (0)
#define CALL_FN_W_W(lval, orig, arg1) \
do { \
volatile OrigFn _orig = (orig); \
volatile unsigned long _argvec[3+1]; \
volatile unsigned long _res; \
/* _argvec[0] holds current r2 across the call */ \
_argvec[1] = (unsigned long)_orig.r2; \
_argvec[2] = (unsigned long)_orig.nraddr; \
_argvec[2+1] = (unsigned long)arg1; \
__asm__ volatile( \
VALGRIND_ALIGN_STACK \
"mr 12,%1\n\t" \
"std 2,-16(12)\n\t" /* save tocptr */ \
"ld 2,-8(12)\n\t" /* use nraddr's tocptr */ \
"ld 3, 8(12)\n\t" /* arg1->r3 */ \
"ld 12, 0(12)\n\t" /* target->r12 */ \
VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R12 \
"mr 12,%1\n\t" \
"mr %0,3\n\t" \
"ld 2,-16(12)\n\t" /* restore tocptr */ \
VALGRIND_RESTORE_STACK \
: /*out*/ "=r" (_res) \
: /*in*/ "r" (&_argvec[2]) \
: /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r28" \
); \
lval = (__typeof__(lval)) _res; \
} while (0)
#define CALL_FN_W_WW(lval, orig, arg1,arg2) \
do { \
volatile OrigFn _orig = (orig); \
volatile unsigned long _argvec[3+2]; \
volatile unsigned long _res; \
/* _argvec[0] holds current r2 across the call */ \
_argvec[1] = (unsigned long)_orig.r2; \
_argvec[2] = (unsigned long)_orig.nraddr; \
_argvec[2+1] = (unsigned long)arg1; \
_argvec[2+2] = (unsigned long)arg2; \
__asm__ volatile( \
VALGRIND_ALIGN_STACK \
"mr 12,%1\n\t" \
"std 2,-16(12)\n\t" /* save tocptr */ \
"ld 2,-8(12)\n\t" /* use nraddr's tocptr */ \
"ld 3, 8(12)\n\t" /* arg1->r3 */ \
"ld 4, 16(12)\n\t" /* arg2->r4 */ \
"ld 12, 0(12)\n\t" /* target->r12 */ \
VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R12 \
"mr 12,%1\n\t" \
"mr %0,3\n\t" \
"ld 2,-16(12)\n\t" /* restore tocptr */ \
VALGRIND_RESTORE_STACK \
: /*out*/ "=r" (_res) \
: /*in*/ "r" (&_argvec[2]) \
: /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r28" \
); \
lval = (__typeof__(lval)) _res; \
} while (0)
#define CALL_FN_W_WWW(lval, orig, arg1,arg2,arg3) \
do { \
volatile OrigFn _orig = (orig); \
volatile unsigned long _argvec[3+3]; \
volatile unsigned long _res; \
/* _argvec[0] holds current r2 across the call */ \
_argvec[1] = (unsigned long)_orig.r2; \
_argvec[2] = (unsigned long)_orig.nraddr; \
_argvec[2+1] = (unsigned long)arg1; \
_argvec[2+2] = (unsigned long)arg2; \
_argvec[2+3] = (unsigned long)arg3; \
__asm__ volatile( \
VALGRIND_ALIGN_STACK \
"mr 12,%1\n\t" \
"std 2,-16(12)\n\t" /* save tocptr */ \
"ld 2,-8(12)\n\t" /* use nraddr's tocptr */ \
"ld 3, 8(12)\n\t" /* arg1->r3 */ \
"ld 4, 16(12)\n\t" /* arg2->r4 */ \
"ld 5, 24(12)\n\t" /* arg3->r5 */ \
"ld 12, 0(12)\n\t" /* target->r12 */ \
VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R12 \
"mr 12,%1\n\t" \
"mr %0,3\n\t" \
"ld 2,-16(12)\n\t" /* restore tocptr */ \
VALGRIND_RESTORE_STACK \
: /*out*/ "=r" (_res) \
: /*in*/ "r" (&_argvec[2]) \
: /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r28" \
); \
lval = (__typeof__(lval)) _res; \
} while (0)
#define CALL_FN_W_WWWW(lval, orig, arg1,arg2,arg3,arg4) \
do { \
volatile OrigFn _orig = (orig); \
volatile unsigned long _argvec[3+4]; \
volatile unsigned long _res; \
/* _argvec[0] holds current r2 across the call */ \
_argvec[1] = (unsigned long)_orig.r2; \
_argvec[2] = (unsigned long)_orig.nraddr; \
_argvec[2+1] = (unsigned long)arg1; \
_argvec[2+2] = (unsigned long)arg2; \
_argvec[2+3] = (unsigned long)arg3; \
_argvec[2+4] = (unsigned long)arg4; \
__asm__ volatile( \
VALGRIND_ALIGN_STACK \
"mr 12,%1\n\t" \
"std 2,-16(12)\n\t" /* save tocptr */ \
"ld 2,-8(12)\n\t" /* use nraddr's tocptr */ \
"ld 3, 8(12)\n\t" /* arg1->r3 */ \
"ld 4, 16(12)\n\t" /* arg2->r4 */ \
"ld 5, 24(12)\n\t" /* arg3->r5 */ \
"ld 6, 32(12)\n\t" /* arg4->r6 */ \
"ld 12, 0(12)\n\t" /* target->r12 */ \
VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R12 \
"mr 12,%1\n\t" \
"mr %0,3\n\t" \
"ld 2,-16(12)\n\t" /* restore tocptr */ \
VALGRIND_RESTORE_STACK \
: /*out*/ "=r" (_res) \
: /*in*/ "r" (&_argvec[2]) \
: /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r28" \
); \
lval = (__typeof__(lval)) _res; \
} while (0)
#define CALL_FN_W_5W(lval, orig, arg1,arg2,arg3,arg4,arg5) \
do { \
volatile OrigFn _orig = (orig); \
volatile unsigned long _argvec[3+5]; \
volatile unsigned long _res; \
/* _argvec[0] holds current r2 across the call */ \
_argvec[1] = (unsigned long)_orig.r2; \
_argvec[2] = (unsigned long)_orig.nraddr; \
_argvec[2+1] = (unsigned long)arg1; \
_argvec[2+2] = (unsigned long)arg2; \
_argvec[2+3] = (unsigned long)arg3; \
_argvec[2+4] = (unsigned long)arg4; \
_argvec[2+5] = (unsigned long)arg5; \
__asm__ volatile( \
VALGRIND_ALIGN_STACK \
"mr 12,%1\n\t" \
"std 2,-16(12)\n\t" /* save tocptr */ \
"ld 2,-8(12)\n\t" /* use nraddr's tocptr */ \
"ld 3, 8(12)\n\t" /* arg1->r3 */ \
"ld 4, 16(12)\n\t" /* arg2->r4 */ \
"ld 5, 24(12)\n\t" /* arg3->r5 */ \
"ld 6, 32(12)\n\t" /* arg4->r6 */ \
"ld 7, 40(12)\n\t" /* arg5->r7 */ \
"ld 12, 0(12)\n\t" /* target->r12 */ \
VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R12 \
"mr 12,%1\n\t" \
"mr %0,3\n\t" \
"ld 2,-16(12)\n\t" /* restore tocptr */ \
VALGRIND_RESTORE_STACK \
: /*out*/ "=r" (_res) \
: /*in*/ "r" (&_argvec[2]) \
: /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r28" \
); \
lval = (__typeof__(lval)) _res; \
} while (0)
#define CALL_FN_W_6W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6) \
do { \
volatile OrigFn _orig = (orig); \
volatile unsigned long _argvec[3+6]; \
volatile unsigned long _res; \
/* _argvec[0] holds current r2 across the call */ \
_argvec[1] = (unsigned long)_orig.r2; \
_argvec[2] = (unsigned long)_orig.nraddr; \
_argvec[2+1] = (unsigned long)arg1; \
_argvec[2+2] = (unsigned long)arg2; \
_argvec[2+3] = (unsigned long)arg3; \
_argvec[2+4] = (unsigned long)arg4; \
_argvec[2+5] = (unsigned long)arg5; \
_argvec[2+6] = (unsigned long)arg6; \
__asm__ volatile( \
VALGRIND_ALIGN_STACK \
"mr 12,%1\n\t" \
"std 2,-16(12)\n\t" /* save tocptr */ \
"ld 2,-8(12)\n\t" /* use nraddr's tocptr */ \
"ld 3, 8(12)\n\t" /* arg1->r3 */ \
"ld 4, 16(12)\n\t" /* arg2->r4 */ \
"ld 5, 24(12)\n\t" /* arg3->r5 */ \
"ld 6, 32(12)\n\t" /* arg4->r6 */ \
"ld 7, 40(12)\n\t" /* arg5->r7 */ \
"ld 8, 48(12)\n\t" /* arg6->r8 */ \
"ld 12, 0(12)\n\t" /* target->r12 */ \
VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R12 \
"mr 12,%1\n\t" \
"mr %0,3\n\t" \
"ld 2,-16(12)\n\t" /* restore tocptr */ \
VALGRIND_RESTORE_STACK \
: /*out*/ "=r" (_res) \
: /*in*/ "r" (&_argvec[2]) \
: /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r28" \
); \
lval = (__typeof__(lval)) _res; \
} while (0)
#define CALL_FN_W_7W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6, \
arg7) \
do { \
volatile OrigFn _orig = (orig); \
volatile unsigned long _argvec[3+7]; \
volatile unsigned long _res; \
/* _argvec[0] holds current r2 across the call */ \
_argvec[1] = (unsigned long)_orig.r2; \
_argvec[2] = (unsigned long)_orig.nraddr; \
_argvec[2+1] = (unsigned long)arg1; \
_argvec[2+2] = (unsigned long)arg2; \
_argvec[2+3] = (unsigned long)arg3; \
_argvec[2+4] = (unsigned long)arg4; \
_argvec[2+5] = (unsigned long)arg5; \
_argvec[2+6] = (unsigned long)arg6; \
_argvec[2+7] = (unsigned long)arg7; \
__asm__ volatile( \
VALGRIND_ALIGN_STACK \
"mr 12,%1\n\t" \
"std 2,-16(12)\n\t" /* save tocptr */ \
"ld 2,-8(12)\n\t" /* use nraddr's tocptr */ \
"ld 3, 8(12)\n\t" /* arg1->r3 */ \
"ld 4, 16(12)\n\t" /* arg2->r4 */ \
"ld 5, 24(12)\n\t" /* arg3->r5 */ \
"ld 6, 32(12)\n\t" /* arg4->r6 */ \
"ld 7, 40(12)\n\t" /* arg5->r7 */ \
"ld 8, 48(12)\n\t" /* arg6->r8 */ \
"ld 9, 56(12)\n\t" /* arg7->r9 */ \
"ld 12, 0(12)\n\t" /* target->r12 */ \
VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R12 \
"mr 12,%1\n\t" \
"mr %0,3\n\t" \
"ld 2,-16(12)\n\t" /* restore tocptr */ \
VALGRIND_RESTORE_STACK \
: /*out*/ "=r" (_res) \
: /*in*/ "r" (&_argvec[2]) \
: /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r28" \
); \
lval = (__typeof__(lval)) _res; \
} while (0)
#define CALL_FN_W_8W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6, \
arg7,arg8) \
do { \
volatile OrigFn _orig = (orig); \
volatile unsigned long _argvec[3+8]; \
volatile unsigned long _res; \
/* _argvec[0] holds current r2 across the call */ \
_argvec[1] = (unsigned long)_orig.r2; \
_argvec[2] = (unsigned long)_orig.nraddr; \
_argvec[2+1] = (unsigned long)arg1; \
_argvec[2+2] = (unsigned long)arg2; \
_argvec[2+3] = (unsigned long)arg3; \
_argvec[2+4] = (unsigned long)arg4; \
_argvec[2+5] = (unsigned long)arg5; \
_argvec[2+6] = (unsigned long)arg6; \
_argvec[2+7] = (unsigned long)arg7; \
_argvec[2+8] = (unsigned long)arg8; \
__asm__ volatile( \
VALGRIND_ALIGN_STACK \
"mr 12,%1\n\t" \
"std 2,-16(12)\n\t" /* save tocptr */ \
"ld 2,-8(12)\n\t" /* use nraddr's tocptr */ \
"ld 3, 8(12)\n\t" /* arg1->r3 */ \
"ld 4, 16(12)\n\t" /* arg2->r4 */ \
"ld 5, 24(12)\n\t" /* arg3->r5 */ \
"ld 6, 32(12)\n\t" /* arg4->r6 */ \
"ld 7, 40(12)\n\t" /* arg5->r7 */ \
"ld 8, 48(12)\n\t" /* arg6->r8 */ \
"ld 9, 56(12)\n\t" /* arg7->r9 */ \
"ld 10, 64(12)\n\t" /* arg8->r10 */ \
"ld 12, 0(12)\n\t" /* target->r12 */ \
VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R12 \
"mr 12,%1\n\t" \
"mr %0,3\n\t" \
"ld 2,-16(12)\n\t" /* restore tocptr */ \
VALGRIND_RESTORE_STACK \
: /*out*/ "=r" (_res) \
: /*in*/ "r" (&_argvec[2]) \
: /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r28" \
); \
lval = (__typeof__(lval)) _res; \
} while (0)
#define CALL_FN_W_9W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6, \
arg7,arg8,arg9) \
do { \
volatile OrigFn _orig = (orig); \
volatile unsigned long _argvec[3+9]; \
volatile unsigned long _res; \
/* _argvec[0] holds current r2 across the call */ \
_argvec[1] = (unsigned long)_orig.r2; \
_argvec[2] = (unsigned long)_orig.nraddr; \
_argvec[2+1] = (unsigned long)arg1; \
_argvec[2+2] = (unsigned long)arg2; \
_argvec[2+3] = (unsigned long)arg3; \
_argvec[2+4] = (unsigned long)arg4; \
_argvec[2+5] = (unsigned long)arg5; \
_argvec[2+6] = (unsigned long)arg6; \
_argvec[2+7] = (unsigned long)arg7; \
_argvec[2+8] = (unsigned long)arg8; \
_argvec[2+9] = (unsigned long)arg9; \
__asm__ volatile( \
VALGRIND_ALIGN_STACK \
"mr 12,%1\n\t" \
"std 2,-16(12)\n\t" /* save tocptr */ \
"ld 2,-8(12)\n\t" /* use nraddr's tocptr */ \
"addi 1,1,-128\n\t" /* expand stack frame */ \
/* arg9 */ \
"ld 3,72(12)\n\t" \
"std 3,96(1)\n\t" \
/* args1-8 */ \
"ld 3, 8(12)\n\t" /* arg1->r3 */ \
"ld 4, 16(12)\n\t" /* arg2->r4 */ \
"ld 5, 24(12)\n\t" /* arg3->r5 */ \
"ld 6, 32(12)\n\t" /* arg4->r6 */ \
"ld 7, 40(12)\n\t" /* arg5->r7 */ \
"ld 8, 48(12)\n\t" /* arg6->r8 */ \
"ld 9, 56(12)\n\t" /* arg7->r9 */ \
"ld 10, 64(12)\n\t" /* arg8->r10 */ \
"ld 12, 0(12)\n\t" /* target->r12 */ \
VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R12 \
"mr 12,%1\n\t" \
"mr %0,3\n\t" \
"ld 2,-16(12)\n\t" /* restore tocptr */ \
VALGRIND_RESTORE_STACK \
: /*out*/ "=r" (_res) \
: /*in*/ "r" (&_argvec[2]) \
: /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r28" \
); \
lval = (__typeof__(lval)) _res; \
} while (0)
#define CALL_FN_W_10W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6, \
arg7,arg8,arg9,arg10) \
do { \
volatile OrigFn _orig = (orig); \
volatile unsigned long _argvec[3+10]; \
volatile unsigned long _res; \
/* _argvec[0] holds current r2 across the call */ \
_argvec[1] = (unsigned long)_orig.r2; \
_argvec[2] = (unsigned long)_orig.nraddr; \
_argvec[2+1] = (unsigned long)arg1; \
_argvec[2+2] = (unsigned long)arg2; \
_argvec[2+3] = (unsigned long)arg3; \
_argvec[2+4] = (unsigned long)arg4; \
_argvec[2+5] = (unsigned long)arg5; \
_argvec[2+6] = (unsigned long)arg6; \
_argvec[2+7] = (unsigned long)arg7; \
_argvec[2+8] = (unsigned long)arg8; \
_argvec[2+9] = (unsigned long)arg9; \
_argvec[2+10] = (unsigned long)arg10; \
__asm__ volatile( \
VALGRIND_ALIGN_STACK \
"mr 12,%1\n\t" \
"std 2,-16(12)\n\t" /* save tocptr */ \
"ld 2,-8(12)\n\t" /* use nraddr's tocptr */ \
"addi 1,1,-128\n\t" /* expand stack frame */ \
/* arg10 */ \
"ld 3,80(12)\n\t" \
"std 3,104(1)\n\t" \
/* arg9 */ \
"ld 3,72(12)\n\t" \
"std 3,96(1)\n\t" \
/* args1-8 */ \
"ld 3, 8(12)\n\t" /* arg1->r3 */ \
"ld 4, 16(12)\n\t" /* arg2->r4 */ \
"ld 5, 24(12)\n\t" /* arg3->r5 */ \
"ld 6, 32(12)\n\t" /* arg4->r6 */ \
"ld 7, 40(12)\n\t" /* arg5->r7 */ \
"ld 8, 48(12)\n\t" /* arg6->r8 */ \
"ld 9, 56(12)\n\t" /* arg7->r9 */ \
"ld 10, 64(12)\n\t" /* arg8->r10 */ \
"ld 12, 0(12)\n\t" /* target->r12 */ \
VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R12 \
"mr 12,%1\n\t" \
"mr %0,3\n\t" \
"ld 2,-16(12)\n\t" /* restore tocptr */ \
VALGRIND_RESTORE_STACK \
: /*out*/ "=r" (_res) \
: /*in*/ "r" (&_argvec[2]) \
: /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r28" \
); \
lval = (__typeof__(lval)) _res; \
} while (0)
#define CALL_FN_W_11W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6, \
arg7,arg8,arg9,arg10,arg11) \
do { \
volatile OrigFn _orig = (orig); \
volatile unsigned long _argvec[3+11]; \
volatile unsigned long _res; \
/* _argvec[0] holds current r2 across the call */ \
_argvec[1] = (unsigned long)_orig.r2; \
_argvec[2] = (unsigned long)_orig.nraddr; \
_argvec[2+1] = (unsigned long)arg1; \
_argvec[2+2] = (unsigned long)arg2; \
_argvec[2+3] = (unsigned long)arg3; \
_argvec[2+4] = (unsigned long)arg4; \
_argvec[2+5] = (unsigned long)arg5; \
_argvec[2+6] = (unsigned long)arg6; \
_argvec[2+7] = (unsigned long)arg7; \
_argvec[2+8] = (unsigned long)arg8; \
_argvec[2+9] = (unsigned long)arg9; \
_argvec[2+10] = (unsigned long)arg10; \
_argvec[2+11] = (unsigned long)arg11; \
__asm__ volatile( \
VALGRIND_ALIGN_STACK \
"mr 12,%1\n\t" \
"std 2,-16(12)\n\t" /* save tocptr */ \
"ld 2,-8(12)\n\t" /* use nraddr's tocptr */ \
"addi 1,1,-144\n\t" /* expand stack frame */ \
/* arg11 */ \
"ld 3,88(12)\n\t" \
"std 3,112(1)\n\t" \
/* arg10 */ \
"ld 3,80(12)\n\t" \
"std 3,104(1)\n\t" \
/* arg9 */ \
"ld 3,72(12)\n\t" \
"std 3,96(1)\n\t" \
/* args1-8 */ \
"ld 3, 8(12)\n\t" /* arg1->r3 */ \
"ld 4, 16(12)\n\t" /* arg2->r4 */ \
"ld 5, 24(12)\n\t" /* arg3->r5 */ \
"ld 6, 32(12)\n\t" /* arg4->r6 */ \
"ld 7, 40(12)\n\t" /* arg5->r7 */ \
"ld 8, 48(12)\n\t" /* arg6->r8 */ \
"ld 9, 56(12)\n\t" /* arg7->r9 */ \
"ld 10, 64(12)\n\t" /* arg8->r10 */ \
"ld 12, 0(12)\n\t" /* target->r12 */ \
VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R12 \
"mr 12,%1\n\t" \
"mr %0,3\n\t" \
"ld 2,-16(12)\n\t" /* restore tocptr */ \
VALGRIND_RESTORE_STACK \
: /*out*/ "=r" (_res) \
: /*in*/ "r" (&_argvec[2]) \
: /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r28" \
); \
lval = (__typeof__(lval)) _res; \
} while (0)
#define CALL_FN_W_12W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6, \
arg7,arg8,arg9,arg10,arg11,arg12) \
do { \
volatile OrigFn _orig = (orig); \
volatile unsigned long _argvec[3+12]; \
volatile unsigned long _res; \
/* _argvec[0] holds current r2 across the call */ \
_argvec[1] = (unsigned long)_orig.r2; \
_argvec[2] = (unsigned long)_orig.nraddr; \
_argvec[2+1] = (unsigned long)arg1; \
_argvec[2+2] = (unsigned long)arg2; \
_argvec[2+3] = (unsigned long)arg3; \
_argvec[2+4] = (unsigned long)arg4; \
_argvec[2+5] = (unsigned long)arg5; \
_argvec[2+6] = (unsigned long)arg6; \
_argvec[2+7] = (unsigned long)arg7; \
_argvec[2+8] = (unsigned long)arg8; \
_argvec[2+9] = (unsigned long)arg9; \
_argvec[2+10] = (unsigned long)arg10; \
_argvec[2+11] = (unsigned long)arg11; \
_argvec[2+12] = (unsigned long)arg12; \
__asm__ volatile( \
VALGRIND_ALIGN_STACK \
"mr 12,%1\n\t" \
"std 2,-16(12)\n\t" /* save tocptr */ \
"ld 2,-8(12)\n\t" /* use nraddr's tocptr */ \
"addi 1,1,-144\n\t" /* expand stack frame */ \
/* arg12 */ \
"ld 3,96(12)\n\t" \
"std 3,120(1)\n\t" \
/* arg11 */ \
"ld 3,88(12)\n\t" \
"std 3,112(1)\n\t" \
/* arg10 */ \
"ld 3,80(12)\n\t" \
"std 3,104(1)\n\t" \
/* arg9 */ \
"ld 3,72(12)\n\t" \
"std 3,96(1)\n\t" \
/* args1-8 */ \
"ld 3, 8(12)\n\t" /* arg1->r3 */ \
"ld 4, 16(12)\n\t" /* arg2->r4 */ \
"ld 5, 24(12)\n\t" /* arg3->r5 */ \
"ld 6, 32(12)\n\t" /* arg4->r6 */ \
"ld 7, 40(12)\n\t" /* arg5->r7 */ \
"ld 8, 48(12)\n\t" /* arg6->r8 */ \
"ld 9, 56(12)\n\t" /* arg7->r9 */ \
"ld 10, 64(12)\n\t" /* arg8->r10 */ \
"ld 12, 0(12)\n\t" /* target->r12 */ \
VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R12 \
"mr 12,%1\n\t" \
"mr %0,3\n\t" \
"ld 2,-16(12)\n\t" /* restore tocptr */ \
VALGRIND_RESTORE_STACK \
: /*out*/ "=r" (_res) \
: /*in*/ "r" (&_argvec[2]) \
: /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r28" \
); \
lval = (__typeof__(lval)) _res; \
} while (0)
#endif /* PLAT_ppc64le_linux */
/* ------------------------- arm-linux ------------------------- */
#if defined(PLAT_arm_linux)
/* These regs are trashed by the hidden call. */
#define __CALLER_SAVED_REGS "r0", "r1", "r2", "r3","r4", "r12", "r14"
/* Macros to save and align the stack before making a function
call and restore it afterwards as gcc may not keep the stack
pointer aligned if it doesn't realise calls are being made
to other functions. */
/* This is a bit tricky. We store the original stack pointer in r10
as it is callee-saves. gcc doesn't allow the use of r11 for some
reason. Also, we can't directly "bic" the stack pointer in thumb
mode since r13 isn't an allowed register number in that context.
So use r4 as a temporary, since that is about to get trashed
anyway, just after each use of this macro. Side effect is we need
to be very careful about any future changes, since
VALGRIND_ALIGN_STACK simply assumes r4 is usable. */
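/* In the macros below, "bic r4, r4, #7" therefore rounds the stack pointer
   down to an 8-byte boundary via the r4 scratch register, and
   VALGRIND_RESTORE_STACK puts back the original value saved in r10. */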
#define VALGRIND_ALIGN_STACK \
"mov r10, sp\n\t" \
"mov r4, sp\n\t" \
"bic r4, r4, #7\n\t" \
"mov sp, r4\n\t"
#define VALGRIND_RESTORE_STACK \
"mov sp, r10\n\t"
/* These CALL_FN_ macros assume that on arm-linux, sizeof(unsigned
long) == 4. */
#define CALL_FN_W_v(lval, orig) \
do { \
volatile OrigFn _orig = (orig); \
volatile unsigned long _argvec[1]; \
volatile unsigned long _res; \
_argvec[0] = (unsigned long)_orig.nraddr; \
__asm__ volatile( \
VALGRIND_ALIGN_STACK \
"ldr r4, [%1] \n\t" /* target->r4 */ \
VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R4 \
VALGRIND_RESTORE_STACK \
"mov %0, r0\n" \
: /*out*/ "=r" (_res) \
: /*in*/ "0" (&_argvec[0]) \
: /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r10" \
); \
lval = (__typeof__(lval)) _res; \
} while (0)
#define CALL_FN_W_W(lval, orig, arg1) \
do { \
volatile OrigFn _orig = (orig); \
volatile unsigned long _argvec[2]; \
volatile unsigned long _res; \
_argvec[0] = (unsigned long)_orig.nraddr; \
_argvec[1] = (unsigned long)(arg1); \
__asm__ volatile( \
VALGRIND_ALIGN_STACK \
"ldr r0, [%1, #4] \n\t" \
"ldr r4, [%1] \n\t" /* target->r4 */ \
VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R4 \
VALGRIND_RESTORE_STACK \
"mov %0, r0\n" \
: /*out*/ "=r" (_res) \
: /*in*/ "0" (&_argvec[0]) \
: /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r10" \
); \
lval = (__typeof__(lval)) _res; \
} while (0)
#define CALL_FN_W_WW(lval, orig, arg1,arg2) \
do { \
volatile OrigFn _orig = (orig); \
volatile unsigned long _argvec[3]; \
volatile unsigned long _res; \
_argvec[0] = (unsigned long)_orig.nraddr; \
_argvec[1] = (unsigned long)(arg1); \
_argvec[2] = (unsigned long)(arg2); \
__asm__ volatile( \
VALGRIND_ALIGN_STACK \
"ldr r0, [%1, #4] \n\t" \
"ldr r1, [%1, #8] \n\t" \
"ldr r4, [%1] \n\t" /* target->r4 */ \
VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R4 \
VALGRIND_RESTORE_STACK \
"mov %0, r0\n" \
: /*out*/ "=r" (_res) \
: /*in*/ "0" (&_argvec[0]) \
: /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r10" \
); \
lval = (__typeof__(lval)) _res; \
} while (0)
#define CALL_FN_W_WWW(lval, orig, arg1,arg2,arg3) \
do { \
volatile OrigFn _orig = (orig); \
volatile unsigned long _argvec[4]; \
volatile unsigned long _res; \
_argvec[0] = (unsigned long)_orig.nraddr; \
_argvec[1] = (unsigned long)(arg1); \
_argvec[2] = (unsigned long)(arg2); \
_argvec[3] = (unsigned long)(arg3); \
__asm__ volatile( \
VALGRIND_ALIGN_STACK \
"ldr r0, [%1, #4] \n\t" \
"ldr r1, [%1, #8] \n\t" \
"ldr r2, [%1, #12] \n\t" \
"ldr r4, [%1] \n\t" /* target->r4 */ \
VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R4 \
VALGRIND_RESTORE_STACK \
"mov %0, r0\n" \
: /*out*/ "=r" (_res) \
: /*in*/ "0" (&_argvec[0]) \
: /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r10" \
); \
lval = (__typeof__(lval)) _res; \
} while (0)
#define CALL_FN_W_WWWW(lval, orig, arg1,arg2,arg3,arg4) \
do { \
volatile OrigFn _orig = (orig); \
volatile unsigned long _argvec[5]; \
volatile unsigned long _res; \
_argvec[0] = (unsigned long)_orig.nraddr; \
_argvec[1] = (unsigned long)(arg1); \
_argvec[2] = (unsigned long)(arg2); \
_argvec[3] = (unsigned long)(arg3); \
_argvec[4] = (unsigned long)(arg4); \
__asm__ volatile( \
VALGRIND_ALIGN_STACK \
"ldr r0, [%1, #4] \n\t" \
"ldr r1, [%1, #8] \n\t" \
"ldr r2, [%1, #12] \n\t" \
"ldr r3, [%1, #16] \n\t" \
"ldr r4, [%1] \n\t" /* target->r4 */ \
VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R4 \
VALGRIND_RESTORE_STACK \
"mov %0, r0" \
: /*out*/ "=r" (_res) \
: /*in*/ "0" (&_argvec[0]) \
: /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r10" \
); \
lval = (__typeof__(lval)) _res; \
} while (0)
#define CALL_FN_W_5W(lval, orig, arg1,arg2,arg3,arg4,arg5) \
do { \
volatile OrigFn _orig = (orig); \
volatile unsigned long _argvec[6]; \
volatile unsigned long _res; \
_argvec[0] = (unsigned long)_orig.nraddr; \
_argvec[1] = (unsigned long)(arg1); \
_argvec[2] = (unsigned long)(arg2); \
_argvec[3] = (unsigned long)(arg3); \
_argvec[4] = (unsigned long)(arg4); \
_argvec[5] = (unsigned long)(arg5); \
__asm__ volatile( \
VALGRIND_ALIGN_STACK \
"sub sp, sp, #4 \n\t" \
"ldr r0, [%1, #20] \n\t" \
"push {r0} \n\t" \
"ldr r0, [%1, #4] \n\t" \
"ldr r1, [%1, #8] \n\t" \
"ldr r2, [%1, #12] \n\t" \
"ldr r3, [%1, #16] \n\t" \
"ldr r4, [%1] \n\t" /* target->r4 */ \
VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R4 \
VALGRIND_RESTORE_STACK \
"mov %0, r0" \
: /*out*/ "=r" (_res) \
: /*in*/ "0" (&_argvec[0]) \
: /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r10" \
); \
lval = (__typeof__(lval)) _res; \
} while (0)
#define CALL_FN_W_6W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6) \
do { \
volatile OrigFn _orig = (orig); \
volatile unsigned long _argvec[7]; \
volatile unsigned long _res; \
_argvec[0] = (unsigned long)_orig.nraddr; \
_argvec[1] = (unsigned long)(arg1); \
_argvec[2] = (unsigned long)(arg2); \
_argvec[3] = (unsigned long)(arg3); \
_argvec[4] = (unsigned long)(arg4); \
_argvec[5] = (unsigned long)(arg5); \
_argvec[6] = (unsigned long)(arg6); \
__asm__ volatile( \
VALGRIND_ALIGN_STACK \
"ldr r0, [%1, #20] \n\t" \
"ldr r1, [%1, #24] \n\t" \
"push {r0, r1} \n\t" \
"ldr r0, [%1, #4] \n\t" \
"ldr r1, [%1, #8] \n\t" \
"ldr r2, [%1, #12] \n\t" \
"ldr r3, [%1, #16] \n\t" \
"ldr r4, [%1] \n\t" /* target->r4 */ \
VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R4 \
VALGRIND_RESTORE_STACK \
"mov %0, r0" \
: /*out*/ "=r" (_res) \
: /*in*/ "0" (&_argvec[0]) \
: /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r10" \
); \
lval = (__typeof__(lval)) _res; \
} while (0)
#define CALL_FN_W_7W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6, \
arg7) \
do { \
volatile OrigFn _orig = (orig); \
volatile unsigned long _argvec[8]; \
volatile unsigned long _res; \
_argvec[0] = (unsigned long)_orig.nraddr; \
_argvec[1] = (unsigned long)(arg1); \
_argvec[2] = (unsigned long)(arg2); \
_argvec[3] = (unsigned long)(arg3); \
_argvec[4] = (unsigned long)(arg4); \
_argvec[5] = (unsigned long)(arg5); \
_argvec[6] = (unsigned long)(arg6); \
_argvec[7] = (unsigned long)(arg7); \
__asm__ volatile( \
VALGRIND_ALIGN_STACK \
"sub sp, sp, #4 \n\t" \
"ldr r0, [%1, #20] \n\t" \
"ldr r1, [%1, #24] \n\t" \
"ldr r2, [%1, #28] \n\t" \
"push {r0, r1, r2} \n\t" \
"ldr r0, [%1, #4] \n\t" \
"ldr r1, [%1, #8] \n\t" \
"ldr r2, [%1, #12] \n\t" \
"ldr r3, [%1, #16] \n\t" \
"ldr r4, [%1] \n\t" /* target->r4 */ \
VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R4 \
VALGRIND_RESTORE_STACK \
"mov %0, r0" \
: /*out*/ "=r" (_res) \
: /*in*/ "0" (&_argvec[0]) \
: /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r10" \
); \
lval = (__typeof__(lval)) _res; \
} while (0)
#define CALL_FN_W_8W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6, \
arg7,arg8) \
do { \
volatile OrigFn _orig = (orig); \
volatile unsigned long _argvec[9]; \
volatile unsigned long _res; \
_argvec[0] = (unsigned long)_orig.nraddr; \
_argvec[1] = (unsigned long)(arg1); \
_argvec[2] = (unsigned long)(arg2); \
_argvec[3] = (unsigned long)(arg3); \
_argvec[4] = (unsigned long)(arg4); \
_argvec[5] = (unsigned long)(arg5); \
_argvec[6] = (unsigned long)(arg6); \
_argvec[7] = (unsigned long)(arg7); \
_argvec[8] = (unsigned long)(arg8); \
__asm__ volatile( \
VALGRIND_ALIGN_STACK \
"ldr r0, [%1, #20] \n\t" \
"ldr r1, [%1, #24] \n\t" \
"ldr r2, [%1, #28] \n\t" \
"ldr r3, [%1, #32] \n\t" \
"push {r0, r1, r2, r3} \n\t" \
"ldr r0, [%1, #4] \n\t" \
"ldr r1, [%1, #8] \n\t" \
"ldr r2, [%1, #12] \n\t" \
"ldr r3, [%1, #16] \n\t" \
"ldr r4, [%1] \n\t" /* target->r4 */ \
VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R4 \
VALGRIND_RESTORE_STACK \
"mov %0, r0" \
: /*out*/ "=r" (_res) \
: /*in*/ "0" (&_argvec[0]) \
: /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r10" \
); \
lval = (__typeof__(lval)) _res; \
} while (0)
#define CALL_FN_W_9W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6, \
arg7,arg8,arg9) \
do { \
volatile OrigFn _orig = (orig); \
volatile unsigned long _argvec[10]; \
volatile unsigned long _res; \
_argvec[0] = (unsigned long)_orig.nraddr; \
_argvec[1] = (unsigned long)(arg1); \
_argvec[2] = (unsigned long)(arg2); \
_argvec[3] = (unsigned long)(arg3); \
_argvec[4] = (unsigned long)(arg4); \
_argvec[5] = (unsigned long)(arg5); \
_argvec[6] = (unsigned long)(arg6); \
_argvec[7] = (unsigned long)(arg7); \
_argvec[8] = (unsigned long)(arg8); \
_argvec[9] = (unsigned long)(arg9); \
__asm__ volatile( \
VALGRIND_ALIGN_STACK \
"sub sp, sp, #4 \n\t" \
"ldr r0, [%1, #20] \n\t" \
"ldr r1, [%1, #24] \n\t" \
"ldr r2, [%1, #28] \n\t" \
"ldr r3, [%1, #32] \n\t" \
"ldr r4, [%1, #36] \n\t" \
"push {r0, r1, r2, r3, r4} \n\t" \
"ldr r0, [%1, #4] \n\t" \
"ldr r1, [%1, #8] \n\t" \
"ldr r2, [%1, #12] \n\t" \
"ldr r3, [%1, #16] \n\t" \
"ldr r4, [%1] \n\t" /* target->r4 */ \
VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R4 \
VALGRIND_RESTORE_STACK \
"mov %0, r0" \
: /*out*/ "=r" (_res) \
: /*in*/ "0" (&_argvec[0]) \
: /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r10" \
); \
lval = (__typeof__(lval)) _res; \
} while (0)
#define CALL_FN_W_10W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6, \
arg7,arg8,arg9,arg10) \
do { \
volatile OrigFn _orig = (orig); \
volatile unsigned long _argvec[11]; \
volatile unsigned long _res; \
_argvec[0] = (unsigned long)_orig.nraddr; \
_argvec[1] = (unsigned long)(arg1); \
_argvec[2] = (unsigned long)(arg2); \
_argvec[3] = (unsigned long)(arg3); \
_argvec[4] = (unsigned long)(arg4); \
_argvec[5] = (unsigned long)(arg5); \
_argvec[6] = (unsigned long)(arg6); \
_argvec[7] = (unsigned long)(arg7); \
_argvec[8] = (unsigned long)(arg8); \
_argvec[9] = (unsigned long)(arg9); \
_argvec[10] = (unsigned long)(arg10); \
__asm__ volatile( \
VALGRIND_ALIGN_STACK \
"ldr r0, [%1, #40] \n\t" \
"push {r0} \n\t" \
"ldr r0, [%1, #20] \n\t" \
"ldr r1, [%1, #24] \n\t" \
"ldr r2, [%1, #28] \n\t" \
"ldr r3, [%1, #32] \n\t" \
"ldr r4, [%1, #36] \n\t" \
"push {r0, r1, r2, r3, r4} \n\t" \
"ldr r0, [%1, #4] \n\t" \
"ldr r1, [%1, #8] \n\t" \
"ldr r2, [%1, #12] \n\t" \
"ldr r3, [%1, #16] \n\t" \
"ldr r4, [%1] \n\t" /* target->r4 */ \
VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R4 \
VALGRIND_RESTORE_STACK \
"mov %0, r0" \
: /*out*/ "=r" (_res) \
: /*in*/ "0" (&_argvec[0]) \
: /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r10" \
); \
lval = (__typeof__(lval)) _res; \
} while (0)
#define CALL_FN_W_11W(lval, orig, arg1,arg2,arg3,arg4,arg5, \
arg6,arg7,arg8,arg9,arg10, \
arg11) \
do { \
volatile OrigFn _orig = (orig); \
volatile unsigned long _argvec[12]; \
volatile unsigned long _res; \
_argvec[0] = (unsigned long)_orig.nraddr; \
_argvec[1] = (unsigned long)(arg1); \
_argvec[2] = (unsigned long)(arg2); \
_argvec[3] = (unsigned long)(arg3); \
_argvec[4] = (unsigned long)(arg4); \
_argvec[5] = (unsigned long)(arg5); \
_argvec[6] = (unsigned long)(arg6); \
_argvec[7] = (unsigned long)(arg7); \
_argvec[8] = (unsigned long)(arg8); \
_argvec[9] = (unsigned long)(arg9); \
_argvec[10] = (unsigned long)(arg10); \
_argvec[11] = (unsigned long)(arg11); \
__asm__ volatile( \
VALGRIND_ALIGN_STACK \
"sub sp, sp, #4 \n\t" \
"ldr r0, [%1, #40] \n\t" \
"ldr r1, [%1, #44] \n\t" \
"push {r0, r1} \n\t" \
"ldr r0, [%1, #20] \n\t" \
"ldr r1, [%1, #24] \n\t" \
"ldr r2, [%1, #28] \n\t" \
"ldr r3, [%1, #32] \n\t" \
"ldr r4, [%1, #36] \n\t" \
"push {r0, r1, r2, r3, r4} \n\t" \
"ldr r0, [%1, #4] \n\t" \
"ldr r1, [%1, #8] \n\t" \
"ldr r2, [%1, #12] \n\t" \
"ldr r3, [%1, #16] \n\t" \
"ldr r4, [%1] \n\t" /* target->r4 */ \
VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R4 \
VALGRIND_RESTORE_STACK \
"mov %0, r0" \
: /*out*/ "=r" (_res) \
: /*in*/ "0" (&_argvec[0]) \
: /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r10" \
); \
lval = (__typeof__(lval)) _res; \
} while (0)
#define CALL_FN_W_12W(lval, orig, arg1,arg2,arg3,arg4,arg5, \
arg6,arg7,arg8,arg9,arg10, \
arg11,arg12) \
do { \
volatile OrigFn _orig = (orig); \
volatile unsigned long _argvec[13]; \
volatile unsigned long _res; \
_argvec[0] = (unsigned long)_orig.nraddr; \
_argvec[1] = (unsigned long)(arg1); \
_argvec[2] = (unsigned long)(arg2); \
_argvec[3] = (unsigned long)(arg3); \
_argvec[4] = (unsigned long)(arg4); \
_argvec[5] = (unsigned long)(arg5); \
_argvec[6] = (unsigned long)(arg6); \
_argvec[7] = (unsigned long)(arg7); \
_argvec[8] = (unsigned long)(arg8); \
_argvec[9] = (unsigned long)(arg9); \
_argvec[10] = (unsigned long)(arg10); \
_argvec[11] = (unsigned long)(arg11); \
_argvec[12] = (unsigned long)(arg12); \
__asm__ volatile( \
VALGRIND_ALIGN_STACK \
"ldr r0, [%1, #40] \n\t" \
"ldr r1, [%1, #44] \n\t" \
"ldr r2, [%1, #48] \n\t" \
"push {r0, r1, r2} \n\t" \
"ldr r0, [%1, #20] \n\t" \
"ldr r1, [%1, #24] \n\t" \
"ldr r2, [%1, #28] \n\t" \
"ldr r3, [%1, #32] \n\t" \
"ldr r4, [%1, #36] \n\t" \
"push {r0, r1, r2, r3, r4} \n\t" \
"ldr r0, [%1, #4] \n\t" \
"ldr r1, [%1, #8] \n\t" \
"ldr r2, [%1, #12] \n\t" \
"ldr r3, [%1, #16] \n\t" \
"ldr r4, [%1] \n\t" /* target->r4 */ \
VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R4 \
VALGRIND_RESTORE_STACK \
"mov %0, r0" \
: /*out*/ "=r" (_res) \
: /*in*/ "0" (&_argvec[0]) \
: /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r10" \
); \
lval = (__typeof__(lval)) _res; \
} while (0)
#endif /* PLAT_arm_linux */
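/* Editorial usage sketch (not part of the original header).  Whatever
   the platform, the CALL_FN_* macros are used the same way inside a
   function wrapper; the fragment below broadly mirrors the example in
   the Valgrind manual and is kept under "#if 0" so it is never
   compiled. */
#if 0
#include <stdio.h>
#include "valgrind.h"
/* Wrap int foo(int, int) from the main executable (soname "NONE"). */
int I_WRAP_SONAME_FN_ZU(NONE, foo)(int x, int y)
{
   int    result;
   OrigFn fn;
   VALGRIND_GET_ORIG_FN(fn);          /* fetch the real foo */
   printf("foo's wrapper: args %d %d\n", x, y);
   CALL_FN_W_WW(result, fn, x, y);    /* call it without re-wrapping */
   printf("foo's wrapper: result %d\n", result);
   return result;
}
#endif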
/* ------------------------ arm64-linux ------------------------ */
#if defined(PLAT_arm64_linux)
/* These regs are trashed by the hidden call. */
#define __CALLER_SAVED_REGS \
"x0", "x1", "x2", "x3","x4", "x5", "x6", "x7", "x8", "x9", \
"x10", "x11", "x12", "x13", "x14", "x15", "x16", "x17", \
"x18", "x19", "x20", "x30", \
"v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", \
"v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", \
"v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", \
"v26", "v27", "v28", "v29", "v30", "v31"
/* x21 is callee-saved, so we can use it to save and restore SP around
the hidden call. */
#define VALGRIND_ALIGN_STACK \
"mov x21, sp\n\t" \
"bic sp, x21, #15\n\t"
#define VALGRIND_RESTORE_STACK \
"mov sp, x21\n\t"
/* These CALL_FN_ macros assume that on arm64-linux,
sizeof(unsigned long) == 8. */
#define CALL_FN_W_v(lval, orig) \
do { \
volatile OrigFn _orig = (orig); \
volatile unsigned long _argvec[1]; \
volatile unsigned long _res; \
_argvec[0] = (unsigned long)_orig.nraddr; \
__asm__ volatile( \
VALGRIND_ALIGN_STACK \
"ldr x8, [%1] \n\t" /* target->x8 */ \
VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_X8 \
VALGRIND_RESTORE_STACK \
"mov %0, x0\n" \
: /*out*/ "=r" (_res) \
: /*in*/ "0" (&_argvec[0]) \
: /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "x21" \
); \
lval = (__typeof__(lval)) _res; \
} while (0)
#define CALL_FN_W_W(lval, orig, arg1) \
do { \
volatile OrigFn _orig = (orig); \
volatile unsigned long _argvec[2]; \
volatile unsigned long _res; \
_argvec[0] = (unsigned long)_orig.nraddr; \
_argvec[1] = (unsigned long)(arg1); \
__asm__ volatile( \
VALGRIND_ALIGN_STACK \
"ldr x0, [%1, #8] \n\t" \
"ldr x8, [%1] \n\t" /* target->x8 */ \
VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_X8 \
VALGRIND_RESTORE_STACK \
"mov %0, x0\n" \
: /*out*/ "=r" (_res) \
: /*in*/ "0" (&_argvec[0]) \
: /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "x21" \
); \
lval = (__typeof__(lval)) _res; \
} while (0)
#define CALL_FN_W_WW(lval, orig, arg1,arg2) \
do { \
volatile OrigFn _orig = (orig); \
volatile unsigned long _argvec[3]; \
volatile unsigned long _res; \
_argvec[0] = (unsigned long)_orig.nraddr; \
_argvec[1] = (unsigned long)(arg1); \
_argvec[2] = (unsigned long)(arg2); \
__asm__ volatile( \
VALGRIND_ALIGN_STACK \
"ldr x0, [%1, #8] \n\t" \
"ldr x1, [%1, #16] \n\t" \
"ldr x8, [%1] \n\t" /* target->x8 */ \
VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_X8 \
VALGRIND_RESTORE_STACK \
"mov %0, x0\n" \
: /*out*/ "=r" (_res) \
: /*in*/ "0" (&_argvec[0]) \
: /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "x21" \
); \
lval = (__typeof__(lval)) _res; \
} while (0)
#define CALL_FN_W_WWW(lval, orig, arg1,arg2,arg3) \
do { \
volatile OrigFn _orig = (orig); \
volatile unsigned long _argvec[4]; \
volatile unsigned long _res; \
_argvec[0] = (unsigned long)_orig.nraddr; \
_argvec[1] = (unsigned long)(arg1); \
_argvec[2] = (unsigned long)(arg2); \
_argvec[3] = (unsigned long)(arg3); \
__asm__ volatile( \
VALGRIND_ALIGN_STACK \
"ldr x0, [%1, #8] \n\t" \
"ldr x1, [%1, #16] \n\t" \
"ldr x2, [%1, #24] \n\t" \
"ldr x8, [%1] \n\t" /* target->x8 */ \
VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_X8 \
VALGRIND_RESTORE_STACK \
"mov %0, x0\n" \
: /*out*/ "=r" (_res) \
: /*in*/ "0" (&_argvec[0]) \
: /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "x21" \
); \
lval = (__typeof__(lval)) _res; \
} while (0)
#define CALL_FN_W_WWWW(lval, orig, arg1,arg2,arg3,arg4) \
do { \
volatile OrigFn _orig = (orig); \
volatile unsigned long _argvec[5]; \
volatile unsigned long _res; \
_argvec[0] = (unsigned long)_orig.nraddr; \
_argvec[1] = (unsigned long)(arg1); \
_argvec[2] = (unsigned long)(arg2); \
_argvec[3] = (unsigned long)(arg3); \
_argvec[4] = (unsigned long)(arg4); \
__asm__ volatile( \
VALGRIND_ALIGN_STACK \
"ldr x0, [%1, #8] \n\t" \
"ldr x1, [%1, #16] \n\t" \
"ldr x2, [%1, #24] \n\t" \
"ldr x3, [%1, #32] \n\t" \
"ldr x8, [%1] \n\t" /* target->x8 */ \
VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_X8 \
VALGRIND_RESTORE_STACK \
"mov %0, x0" \
: /*out*/ "=r" (_res) \
: /*in*/ "0" (&_argvec[0]) \
: /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "x21" \
); \
lval = (__typeof__(lval)) _res; \
} while (0)
#define CALL_FN_W_5W(lval, orig, arg1,arg2,arg3,arg4,arg5) \
do { \
volatile OrigFn _orig = (orig); \
volatile unsigned long _argvec[6]; \
volatile unsigned long _res; \
_argvec[0] = (unsigned long)_orig.nraddr; \
_argvec[1] = (unsigned long)(arg1); \
_argvec[2] = (unsigned long)(arg2); \
_argvec[3] = (unsigned long)(arg3); \
_argvec[4] = (unsigned long)(arg4); \
_argvec[5] = (unsigned long)(arg5); \
__asm__ volatile( \
VALGRIND_ALIGN_STACK \
"ldr x0, [%1, #8] \n\t" \
"ldr x1, [%1, #16] \n\t" \
"ldr x2, [%1, #24] \n\t" \
"ldr x3, [%1, #32] \n\t" \
"ldr x4, [%1, #40] \n\t" \
"ldr x8, [%1] \n\t" /* target->x8 */ \
VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_X8 \
VALGRIND_RESTORE_STACK \
"mov %0, x0" \
: /*out*/ "=r" (_res) \
: /*in*/ "0" (&_argvec[0]) \
: /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "x21" \
); \
lval = (__typeof__(lval)) _res; \
} while (0)
#define CALL_FN_W_6W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6) \
do { \
volatile OrigFn _orig = (orig); \
volatile unsigned long _argvec[7]; \
volatile unsigned long _res; \
_argvec[0] = (unsigned long)_orig.nraddr; \
_argvec[1] = (unsigned long)(arg1); \
_argvec[2] = (unsigned long)(arg2); \
_argvec[3] = (unsigned long)(arg3); \
_argvec[4] = (unsigned long)(arg4); \
_argvec[5] = (unsigned long)(arg5); \
_argvec[6] = (unsigned long)(arg6); \
__asm__ volatile( \
VALGRIND_ALIGN_STACK \
"ldr x0, [%1, #8] \n\t" \
"ldr x1, [%1, #16] \n\t" \
"ldr x2, [%1, #24] \n\t" \
"ldr x3, [%1, #32] \n\t" \
"ldr x4, [%1, #40] \n\t" \
"ldr x5, [%1, #48] \n\t" \
"ldr x8, [%1] \n\t" /* target->x8 */ \
VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_X8 \
VALGRIND_RESTORE_STACK \
"mov %0, x0" \
: /*out*/ "=r" (_res) \
: /*in*/ "0" (&_argvec[0]) \
: /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "x21" \
); \
lval = (__typeof__(lval)) _res; \
} while (0)
#define CALL_FN_W_7W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6, \
arg7) \
do { \
volatile OrigFn _orig = (orig); \
volatile unsigned long _argvec[8]; \
volatile unsigned long _res; \
_argvec[0] = (unsigned long)_orig.nraddr; \
_argvec[1] = (unsigned long)(arg1); \
_argvec[2] = (unsigned long)(arg2); \
_argvec[3] = (unsigned long)(arg3); \
_argvec[4] = (unsigned long)(arg4); \
_argvec[5] = (unsigned long)(arg5); \
_argvec[6] = (unsigned long)(arg6); \
_argvec[7] = (unsigned long)(arg7); \
__asm__ volatile( \
VALGRIND_ALIGN_STACK \
"ldr x0, [%1, #8] \n\t" \
"ldr x1, [%1, #16] \n\t" \
"ldr x2, [%1, #24] \n\t" \
"ldr x3, [%1, #32] \n\t" \
"ldr x4, [%1, #40] \n\t" \
"ldr x5, [%1, #48] \n\t" \
"ldr x6, [%1, #56] \n\t" \
"ldr x8, [%1] \n\t" /* target->x8 */ \
VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_X8 \
VALGRIND_RESTORE_STACK \
"mov %0, x0" \
: /*out*/ "=r" (_res) \
: /*in*/ "0" (&_argvec[0]) \
: /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "x21" \
); \
lval = (__typeof__(lval)) _res; \
} while (0)
#define CALL_FN_W_8W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6, \
arg7,arg8) \
do { \
volatile OrigFn _orig = (orig); \
volatile unsigned long _argvec[9]; \
volatile unsigned long _res; \
_argvec[0] = (unsigned long)_orig.nraddr; \
_argvec[1] = (unsigned long)(arg1); \
_argvec[2] = (unsigned long)(arg2); \
_argvec[3] = (unsigned long)(arg3); \
_argvec[4] = (unsigned long)(arg4); \
_argvec[5] = (unsigned long)(arg5); \
_argvec[6] = (unsigned long)(arg6); \
_argvec[7] = (unsigned long)(arg7); \
_argvec[8] = (unsigned long)(arg8); \
__asm__ volatile( \
VALGRIND_ALIGN_STACK \
"ldr x0, [%1, #8] \n\t" \
"ldr x1, [%1, #16] \n\t" \
"ldr x2, [%1, #24] \n\t" \
"ldr x3, [%1, #32] \n\t" \
"ldr x4, [%1, #40] \n\t" \
"ldr x5, [%1, #48] \n\t" \
"ldr x6, [%1, #56] \n\t" \
"ldr x7, [%1, #64] \n\t" \
"ldr x8, [%1] \n\t" /* target->x8 */ \
VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_X8 \
VALGRIND_RESTORE_STACK \
"mov %0, x0" \
: /*out*/ "=r" (_res) \
: /*in*/ "0" (&_argvec[0]) \
: /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "x21" \
); \
lval = (__typeof__(lval)) _res; \
} while (0)
#define CALL_FN_W_9W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6, \
arg7,arg8,arg9) \
do { \
volatile OrigFn _orig = (orig); \
volatile unsigned long _argvec[10]; \
volatile unsigned long _res; \
_argvec[0] = (unsigned long)_orig.nraddr; \
_argvec[1] = (unsigned long)(arg1); \
_argvec[2] = (unsigned long)(arg2); \
_argvec[3] = (unsigned long)(arg3); \
_argvec[4] = (unsigned long)(arg4); \
_argvec[5] = (unsigned long)(arg5); \
_argvec[6] = (unsigned long)(arg6); \
_argvec[7] = (unsigned long)(arg7); \
_argvec[8] = (unsigned long)(arg8); \
_argvec[9] = (unsigned long)(arg9); \
__asm__ volatile( \
VALGRIND_ALIGN_STACK \
"sub sp, sp, #0x20 \n\t" \
"ldr x0, [%1, #8] \n\t" \
"ldr x1, [%1, #16] \n\t" \
"ldr x2, [%1, #24] \n\t" \
"ldr x3, [%1, #32] \n\t" \
"ldr x4, [%1, #40] \n\t" \
"ldr x5, [%1, #48] \n\t" \
"ldr x6, [%1, #56] \n\t" \
"ldr x7, [%1, #64] \n\t" \
"ldr x8, [%1, #72] \n\t" \
"str x8, [sp, #0] \n\t" \
"ldr x8, [%1] \n\t" /* target->x8 */ \
VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_X8 \
VALGRIND_RESTORE_STACK \
"mov %0, x0" \
: /*out*/ "=r" (_res) \
: /*in*/ "0" (&_argvec[0]) \
: /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "x21" \
); \
lval = (__typeof__(lval)) _res; \
} while (0)
#define CALL_FN_W_10W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6, \
arg7,arg8,arg9,arg10) \
do { \
volatile OrigFn _orig = (orig); \
volatile unsigned long _argvec[11]; \
volatile unsigned long _res; \
_argvec[0] = (unsigned long)_orig.nraddr; \
_argvec[1] = (unsigned long)(arg1); \
_argvec[2] = (unsigned long)(arg2); \
_argvec[3] = (unsigned long)(arg3); \
_argvec[4] = (unsigned long)(arg4); \
_argvec[5] = (unsigned long)(arg5); \
_argvec[6] = (unsigned long)(arg6); \
_argvec[7] = (unsigned long)(arg7); \
_argvec[8] = (unsigned long)(arg8); \
_argvec[9] = (unsigned long)(arg9); \
_argvec[10] = (unsigned long)(arg10); \
__asm__ volatile( \
VALGRIND_ALIGN_STACK \
"sub sp, sp, #0x20 \n\t" \
"ldr x0, [%1, #8] \n\t" \
"ldr x1, [%1, #16] \n\t" \
"ldr x2, [%1, #24] \n\t" \
"ldr x3, [%1, #32] \n\t" \
"ldr x4, [%1, #40] \n\t" \
"ldr x5, [%1, #48] \n\t" \
"ldr x6, [%1, #56] \n\t" \
"ldr x7, [%1, #64] \n\t" \
"ldr x8, [%1, #72] \n\t" \
"str x8, [sp, #0] \n\t" \
"ldr x8, [%1, #80] \n\t" \
"str x8, [sp, #8] \n\t" \
"ldr x8, [%1] \n\t" /* target->x8 */ \
VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_X8 \
VALGRIND_RESTORE_STACK \
"mov %0, x0" \
: /*out*/ "=r" (_res) \
: /*in*/ "0" (&_argvec[0]) \
: /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "x21" \
); \
lval = (__typeof__(lval)) _res; \
} while (0)
#define CALL_FN_W_11W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6, \
arg7,arg8,arg9,arg10,arg11) \
do { \
volatile OrigFn _orig = (orig); \
volatile unsigned long _argvec[12]; \
volatile unsigned long _res; \
_argvec[0] = (unsigned long)_orig.nraddr; \
_argvec[1] = (unsigned long)(arg1); \
_argvec[2] = (unsigned long)(arg2); \
_argvec[3] = (unsigned long)(arg3); \
_argvec[4] = (unsigned long)(arg4); \
_argvec[5] = (unsigned long)(arg5); \
_argvec[6] = (unsigned long)(arg6); \
_argvec[7] = (unsigned long)(arg7); \
_argvec[8] = (unsigned long)(arg8); \
_argvec[9] = (unsigned long)(arg9); \
_argvec[10] = (unsigned long)(arg10); \
_argvec[11] = (unsigned long)(arg11); \
__asm__ volatile( \
VALGRIND_ALIGN_STACK \
"sub sp, sp, #0x30 \n\t" \
"ldr x0, [%1, #8] \n\t" \
"ldr x1, [%1, #16] \n\t" \
"ldr x2, [%1, #24] \n\t" \
"ldr x3, [%1, #32] \n\t" \
"ldr x4, [%1, #40] \n\t" \
"ldr x5, [%1, #48] \n\t" \
"ldr x6, [%1, #56] \n\t" \
"ldr x7, [%1, #64] \n\t" \
"ldr x8, [%1, #72] \n\t" \
"str x8, [sp, #0] \n\t" \
"ldr x8, [%1, #80] \n\t" \
"str x8, [sp, #8] \n\t" \
"ldr x8, [%1, #88] \n\t" \
"str x8, [sp, #16] \n\t" \
"ldr x8, [%1] \n\t" /* target->x8 */ \
VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_X8 \
VALGRIND_RESTORE_STACK \
"mov %0, x0" \
: /*out*/ "=r" (_res) \
: /*in*/ "0" (&_argvec[0]) \
: /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "x21" \
); \
lval = (__typeof__(lval)) _res; \
} while (0)
#define CALL_FN_W_12W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6, \
arg7,arg8,arg9,arg10,arg11, \
arg12) \
do { \
volatile OrigFn _orig = (orig); \
volatile unsigned long _argvec[13]; \
volatile unsigned long _res; \
_argvec[0] = (unsigned long)_orig.nraddr; \
_argvec[1] = (unsigned long)(arg1); \
_argvec[2] = (unsigned long)(arg2); \
_argvec[3] = (unsigned long)(arg3); \
_argvec[4] = (unsigned long)(arg4); \
_argvec[5] = (unsigned long)(arg5); \
_argvec[6] = (unsigned long)(arg6); \
_argvec[7] = (unsigned long)(arg7); \
_argvec[8] = (unsigned long)(arg8); \
_argvec[9] = (unsigned long)(arg9); \
_argvec[10] = (unsigned long)(arg10); \
_argvec[11] = (unsigned long)(arg11); \
_argvec[12] = (unsigned long)(arg12); \
__asm__ volatile( \
VALGRIND_ALIGN_STACK \
"sub sp, sp, #0x30 \n\t" \
"ldr x0, [%1, #8] \n\t" \
"ldr x1, [%1, #16] \n\t" \
"ldr x2, [%1, #24] \n\t" \
"ldr x3, [%1, #32] \n\t" \
"ldr x4, [%1, #40] \n\t" \
"ldr x5, [%1, #48] \n\t" \
"ldr x6, [%1, #56] \n\t" \
"ldr x7, [%1, #64] \n\t" \
"ldr x8, [%1, #72] \n\t" \
"str x8, [sp, #0] \n\t" \
"ldr x8, [%1, #80] \n\t" \
"str x8, [sp, #8] \n\t" \
"ldr x8, [%1, #88] \n\t" \
"str x8, [sp, #16] \n\t" \
"ldr x8, [%1, #96] \n\t" \
"str x8, [sp, #24] \n\t" \
"ldr x8, [%1] \n\t" /* target->x8 */ \
VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_X8 \
VALGRIND_RESTORE_STACK \
"mov %0, x0" \
: /*out*/ "=r" (_res) \
: /*in*/ "0" (&_argvec[0]) \
: /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "x21" \
); \
lval = (__typeof__(lval)) _res; \
} while (0)
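/* Editorial note (not part of the original header): in the 9- to
   12-argument variants above, "sub sp, sp, #0x20" / "#0x30" reserves a
   16-byte-aligned block for the stack-passed arguments even when fewer
   bytes would strictly suffice, since AAPCS64 requires sp to remain
   16-byte aligned at the call; x8 first serves as scratch to stage
   those arguments and is only then loaded with the target address. */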
#endif /* PLAT_arm64_linux */
/* ------------------------- s390x-linux ------------------------- */
#if defined(PLAT_s390x_linux)
/* Similar workaround as amd64 (see above), but we use r11 as frame
pointer and save the old r11 in r7. r11 might be used for
argvec, therefore we copy argvec in r1 since r1 is clobbered
after the call anyway. */
#if defined(__GNUC__) && defined(__GCC_HAVE_DWARF2_CFI_ASM)
# define __FRAME_POINTER \
,"d"(__builtin_dwarf_cfa())
# define VALGRIND_CFI_PROLOGUE \
".cfi_remember_state\n\t" \
"lgr 1,%1\n\t" /* copy the argvec pointer in r1 */ \
"lgr 7,11\n\t" \
"lgr 11,%2\n\t" \
".cfi_def_cfa r11, 0\n\t"
# define VALGRIND_CFI_EPILOGUE \
"lgr 11, 7\n\t" \
".cfi_restore_state\n\t"
#else
# define __FRAME_POINTER
# define VALGRIND_CFI_PROLOGUE \
"lgr 1,%1\n\t"
# define VALGRIND_CFI_EPILOGUE
#endif
/* Nb: On s390 the stack pointer is properly aligned *at all times*
according to the s390 GCC maintainer. (The ABI specification is not
precise in this regard.) Therefore, VALGRIND_ALIGN_STACK and
VALGRIND_RESTORE_STACK are not defined here. */
/* These regs are trashed by the hidden call. Note that we overwrite
r14 in s390_irgen_noredir (VEX/priv/guest_s390_irgen.c) to give the
function a proper return address. All others are ABI defined call
clobbers. */
#if defined(__VX__) || defined(__S390_VX__)
#define __CALLER_SAVED_REGS "0", "1", "2", "3", "4", "5", "14", \
"v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", \
"v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", \
"v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", \
"v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31"
#else
#define __CALLER_SAVED_REGS "0", "1", "2", "3", "4", "5", "14", \
"f0", "f1", "f2", "f3", "f4", "f5", "f6", "f7"
#endif
/* Nb: Although r11 is modified in the asm snippets below (inside
VALGRIND_CFI_PROLOGUE) it is not listed in the clobber section, for
two reasons:
(1) r11 is restored in VALGRIND_CFI_EPILOGUE, so effectively it is not
modified
(2) GCC will complain that r11 cannot appear inside a clobber section,
when compiled with -O -fno-omit-frame-pointer
*/
#define CALL_FN_W_v(lval, orig) \
do { \
volatile OrigFn _orig = (orig); \
volatile unsigned long _argvec[1]; \
volatile unsigned long _res; \
_argvec[0] = (unsigned long)_orig.nraddr; \
__asm__ volatile( \
VALGRIND_CFI_PROLOGUE \
"aghi 15,-160\n\t" \
"lg 1, 0(1)\n\t" /* target->r1 */ \
VALGRIND_CALL_NOREDIR_R1 \
"aghi 15,160\n\t" \
VALGRIND_CFI_EPILOGUE \
"lgr %0, 2\n\t" \
: /*out*/ "=d" (_res) \
: /*in*/ "d" (&_argvec[0]) __FRAME_POINTER \
: /*trash*/ "cc", "memory", __CALLER_SAVED_REGS,"7" \
); \
lval = (__typeof__(lval)) _res; \
} while (0)
/* The call abi has the arguments in r2-r6 and stack */
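/* Editorial expansion (not part of the original header): in the macros
   below, arguments 1-5 are loaded from _argvec into r2-r6 ("lg 2, 8(1)"
   ... "lg 6,40(1)"); any further arguments are copied with "mvc" into
   the area that starts 160 bytes above the freshly lowered stack
   pointer, one 8-byte slot per argument. */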
#define CALL_FN_W_W(lval, orig, arg1) \
do { \
volatile OrigFn _orig = (orig); \
volatile unsigned long _argvec[2]; \
volatile unsigned long _res; \
_argvec[0] = (unsigned long)_orig.nraddr; \
_argvec[1] = (unsigned long)arg1; \
__asm__ volatile( \
VALGRIND_CFI_PROLOGUE \
"aghi 15,-160\n\t" \
"lg 2, 8(1)\n\t" \
"lg 1, 0(1)\n\t" \
VALGRIND_CALL_NOREDIR_R1 \
"aghi 15,160\n\t" \
VALGRIND_CFI_EPILOGUE \
"lgr %0, 2\n\t" \
: /*out*/ "=d" (_res) \
: /*in*/ "a" (&_argvec[0]) __FRAME_POINTER \
: /*trash*/ "cc", "memory", __CALLER_SAVED_REGS,"7" \
); \
lval = (__typeof__(lval)) _res; \
} while (0)
#define CALL_FN_W_WW(lval, orig, arg1, arg2) \
do { \
volatile OrigFn _orig = (orig); \
volatile unsigned long _argvec[3]; \
volatile unsigned long _res; \
_argvec[0] = (unsigned long)_orig.nraddr; \
_argvec[1] = (unsigned long)arg1; \
_argvec[2] = (unsigned long)arg2; \
__asm__ volatile( \
VALGRIND_CFI_PROLOGUE \
"aghi 15,-160\n\t" \
"lg 2, 8(1)\n\t" \
"lg 3,16(1)\n\t" \
"lg 1, 0(1)\n\t" \
VALGRIND_CALL_NOREDIR_R1 \
"aghi 15,160\n\t" \
VALGRIND_CFI_EPILOGUE \
"lgr %0, 2\n\t" \
: /*out*/ "=d" (_res) \
: /*in*/ "a" (&_argvec[0]) __FRAME_POINTER \
: /*trash*/ "cc", "memory", __CALLER_SAVED_REGS,"7" \
); \
lval = (__typeof__(lval)) _res; \
} while (0)
#define CALL_FN_W_WWW(lval, orig, arg1, arg2, arg3) \
do { \
volatile OrigFn _orig = (orig); \
volatile unsigned long _argvec[4]; \
volatile unsigned long _res; \
_argvec[0] = (unsigned long)_orig.nraddr; \
_argvec[1] = (unsigned long)arg1; \
_argvec[2] = (unsigned long)arg2; \
_argvec[3] = (unsigned long)arg3; \
__asm__ volatile( \
VALGRIND_CFI_PROLOGUE \
"aghi 15,-160\n\t" \
"lg 2, 8(1)\n\t" \
"lg 3,16(1)\n\t" \
"lg 4,24(1)\n\t" \
"lg 1, 0(1)\n\t" \
VALGRIND_CALL_NOREDIR_R1 \
"aghi 15,160\n\t" \
VALGRIND_CFI_EPILOGUE \
"lgr %0, 2\n\t" \
: /*out*/ "=d" (_res) \
: /*in*/ "a" (&_argvec[0]) __FRAME_POINTER \
: /*trash*/ "cc", "memory", __CALLER_SAVED_REGS,"7" \
); \
lval = (__typeof__(lval)) _res; \
} while (0)
#define CALL_FN_W_WWWW(lval, orig, arg1, arg2, arg3, arg4) \
do { \
volatile OrigFn _orig = (orig); \
volatile unsigned long _argvec[5]; \
volatile unsigned long _res; \
_argvec[0] = (unsigned long)_orig.nraddr; \
_argvec[1] = (unsigned long)arg1; \
_argvec[2] = (unsigned long)arg2; \
_argvec[3] = (unsigned long)arg3; \
_argvec[4] = (unsigned long)arg4; \
__asm__ volatile( \
VALGRIND_CFI_PROLOGUE \
"aghi 15,-160\n\t" \
"lg 2, 8(1)\n\t" \
"lg 3,16(1)\n\t" \
"lg 4,24(1)\n\t" \
"lg 5,32(1)\n\t" \
"lg 1, 0(1)\n\t" \
VALGRIND_CALL_NOREDIR_R1 \
"aghi 15,160\n\t" \
VALGRIND_CFI_EPILOGUE \
"lgr %0, 2\n\t" \
: /*out*/ "=d" (_res) \
: /*in*/ "a" (&_argvec[0]) __FRAME_POINTER \
: /*trash*/ "cc", "memory", __CALLER_SAVED_REGS,"7" \
); \
lval = (__typeof__(lval)) _res; \
} while (0)
#define CALL_FN_W_5W(lval, orig, arg1, arg2, arg3, arg4, arg5) \
do { \
volatile OrigFn _orig = (orig); \
volatile unsigned long _argvec[6]; \
volatile unsigned long _res; \
_argvec[0] = (unsigned long)_orig.nraddr; \
_argvec[1] = (unsigned long)arg1; \
_argvec[2] = (unsigned long)arg2; \
_argvec[3] = (unsigned long)arg3; \
_argvec[4] = (unsigned long)arg4; \
_argvec[5] = (unsigned long)arg5; \
__asm__ volatile( \
VALGRIND_CFI_PROLOGUE \
"aghi 15,-160\n\t" \
"lg 2, 8(1)\n\t" \
"lg 3,16(1)\n\t" \
"lg 4,24(1)\n\t" \
"lg 5,32(1)\n\t" \
"lg 6,40(1)\n\t" \
"lg 1, 0(1)\n\t" \
VALGRIND_CALL_NOREDIR_R1 \
"aghi 15,160\n\t" \
VALGRIND_CFI_EPILOGUE \
"lgr %0, 2\n\t" \
: /*out*/ "=d" (_res) \
: /*in*/ "a" (&_argvec[0]) __FRAME_POINTER \
: /*trash*/ "cc", "memory", __CALLER_SAVED_REGS,"6","7" \
); \
lval = (__typeof__(lval)) _res; \
} while (0)
#define CALL_FN_W_6W(lval, orig, arg1, arg2, arg3, arg4, arg5, \
arg6) \
do { \
volatile OrigFn _orig = (orig); \
volatile unsigned long _argvec[7]; \
volatile unsigned long _res; \
_argvec[0] = (unsigned long)_orig.nraddr; \
_argvec[1] = (unsigned long)arg1; \
_argvec[2] = (unsigned long)arg2; \
_argvec[3] = (unsigned long)arg3; \
_argvec[4] = (unsigned long)arg4; \
_argvec[5] = (unsigned long)arg5; \
_argvec[6] = (unsigned long)arg6; \
__asm__ volatile( \
VALGRIND_CFI_PROLOGUE \
"aghi 15,-168\n\t" \
"lg 2, 8(1)\n\t" \
"lg 3,16(1)\n\t" \
"lg 4,24(1)\n\t" \
"lg 5,32(1)\n\t" \
"lg 6,40(1)\n\t" \
"mvc 160(8,15), 48(1)\n\t" \
"lg 1, 0(1)\n\t" \
VALGRIND_CALL_NOREDIR_R1 \
"aghi 15,168\n\t" \
VALGRIND_CFI_EPILOGUE \
"lgr %0, 2\n\t" \
: /*out*/ "=d" (_res) \
: /*in*/ "a" (&_argvec[0]) __FRAME_POINTER \
: /*trash*/ "cc", "memory", __CALLER_SAVED_REGS,"6","7" \
); \
lval = (__typeof__(lval)) _res; \
} while (0)
#define CALL_FN_W_7W(lval, orig, arg1, arg2, arg3, arg4, arg5, \
arg6, arg7) \
do { \
volatile OrigFn _orig = (orig); \
volatile unsigned long _argvec[8]; \
volatile unsigned long _res; \
_argvec[0] = (unsigned long)_orig.nraddr; \
_argvec[1] = (unsigned long)arg1; \
_argvec[2] = (unsigned long)arg2; \
_argvec[3] = (unsigned long)arg3; \
_argvec[4] = (unsigned long)arg4; \
_argvec[5] = (unsigned long)arg5; \
_argvec[6] = (unsigned long)arg6; \
_argvec[7] = (unsigned long)arg7; \
__asm__ volatile( \
VALGRIND_CFI_PROLOGUE \
"aghi 15,-176\n\t" \
"lg 2, 8(1)\n\t" \
"lg 3,16(1)\n\t" \
"lg 4,24(1)\n\t" \
"lg 5,32(1)\n\t" \
"lg 6,40(1)\n\t" \
"mvc 160(8,15), 48(1)\n\t" \
"mvc 168(8,15), 56(1)\n\t" \
"lg 1, 0(1)\n\t" \
VALGRIND_CALL_NOREDIR_R1 \
"aghi 15,176\n\t" \
VALGRIND_CFI_EPILOGUE \
"lgr %0, 2\n\t" \
: /*out*/ "=d" (_res) \
: /*in*/ "a" (&_argvec[0]) __FRAME_POINTER \
: /*trash*/ "cc", "memory", __CALLER_SAVED_REGS,"6","7" \
); \
lval = (__typeof__(lval)) _res; \
} while (0)
#define CALL_FN_W_8W(lval, orig, arg1, arg2, arg3, arg4, arg5, \
arg6, arg7 ,arg8) \
do { \
volatile OrigFn _orig = (orig); \
volatile unsigned long _argvec[9]; \
volatile unsigned long _res; \
_argvec[0] = (unsigned long)_orig.nraddr; \
_argvec[1] = (unsigned long)arg1; \
_argvec[2] = (unsigned long)arg2; \
_argvec[3] = (unsigned long)arg3; \
_argvec[4] = (unsigned long)arg4; \
_argvec[5] = (unsigned long)arg5; \
_argvec[6] = (unsigned long)arg6; \
_argvec[7] = (unsigned long)arg7; \
_argvec[8] = (unsigned long)arg8; \
__asm__ volatile( \
VALGRIND_CFI_PROLOGUE \
"aghi 15,-184\n\t" \
"lg 2, 8(1)\n\t" \
"lg 3,16(1)\n\t" \
"lg 4,24(1)\n\t" \
"lg 5,32(1)\n\t" \
"lg 6,40(1)\n\t" \
"mvc 160(8,15), 48(1)\n\t" \
"mvc 168(8,15), 56(1)\n\t" \
"mvc 176(8,15), 64(1)\n\t" \
"lg 1, 0(1)\n\t" \
VALGRIND_CALL_NOREDIR_R1 \
"aghi 15,184\n\t" \
VALGRIND_CFI_EPILOGUE \
"lgr %0, 2\n\t" \
: /*out*/ "=d" (_res) \
: /*in*/ "a" (&_argvec[0]) __FRAME_POINTER \
: /*trash*/ "cc", "memory", __CALLER_SAVED_REGS,"6","7" \
); \
lval = (__typeof__(lval)) _res; \
} while (0)
#define CALL_FN_W_9W(lval, orig, arg1, arg2, arg3, arg4, arg5, \
arg6, arg7 ,arg8, arg9) \
do { \
volatile OrigFn _orig = (orig); \
volatile unsigned long _argvec[10]; \
volatile unsigned long _res; \
_argvec[0] = (unsigned long)_orig.nraddr; \
_argvec[1] = (unsigned long)arg1; \
_argvec[2] = (unsigned long)arg2; \
_argvec[3] = (unsigned long)arg3; \
_argvec[4] = (unsigned long)arg4; \
_argvec[5] = (unsigned long)arg5; \
_argvec[6] = (unsigned long)arg6; \
_argvec[7] = (unsigned long)arg7; \
_argvec[8] = (unsigned long)arg8; \
_argvec[9] = (unsigned long)arg9; \
__asm__ volatile( \
VALGRIND_CFI_PROLOGUE \
"aghi 15,-192\n\t" \
"lg 2, 8(1)\n\t" \
"lg 3,16(1)\n\t" \
"lg 4,24(1)\n\t" \
"lg 5,32(1)\n\t" \
"lg 6,40(1)\n\t" \
"mvc 160(8,15), 48(1)\n\t" \
"mvc 168(8,15), 56(1)\n\t" \
"mvc 176(8,15), 64(1)\n\t" \
"mvc 184(8,15), 72(1)\n\t" \
"lg 1, 0(1)\n\t" \
VALGRIND_CALL_NOREDIR_R1 \
"aghi 15,192\n\t" \
VALGRIND_CFI_EPILOGUE \
"lgr %0, 2\n\t" \
: /*out*/ "=d" (_res) \
: /*in*/ "a" (&_argvec[0]) __FRAME_POINTER \
: /*trash*/ "cc", "memory", __CALLER_SAVED_REGS,"6","7" \
); \
lval = (__typeof__(lval)) _res; \
} while (0)
#define CALL_FN_W_10W(lval, orig, arg1, arg2, arg3, arg4, arg5, \
arg6, arg7 ,arg8, arg9, arg10) \
do { \
volatile OrigFn _orig = (orig); \
volatile unsigned long _argvec[11]; \
volatile unsigned long _res; \
_argvec[0] = (unsigned long)_orig.nraddr; \
_argvec[1] = (unsigned long)arg1; \
_argvec[2] = (unsigned long)arg2; \
_argvec[3] = (unsigned long)arg3; \
_argvec[4] = (unsigned long)arg4; \
_argvec[5] = (unsigned long)arg5; \
_argvec[6] = (unsigned long)arg6; \
_argvec[7] = (unsigned long)arg7; \
_argvec[8] = (unsigned long)arg8; \
_argvec[9] = (unsigned long)arg9; \
_argvec[10] = (unsigned long)arg10; \
__asm__ volatile( \
VALGRIND_CFI_PROLOGUE \
"aghi 15,-200\n\t" \
"lg 2, 8(1)\n\t" \
"lg 3,16(1)\n\t" \
"lg 4,24(1)\n\t" \
"lg 5,32(1)\n\t" \
"lg 6,40(1)\n\t" \
"mvc 160(8,15), 48(1)\n\t" \
"mvc 168(8,15), 56(1)\n\t" \
"mvc 176(8,15), 64(1)\n\t" \
"mvc 184(8,15), 72(1)\n\t" \
"mvc 192(8,15), 80(1)\n\t" \
"lg 1, 0(1)\n\t" \
VALGRIND_CALL_NOREDIR_R1 \
"aghi 15,200\n\t" \
VALGRIND_CFI_EPILOGUE \
"lgr %0, 2\n\t" \
: /*out*/ "=d" (_res) \
: /*in*/ "a" (&_argvec[0]) __FRAME_POINTER \
: /*trash*/ "cc", "memory", __CALLER_SAVED_REGS,"6","7" \
); \
lval = (__typeof__(lval)) _res; \
} while (0)
#define CALL_FN_W_11W(lval, orig, arg1, arg2, arg3, arg4, arg5, \
arg6, arg7 ,arg8, arg9, arg10, arg11) \
do { \
volatile OrigFn _orig = (orig); \
volatile unsigned long _argvec[12]; \
volatile unsigned long _res; \
_argvec[0] = (unsigned long)_orig.nraddr; \
_argvec[1] = (unsigned long)arg1; \
_argvec[2] = (unsigned long)arg2; \
_argvec[3] = (unsigned long)arg3; \
_argvec[4] = (unsigned long)arg4; \
_argvec[5] = (unsigned long)arg5; \
_argvec[6] = (unsigned long)arg6; \
_argvec[7] = (unsigned long)arg7; \
_argvec[8] = (unsigned long)arg8; \
_argvec[9] = (unsigned long)arg9; \
_argvec[10] = (unsigned long)arg10; \
_argvec[11] = (unsigned long)arg11; \
__asm__ volatile( \
VALGRIND_CFI_PROLOGUE \
"aghi 15,-208\n\t" \
"lg 2, 8(1)\n\t" \
"lg 3,16(1)\n\t" \
"lg 4,24(1)\n\t" \
"lg 5,32(1)\n\t" \
"lg 6,40(1)\n\t" \
"mvc 160(8,15), 48(1)\n\t" \
"mvc 168(8,15), 56(1)\n\t" \
"mvc 176(8,15), 64(1)\n\t" \
"mvc 184(8,15), 72(1)\n\t" \
"mvc 192(8,15), 80(1)\n\t" \
"mvc 200(8,15), 88(1)\n\t" \
"lg 1, 0(1)\n\t" \
VALGRIND_CALL_NOREDIR_R1 \
"aghi 15,208\n\t" \
VALGRIND_CFI_EPILOGUE \
"lgr %0, 2\n\t" \
: /*out*/ "=d" (_res) \
: /*in*/ "a" (&_argvec[0]) __FRAME_POINTER \
: /*trash*/ "cc", "memory", __CALLER_SAVED_REGS,"6","7" \
); \
lval = (__typeof__(lval)) _res; \
} while (0)
#define CALL_FN_W_12W(lval, orig, arg1, arg2, arg3, arg4, arg5, \
arg6, arg7 ,arg8, arg9, arg10, arg11, arg12)\
do { \
volatile OrigFn _orig = (orig); \
volatile unsigned long _argvec[13]; \
volatile unsigned long _res; \
_argvec[0] = (unsigned long)_orig.nraddr; \
_argvec[1] = (unsigned long)arg1; \
_argvec[2] = (unsigned long)arg2; \
_argvec[3] = (unsigned long)arg3; \
_argvec[4] = (unsigned long)arg4; \
_argvec[5] = (unsigned long)arg5; \
_argvec[6] = (unsigned long)arg6; \
_argvec[7] = (unsigned long)arg7; \
_argvec[8] = (unsigned long)arg8; \
_argvec[9] = (unsigned long)arg9; \
_argvec[10] = (unsigned long)arg10; \
_argvec[11] = (unsigned long)arg11; \
_argvec[12] = (unsigned long)arg12; \
__asm__ volatile( \
VALGRIND_CFI_PROLOGUE \
"aghi 15,-216\n\t" \
"lg 2, 8(1)\n\t" \
"lg 3,16(1)\n\t" \
"lg 4,24(1)\n\t" \
"lg 5,32(1)\n\t" \
"lg 6,40(1)\n\t" \
"mvc 160(8,15), 48(1)\n\t" \
"mvc 168(8,15), 56(1)\n\t" \
"mvc 176(8,15), 64(1)\n\t" \
"mvc 184(8,15), 72(1)\n\t" \
"mvc 192(8,15), 80(1)\n\t" \
"mvc 200(8,15), 88(1)\n\t" \
"mvc 208(8,15), 96(1)\n\t" \
"lg 1, 0(1)\n\t" \
VALGRIND_CALL_NOREDIR_R1 \
"aghi 15,216\n\t" \
VALGRIND_CFI_EPILOGUE \
"lgr %0, 2\n\t" \
: /*out*/ "=d" (_res) \
: /*in*/ "a" (&_argvec[0]) __FRAME_POINTER \
: /*trash*/ "cc", "memory", __CALLER_SAVED_REGS,"6","7" \
); \
lval = (__typeof__(lval)) _res; \
} while (0)
#endif /* PLAT_s390x_linux */
/* ------------------------- mips32-linux ----------------------- */
#if defined(PLAT_mips32_linux)
/* These regs are trashed by the hidden call. */
#define __CALLER_SAVED_REGS "$2", "$3", "$4", "$5", "$6", \
"$7", "$8", "$9", "$10", "$11", "$12", "$13", "$14", "$15", "$24", \
"$25", "$31"
/* These CALL_FN_ macros assume that on mips-linux, sizeof(unsigned
long) == 4. */
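/* Editorial note (not part of the original header): the o32 convention
   used below passes the first four arguments in $4-$7 while the caller
   still reserves a 16-byte home area for them, hence every macro drops
   $29 by at least 16; arguments beyond the fourth are stored above that
   area.  $25 (t9) must hold the callee's address for position
   independent calls, and $28 (gp) / $31 (ra) are saved and restored
   around the hidden call. */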
#define CALL_FN_W_v(lval, orig) \
do { \
volatile OrigFn _orig = (orig); \
volatile unsigned long _argvec[1]; \
volatile unsigned long _res; \
_argvec[0] = (unsigned long)_orig.nraddr; \
__asm__ volatile( \
"subu $29, $29, 8 \n\t" \
"sw $28, 0($29) \n\t" \
"sw $31, 4($29) \n\t" \
"subu $29, $29, 16 \n\t" \
"lw $25, 0(%1) \n\t" /* target->t9 */ \
VALGRIND_CALL_NOREDIR_T9 \
"addu $29, $29, 16\n\t" \
"lw $28, 0($29) \n\t" \
"lw $31, 4($29) \n\t" \
"addu $29, $29, 8 \n\t" \
"move %0, $2\n" \
: /*out*/ "=r" (_res) \
: /*in*/ "0" (&_argvec[0]) \
: /*trash*/ "memory", __CALLER_SAVED_REGS \
); \
lval = (__typeof__(lval)) _res; \
} while (0)
#define CALL_FN_W_W(lval, orig, arg1) \
do { \
volatile OrigFn _orig = (orig); \
volatile unsigned long _argvec[2]; \
volatile unsigned long _res; \
_argvec[0] = (unsigned long)_orig.nraddr; \
_argvec[1] = (unsigned long)(arg1); \
__asm__ volatile( \
"subu $29, $29, 8 \n\t" \
"sw $28, 0($29) \n\t" \
"sw $31, 4($29) \n\t" \
"subu $29, $29, 16 \n\t" \
"lw $4, 4(%1) \n\t" /* arg1*/ \
"lw $25, 0(%1) \n\t" /* target->t9 */ \
VALGRIND_CALL_NOREDIR_T9 \
"addu $29, $29, 16 \n\t" \
"lw $28, 0($29) \n\t" \
"lw $31, 4($29) \n\t" \
"addu $29, $29, 8 \n\t" \
"move %0, $2\n" \
: /*out*/ "=r" (_res) \
: /*in*/ "0" (&_argvec[0]) \
: /*trash*/ "memory", __CALLER_SAVED_REGS \
); \
lval = (__typeof__(lval)) _res; \
} while (0)
#define CALL_FN_W_WW(lval, orig, arg1,arg2) \
do { \
volatile OrigFn _orig = (orig); \
volatile unsigned long _argvec[3]; \
volatile unsigned long _res; \
_argvec[0] = (unsigned long)_orig.nraddr; \
_argvec[1] = (unsigned long)(arg1); \
_argvec[2] = (unsigned long)(arg2); \
__asm__ volatile( \
"subu $29, $29, 8 \n\t" \
"sw $28, 0($29) \n\t" \
"sw $31, 4($29) \n\t" \
"subu $29, $29, 16 \n\t" \
"lw $4, 4(%1) \n\t" \
"lw $5, 8(%1) \n\t" \
"lw $25, 0(%1) \n\t" /* target->t9 */ \
VALGRIND_CALL_NOREDIR_T9 \
"addu $29, $29, 16 \n\t" \
"lw $28, 0($29) \n\t" \
"lw $31, 4($29) \n\t" \
"addu $29, $29, 8 \n\t" \
"move %0, $2\n" \
: /*out*/ "=r" (_res) \
: /*in*/ "0" (&_argvec[0]) \
: /*trash*/ "memory", __CALLER_SAVED_REGS \
); \
lval = (__typeof__(lval)) _res; \
} while (0)
#define CALL_FN_W_WWW(lval, orig, arg1,arg2,arg3) \
do { \
volatile OrigFn _orig = (orig); \
volatile unsigned long _argvec[4]; \
volatile unsigned long _res; \
_argvec[0] = (unsigned long)_orig.nraddr; \
_argvec[1] = (unsigned long)(arg1); \
_argvec[2] = (unsigned long)(arg2); \
_argvec[3] = (unsigned long)(arg3); \
__asm__ volatile( \
"subu $29, $29, 8 \n\t" \
"sw $28, 0($29) \n\t" \
"sw $31, 4($29) \n\t" \
"subu $29, $29, 16 \n\t" \
"lw $4, 4(%1) \n\t" \
"lw $5, 8(%1) \n\t" \
"lw $6, 12(%1) \n\t" \
"lw $25, 0(%1) \n\t" /* target->t9 */ \
VALGRIND_CALL_NOREDIR_T9 \
"addu $29, $29, 16 \n\t" \
"lw $28, 0($29) \n\t" \
"lw $31, 4($29) \n\t" \
"addu $29, $29, 8 \n\t" \
"move %0, $2\n" \
: /*out*/ "=r" (_res) \
: /*in*/ "0" (&_argvec[0]) \
: /*trash*/ "memory", __CALLER_SAVED_REGS \
); \
lval = (__typeof__(lval)) _res; \
} while (0)
#define CALL_FN_W_WWWW(lval, orig, arg1,arg2,arg3,arg4) \
do { \
volatile OrigFn _orig = (orig); \
volatile unsigned long _argvec[5]; \
volatile unsigned long _res; \
_argvec[0] = (unsigned long)_orig.nraddr; \
_argvec[1] = (unsigned long)(arg1); \
_argvec[2] = (unsigned long)(arg2); \
_argvec[3] = (unsigned long)(arg3); \
_argvec[4] = (unsigned long)(arg4); \
__asm__ volatile( \
"subu $29, $29, 8 \n\t" \
"sw $28, 0($29) \n\t" \
"sw $31, 4($29) \n\t" \
"subu $29, $29, 16 \n\t" \
"lw $4, 4(%1) \n\t" \
"lw $5, 8(%1) \n\t" \
"lw $6, 12(%1) \n\t" \
"lw $7, 16(%1) \n\t" \
"lw $25, 0(%1) \n\t" /* target->t9 */ \
VALGRIND_CALL_NOREDIR_T9 \
"addu $29, $29, 16 \n\t" \
"lw $28, 0($29) \n\t" \
"lw $31, 4($29) \n\t" \
"addu $29, $29, 8 \n\t" \
"move %0, $2\n" \
: /*out*/ "=r" (_res) \
: /*in*/ "0" (&_argvec[0]) \
: /*trash*/ "memory", __CALLER_SAVED_REGS \
); \
lval = (__typeof__(lval)) _res; \
} while (0)
#define CALL_FN_W_5W(lval, orig, arg1,arg2,arg3,arg4,arg5) \
do { \
volatile OrigFn _orig = (orig); \
volatile unsigned long _argvec[6]; \
volatile unsigned long _res; \
_argvec[0] = (unsigned long)_orig.nraddr; \
_argvec[1] = (unsigned long)(arg1); \
_argvec[2] = (unsigned long)(arg2); \
_argvec[3] = (unsigned long)(arg3); \
_argvec[4] = (unsigned long)(arg4); \
_argvec[5] = (unsigned long)(arg5); \
__asm__ volatile( \
"subu $29, $29, 8 \n\t" \
"sw $28, 0($29) \n\t" \
"sw $31, 4($29) \n\t" \
"lw $4, 20(%1) \n\t" \
"subu $29, $29, 24\n\t" \
"sw $4, 16($29) \n\t" \
"lw $4, 4(%1) \n\t" \
"lw $5, 8(%1) \n\t" \
"lw $6, 12(%1) \n\t" \
"lw $7, 16(%1) \n\t" \
"lw $25, 0(%1) \n\t" /* target->t9 */ \
VALGRIND_CALL_NOREDIR_T9 \
"addu $29, $29, 24 \n\t" \
"lw $28, 0($29) \n\t" \
"lw $31, 4($29) \n\t" \
"addu $29, $29, 8 \n\t" \
"move %0, $2\n" \
: /*out*/ "=r" (_res) \
: /*in*/ "0" (&_argvec[0]) \
: /*trash*/ "memory", __CALLER_SAVED_REGS \
); \
lval = (__typeof__(lval)) _res; \
} while (0)
#define CALL_FN_W_6W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6) \
do { \
volatile OrigFn _orig = (orig); \
volatile unsigned long _argvec[7]; \
volatile unsigned long _res; \
_argvec[0] = (unsigned long)_orig.nraddr; \
_argvec[1] = (unsigned long)(arg1); \
_argvec[2] = (unsigned long)(arg2); \
_argvec[3] = (unsigned long)(arg3); \
_argvec[4] = (unsigned long)(arg4); \
_argvec[5] = (unsigned long)(arg5); \
_argvec[6] = (unsigned long)(arg6); \
__asm__ volatile( \
"subu $29, $29, 8 \n\t" \
"sw $28, 0($29) \n\t" \
"sw $31, 4($29) \n\t" \
"lw $4, 20(%1) \n\t" \
"subu $29, $29, 32\n\t" \
"sw $4, 16($29) \n\t" \
"lw $4, 24(%1) \n\t" \
"nop\n\t" \
"sw $4, 20($29) \n\t" \
"lw $4, 4(%1) \n\t" \
"lw $5, 8(%1) \n\t" \
"lw $6, 12(%1) \n\t" \
"lw $7, 16(%1) \n\t" \
"lw $25, 0(%1) \n\t" /* target->t9 */ \
VALGRIND_CALL_NOREDIR_T9 \
"addu $29, $29, 32 \n\t" \
"lw $28, 0($29) \n\t" \
"lw $31, 4($29) \n\t" \
"addu $29, $29, 8 \n\t" \
"move %0, $2\n" \
: /*out*/ "=r" (_res) \
: /*in*/ "0" (&_argvec[0]) \
: /*trash*/ "memory", __CALLER_SAVED_REGS \
); \
lval = (__typeof__(lval)) _res; \
} while (0)
#define CALL_FN_W_7W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6, \
arg7) \
do { \
volatile OrigFn _orig = (orig); \
volatile unsigned long _argvec[8]; \
volatile unsigned long _res; \
_argvec[0] = (unsigned long)_orig.nraddr; \
_argvec[1] = (unsigned long)(arg1); \
_argvec[2] = (unsigned long)(arg2); \
_argvec[3] = (unsigned long)(arg3); \
_argvec[4] = (unsigned long)(arg4); \
_argvec[5] = (unsigned long)(arg5); \
_argvec[6] = (unsigned long)(arg6); \
_argvec[7] = (unsigned long)(arg7); \
__asm__ volatile( \
"subu $29, $29, 8 \n\t" \
"sw $28, 0($29) \n\t" \
"sw $31, 4($29) \n\t" \
"lw $4, 20(%1) \n\t" \
"subu $29, $29, 32\n\t" \
"sw $4, 16($29) \n\t" \
"lw $4, 24(%1) \n\t" \
"sw $4, 20($29) \n\t" \
"lw $4, 28(%1) \n\t" \
"sw $4, 24($29) \n\t" \
"lw $4, 4(%1) \n\t" \
"lw $5, 8(%1) \n\t" \
"lw $6, 12(%1) \n\t" \
"lw $7, 16(%1) \n\t" \
"lw $25, 0(%1) \n\t" /* target->t9 */ \
VALGRIND_CALL_NOREDIR_T9 \
"addu $29, $29, 32 \n\t" \
"lw $28, 0($29) \n\t" \
"lw $31, 4($29) \n\t" \
"addu $29, $29, 8 \n\t" \
"move %0, $2\n" \
: /*out*/ "=r" (_res) \
: /*in*/ "0" (&_argvec[0]) \
: /*trash*/ "memory", __CALLER_SAVED_REGS \
); \
lval = (__typeof__(lval)) _res; \
} while (0)
#define CALL_FN_W_8W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6, \
arg7,arg8) \
do { \
volatile OrigFn _orig = (orig); \
volatile unsigned long _argvec[9]; \
volatile unsigned long _res; \
_argvec[0] = (unsigned long)_orig.nraddr; \
_argvec[1] = (unsigned long)(arg1); \
_argvec[2] = (unsigned long)(arg2); \
_argvec[3] = (unsigned long)(arg3); \
_argvec[4] = (unsigned long)(arg4); \
_argvec[5] = (unsigned long)(arg5); \
_argvec[6] = (unsigned long)(arg6); \
_argvec[7] = (unsigned long)(arg7); \
_argvec[8] = (unsigned long)(arg8); \
__asm__ volatile( \
"subu $29, $29, 8 \n\t" \
"sw $28, 0($29) \n\t" \
"sw $31, 4($29) \n\t" \
"lw $4, 20(%1) \n\t" \
"subu $29, $29, 40\n\t" \
"sw $4, 16($29) \n\t" \
"lw $4, 24(%1) \n\t" \
"sw $4, 20($29) \n\t" \
"lw $4, 28(%1) \n\t" \
"sw $4, 24($29) \n\t" \
"lw $4, 32(%1) \n\t" \
"sw $4, 28($29) \n\t" \
"lw $4, 4(%1) \n\t" \
"lw $5, 8(%1) \n\t" \
"lw $6, 12(%1) \n\t" \
"lw $7, 16(%1) \n\t" \
"lw $25, 0(%1) \n\t" /* target->t9 */ \
VALGRIND_CALL_NOREDIR_T9 \
"addu $29, $29, 40 \n\t" \
"lw $28, 0($29) \n\t" \
"lw $31, 4($29) \n\t" \
"addu $29, $29, 8 \n\t" \
"move %0, $2\n" \
: /*out*/ "=r" (_res) \
: /*in*/ "0" (&_argvec[0]) \
: /*trash*/ "memory", __CALLER_SAVED_REGS \
); \
lval = (__typeof__(lval)) _res; \
} while (0)
#define CALL_FN_W_9W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6, \
arg7,arg8,arg9) \
do { \
volatile OrigFn _orig = (orig); \
volatile unsigned long _argvec[10]; \
volatile unsigned long _res; \
_argvec[0] = (unsigned long)_orig.nraddr; \
_argvec[1] = (unsigned long)(arg1); \
_argvec[2] = (unsigned long)(arg2); \
_argvec[3] = (unsigned long)(arg3); \
_argvec[4] = (unsigned long)(arg4); \
_argvec[5] = (unsigned long)(arg5); \
_argvec[6] = (unsigned long)(arg6); \
_argvec[7] = (unsigned long)(arg7); \
_argvec[8] = (unsigned long)(arg8); \
_argvec[9] = (unsigned long)(arg9); \
__asm__ volatile( \
"subu $29, $29, 8 \n\t" \
"sw $28, 0($29) \n\t" \
"sw $31, 4($29) \n\t" \
"lw $4, 20(%1) \n\t" \
"subu $29, $29, 40\n\t" \
"sw $4, 16($29) \n\t" \
"lw $4, 24(%1) \n\t" \
"sw $4, 20($29) \n\t" \
"lw $4, 28(%1) \n\t" \
"sw $4, 24($29) \n\t" \
"lw $4, 32(%1) \n\t" \
"sw $4, 28($29) \n\t" \
"lw $4, 36(%1) \n\t" \
"sw $4, 32($29) \n\t" \
"lw $4, 4(%1) \n\t" \
"lw $5, 8(%1) \n\t" \
"lw $6, 12(%1) \n\t" \
"lw $7, 16(%1) \n\t" \
"lw $25, 0(%1) \n\t" /* target->t9 */ \
VALGRIND_CALL_NOREDIR_T9 \
"addu $29, $29, 40 \n\t" \
"lw $28, 0($29) \n\t" \
"lw $31, 4($29) \n\t" \
"addu $29, $29, 8 \n\t" \
"move %0, $2\n" \
: /*out*/ "=r" (_res) \
: /*in*/ "0" (&_argvec[0]) \
: /*trash*/ "memory", __CALLER_SAVED_REGS \
); \
lval = (__typeof__(lval)) _res; \
} while (0)
#define CALL_FN_W_10W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6, \
arg7,arg8,arg9,arg10) \
do { \
volatile OrigFn _orig = (orig); \
volatile unsigned long _argvec[11]; \
volatile unsigned long _res; \
_argvec[0] = (unsigned long)_orig.nraddr; \
_argvec[1] = (unsigned long)(arg1); \
_argvec[2] = (unsigned long)(arg2); \
_argvec[3] = (unsigned long)(arg3); \
_argvec[4] = (unsigned long)(arg4); \
_argvec[5] = (unsigned long)(arg5); \
_argvec[6] = (unsigned long)(arg6); \
_argvec[7] = (unsigned long)(arg7); \
_argvec[8] = (unsigned long)(arg8); \
_argvec[9] = (unsigned long)(arg9); \
_argvec[10] = (unsigned long)(arg10); \
__asm__ volatile( \
"subu $29, $29, 8 \n\t" \
"sw $28, 0($29) \n\t" \
"sw $31, 4($29) \n\t" \
"lw $4, 20(%1) \n\t" \
"subu $29, $29, 48\n\t" \
"sw $4, 16($29) \n\t" \
"lw $4, 24(%1) \n\t" \
"sw $4, 20($29) \n\t" \
"lw $4, 28(%1) \n\t" \
"sw $4, 24($29) \n\t" \
"lw $4, 32(%1) \n\t" \
"sw $4, 28($29) \n\t" \
"lw $4, 36(%1) \n\t" \
"sw $4, 32($29) \n\t" \
"lw $4, 40(%1) \n\t" \
"sw $4, 36($29) \n\t" \
"lw $4, 4(%1) \n\t" \
"lw $5, 8(%1) \n\t" \
"lw $6, 12(%1) \n\t" \
"lw $7, 16(%1) \n\t" \
"lw $25, 0(%1) \n\t" /* target->t9 */ \
VALGRIND_CALL_NOREDIR_T9 \
"addu $29, $29, 48 \n\t" \
"lw $28, 0($29) \n\t" \
"lw $31, 4($29) \n\t" \
"addu $29, $29, 8 \n\t" \
"move %0, $2\n" \
: /*out*/ "=r" (_res) \
: /*in*/ "0" (&_argvec[0]) \
: /*trash*/ "memory", __CALLER_SAVED_REGS \
); \
lval = (__typeof__(lval)) _res; \
} while (0)
#define CALL_FN_W_11W(lval, orig, arg1,arg2,arg3,arg4,arg5, \
arg6,arg7,arg8,arg9,arg10, \
arg11) \
do { \
volatile OrigFn _orig = (orig); \
volatile unsigned long _argvec[12]; \
volatile unsigned long _res; \
_argvec[0] = (unsigned long)_orig.nraddr; \
_argvec[1] = (unsigned long)(arg1); \
_argvec[2] = (unsigned long)(arg2); \
_argvec[3] = (unsigned long)(arg3); \
_argvec[4] = (unsigned long)(arg4); \
_argvec[5] = (unsigned long)(arg5); \
_argvec[6] = (unsigned long)(arg6); \
_argvec[7] = (unsigned long)(arg7); \
_argvec[8] = (unsigned long)(arg8); \
_argvec[9] = (unsigned long)(arg9); \
_argvec[10] = (unsigned long)(arg10); \
_argvec[11] = (unsigned long)(arg11); \
__asm__ volatile( \
"subu $29, $29, 8 \n\t" \
"sw $28, 0($29) \n\t" \
"sw $31, 4($29) \n\t" \
"lw $4, 20(%1) \n\t" \
"subu $29, $29, 48\n\t" \
"sw $4, 16($29) \n\t" \
"lw $4, 24(%1) \n\t" \
"sw $4, 20($29) \n\t" \
"lw $4, 28(%1) \n\t" \
"sw $4, 24($29) \n\t" \
"lw $4, 32(%1) \n\t" \
"sw $4, 28($29) \n\t" \
"lw $4, 36(%1) \n\t" \
"sw $4, 32($29) \n\t" \
"lw $4, 40(%1) \n\t" \
"sw $4, 36($29) \n\t" \
"lw $4, 44(%1) \n\t" \
"sw $4, 40($29) \n\t" \
"lw $4, 4(%1) \n\t" \
"lw $5, 8(%1) \n\t" \
"lw $6, 12(%1) \n\t" \
"lw $7, 16(%1) \n\t" \
"lw $25, 0(%1) \n\t" /* target->t9 */ \
VALGRIND_CALL_NOREDIR_T9 \
"addu $29, $29, 48 \n\t" \
"lw $28, 0($29) \n\t" \
"lw $31, 4($29) \n\t" \
"addu $29, $29, 8 \n\t" \
"move %0, $2\n" \
: /*out*/ "=r" (_res) \
: /*in*/ "0" (&_argvec[0]) \
: /*trash*/ "memory", __CALLER_SAVED_REGS \
); \
lval = (__typeof__(lval)) _res; \
} while (0)
#define CALL_FN_W_12W(lval, orig, arg1,arg2,arg3,arg4,arg5, \
arg6,arg7,arg8,arg9,arg10, \
arg11,arg12) \
do { \
volatile OrigFn _orig = (orig); \
volatile unsigned long _argvec[13]; \
volatile unsigned long _res; \
_argvec[0] = (unsigned long)_orig.nraddr; \
_argvec[1] = (unsigned long)(arg1); \
_argvec[2] = (unsigned long)(arg2); \
_argvec[3] = (unsigned long)(arg3); \
_argvec[4] = (unsigned long)(arg4); \
_argvec[5] = (unsigned long)(arg5); \
_argvec[6] = (unsigned long)(arg6); \
_argvec[7] = (unsigned long)(arg7); \
_argvec[8] = (unsigned long)(arg8); \
_argvec[9] = (unsigned long)(arg9); \
_argvec[10] = (unsigned long)(arg10); \
_argvec[11] = (unsigned long)(arg11); \
_argvec[12] = (unsigned long)(arg12); \
__asm__ volatile( \
"subu $29, $29, 8 \n\t" \
"sw $28, 0($29) \n\t" \
"sw $31, 4($29) \n\t" \
"lw $4, 20(%1) \n\t" \
"subu $29, $29, 56\n\t" \
"sw $4, 16($29) \n\t" \
"lw $4, 24(%1) \n\t" \
"sw $4, 20($29) \n\t" \
"lw $4, 28(%1) \n\t" \
"sw $4, 24($29) \n\t" \
"lw $4, 32(%1) \n\t" \
"sw $4, 28($29) \n\t" \
"lw $4, 36(%1) \n\t" \
"sw $4, 32($29) \n\t" \
"lw $4, 40(%1) \n\t" \
"sw $4, 36($29) \n\t" \
"lw $4, 44(%1) \n\t" \
"sw $4, 40($29) \n\t" \
"lw $4, 48(%1) \n\t" \
"sw $4, 44($29) \n\t" \
"lw $4, 4(%1) \n\t" \
"lw $5, 8(%1) \n\t" \
"lw $6, 12(%1) \n\t" \
"lw $7, 16(%1) \n\t" \
"lw $25, 0(%1) \n\t" /* target->t9 */ \
VALGRIND_CALL_NOREDIR_T9 \
"addu $29, $29, 56 \n\t" \
"lw $28, 0($29) \n\t" \
"lw $31, 4($29) \n\t" \
"addu $29, $29, 8 \n\t" \
"move %0, $2\n" \
: /*out*/ "=r" (_res) \
: /*in*/ "r" (&_argvec[0]) \
: /*trash*/ "memory", __CALLER_SAVED_REGS \
); \
lval = (__typeof__(lval)) _res; \
} while (0)
#endif /* PLAT_mips32_linux */
/* ------------------------- nanomips-linux -------------------- */
#if defined(PLAT_nanomips_linux)
/* These regs are trashed by the hidden call. */
#define __CALLER_SAVED_REGS "$t4", "$t5", "$a0", "$a1", "$a2", \
"$a3", "$a4", "$a5", "$a6", "$a7", "$t0", "$t1", "$t2", "$t3", \
"$t8","$t9", "$at"
/* These CALL_FN_ macros assume that on nanomips-linux, sizeof(unsigned
long) == 4. */
#define CALL_FN_W_v(lval, orig) \
do { \
volatile OrigFn _orig = (orig); \
volatile unsigned long _argvec[1]; \
volatile unsigned long _res; \
_argvec[0] = (unsigned long)_orig.nraddr; \
__asm__ volatile( \
"lw $t9, 0(%1)\n\t" \
VALGRIND_CALL_NOREDIR_T9 \
"move %0, $a0\n" \
: /*out*/ "=r" (_res) \
: /*in*/ "r" (&_argvec[0]) \
: /*trash*/ "memory", __CALLER_SAVED_REGS \
); \
lval = (__typeof__(lval)) _res; \
} while (0)
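/* Editorial note (not part of the original header): unlike the mips32
   variants above, these nanomips macros move every argument (up to
   eight) straight into $a0-$a7 and never touch the stack pointer; the
   result is read back from $a0 after the call. */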
#define CALL_FN_W_W(lval, orig, arg1) \
do { \
volatile OrigFn _orig = (orig); \
volatile unsigned long _argvec[2]; \
volatile unsigned long _res; \
_argvec[0] = (unsigned long)_orig.nraddr; \
_argvec[1] = (unsigned long)(arg1); \
__asm__ volatile( \
"lw $t9, 0(%1)\n\t" \
"lw $a0, 4(%1)\n\t" \
VALGRIND_CALL_NOREDIR_T9 \
"move %0, $a0\n" \
: /*out*/ "=r" (_res) \
: /*in*/ "r" (&_argvec[0]) \
: /*trash*/ "memory", __CALLER_SAVED_REGS \
); \
lval = (__typeof__(lval)) _res; \
} while (0)
#define CALL_FN_W_WW(lval, orig, arg1,arg2) \
do { \
volatile OrigFn _orig = (orig); \
volatile unsigned long _argvec[3]; \
volatile unsigned long _res; \
_argvec[0] = (unsigned long)_orig.nraddr; \
_argvec[1] = (unsigned long)(arg1); \
_argvec[2] = (unsigned long)(arg2); \
__asm__ volatile( \
"lw $t9, 0(%1)\n\t" \
"lw $a0, 4(%1)\n\t" \
"lw $a1, 8(%1)\n\t" \
VALGRIND_CALL_NOREDIR_T9 \
"move %0, $a0\n" \
: /*out*/ "=r" (_res) \
: /*in*/ "r" (&_argvec[0]) \
: /*trash*/ "memory", __CALLER_SAVED_REGS \
); \
lval = (__typeof__(lval)) _res; \
} while (0)
#define CALL_FN_W_WWW(lval, orig, arg1,arg2,arg3) \
do { \
volatile OrigFn _orig = (orig); \
volatile unsigned long _argvec[4]; \
volatile unsigned long _res; \
_argvec[0] = (unsigned long)_orig.nraddr; \
_argvec[1] = (unsigned long)(arg1); \
_argvec[2] = (unsigned long)(arg2); \
_argvec[3] = (unsigned long)(arg3); \
__asm__ volatile( \
"lw $t9, 0(%1)\n\t" \
"lw $a0, 4(%1)\n\t" \
"lw $a1, 8(%1)\n\t" \
"lw $a2,12(%1)\n\t" \
VALGRIND_CALL_NOREDIR_T9 \
"move %0, $a0\n" \
: /*out*/ "=r" (_res) \
: /*in*/ "r" (&_argvec[0]) \
: /*trash*/ "memory", __CALLER_SAVED_REGS \
); \
lval = (__typeof__(lval)) _res; \
} while (0)
#define CALL_FN_W_WWWW(lval, orig, arg1,arg2,arg3,arg4) \
do { \
volatile OrigFn _orig = (orig); \
volatile unsigned long _argvec[5]; \
volatile unsigned long _res; \
_argvec[0] = (unsigned long)_orig.nraddr; \
_argvec[1] = (unsigned long)(arg1); \
_argvec[2] = (unsigned long)(arg2); \
_argvec[3] = (unsigned long)(arg3); \
_argvec[4] = (unsigned long)(arg4); \
__asm__ volatile( \
"lw $t9, 0(%1)\n\t" \
"lw $a0, 4(%1)\n\t" \
"lw $a1, 8(%1)\n\t" \
"lw $a2,12(%1)\n\t" \
"lw $a3,16(%1)\n\t" \
VALGRIND_CALL_NOREDIR_T9 \
"move %0, $a0\n" \
: /*out*/ "=r" (_res) \
: /*in*/ "r" (&_argvec[0]) \
: /*trash*/ "memory", __CALLER_SAVED_REGS \
); \
lval = (__typeof__(lval)) _res; \
} while (0)
#define CALL_FN_W_5W(lval, orig, arg1,arg2,arg3,arg4,arg5) \
do { \
volatile OrigFn _orig = (orig); \
volatile unsigned long _argvec[6]; \
volatile unsigned long _res; \
_argvec[0] = (unsigned long)_orig.nraddr; \
_argvec[1] = (unsigned long)(arg1); \
_argvec[2] = (unsigned long)(arg2); \
_argvec[3] = (unsigned long)(arg3); \
_argvec[4] = (unsigned long)(arg4); \
_argvec[5] = (unsigned long)(arg5); \
__asm__ volatile( \
"lw $t9, 0(%1)\n\t" \
"lw $a0, 4(%1)\n\t" \
"lw $a1, 8(%1)\n\t" \
"lw $a2,12(%1)\n\t" \
"lw $a3,16(%1)\n\t" \
"lw $a4,20(%1)\n\t" \
VALGRIND_CALL_NOREDIR_T9 \
"move %0, $a0\n" \
: /*out*/ "=r" (_res) \
: /*in*/ "r" (&_argvec[0]) \
: /*trash*/ "memory", __CALLER_SAVED_REGS \
); \
lval = (__typeof__(lval)) _res; \
} while (0)
#define CALL_FN_W_6W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6) \
do { \
volatile OrigFn _orig = (orig); \
volatile unsigned long _argvec[7]; \
volatile unsigned long _res; \
_argvec[0] = (unsigned long)_orig.nraddr; \
_argvec[1] = (unsigned long)(arg1); \
_argvec[2] = (unsigned long)(arg2); \
_argvec[3] = (unsigned long)(arg3); \
_argvec[4] = (unsigned long)(arg4); \
_argvec[5] = (unsigned long)(arg5); \
_argvec[6] = (unsigned long)(arg6); \
__asm__ volatile( \
"lw $t9, 0(%1)\n\t" \
"lw $a0, 4(%1)\n\t" \
"lw $a1, 8(%1)\n\t" \
"lw $a2,12(%1)\n\t" \
"lw $a3,16(%1)\n\t" \
"lw $a4,20(%1)\n\t" \
"lw $a5,24(%1)\n\t" \
VALGRIND_CALL_NOREDIR_T9 \
"move %0, $a0\n" \
: /*out*/ "=r" (_res) \
: /*in*/ "r" (&_argvec[0]) \
: /*trash*/ "memory", __CALLER_SAVED_REGS \
); \
lval = (__typeof__(lval)) _res; \
} while (0)
#define CALL_FN_W_7W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6, \
arg7) \
do { \
volatile OrigFn _orig = (orig); \
volatile unsigned long _argvec[8]; \
volatile unsigned long _res; \
_argvec[0] = (unsigned long)_orig.nraddr; \
_argvec[1] = (unsigned long)(arg1); \
_argvec[2] = (unsigned long)(arg2); \
_argvec[3] = (unsigned long)(arg3); \
_argvec[4] = (unsigned long)(arg4); \
_argvec[5] = (unsigned long)(arg5); \
_argvec[6] = (unsigned long)(arg6); \
_argvec[7] = (unsigned long)(arg7); \
__asm__ volatile( \
"lw $t9, 0(%1)\n\t" \
"lw $a0, 4(%1)\n\t" \
"lw $a1, 8(%1)\n\t" \
"lw $a2,12(%1)\n\t" \
"lw $a3,16(%1)\n\t" \
"lw $a4,20(%1)\n\t" \
"lw $a5,24(%1)\n\t" \
"lw $a6,28(%1)\n\t" \
VALGRIND_CALL_NOREDIR_T9 \
"move %0, $a0\n" \
: /*out*/ "=r" (_res) \
: /*in*/ "r" (&_argvec[0]) \
: /*trash*/ "memory", __CALLER_SAVED_REGS \
); \
lval = (__typeof__(lval)) _res; \
} while (0)
#define CALL_FN_W_8W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6, \
arg7,arg8) \
do { \
volatile OrigFn _orig = (orig); \
volatile unsigned long _argvec[9]; \
volatile unsigned long _res; \
_argvec[0] = (unsigned long)_orig.nraddr; \
_argvec[1] = (unsigned long)(arg1); \
_argvec[2] = (unsigned long)(arg2); \
_argvec[3] = (unsigned long)(arg3); \
_argvec[4] = (unsigned long)(arg4); \
_argvec[5] = (unsigned long)(arg5); \
_argvec[6] = (unsigned long)(arg6); \
_argvec[7] = (unsigned long)(arg7); \
_argvec[8] = (unsigned long)(arg8); \
__asm__ volatile( \
"lw $t9, 0(%1)\n\t" \
"lw $a0, 4(%1)\n\t" \
"lw $a1, 8(%1)\n\t" \
"lw $a2,12(%1)\n\t" \
"lw $a3,16(%1)\n\t" \
"lw $a4,20(%1)\n\t" \
"lw $a5,24(%1)\n\t" \
"lw $a6,28(%1)\n\t" \
"lw $a7,32(%1)\n\t" \
VALGRIND_CALL_NOREDIR_T9 \
"move %0, $a0\n" \
: /*out*/ "=r" (_res) \
: /*in*/ "r" (&_argvec[0]) \
: /*trash*/ "memory", __CALLER_SAVED_REGS \
); \
lval = (__typeof__(lval)) _res; \
} while (0)
#define CALL_FN_W_9W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6, \
arg7,arg8,arg9) \
do { \
volatile OrigFn _orig = (orig); \
volatile unsigned long _argvec[10]; \
volatile unsigned long _res; \
_argvec[0] = (unsigned long)_orig.nraddr; \
_argvec[1] = (unsigned long)(arg1); \
_argvec[2] = (unsigned long)(arg2); \
_argvec[3] = (unsigned long)(arg3); \
_argvec[4] = (unsigned long)(arg4); \
_argvec[5] = (unsigned long)(arg5); \
_argvec[6] = (unsigned long)(arg6); \
_argvec[7] = (unsigned long)(arg7); \
_argvec[8] = (unsigned long)(arg8); \
_argvec[9] = (unsigned long)(arg9); \
__asm__ volatile( \
"addiu $sp, $sp, -16 \n\t" \
"lw $t9,36(%1) \n\t" \
"sw $t9, 0($sp) \n\t" \
"lw $t9, 0(%1) \n\t" \
"lw $a0, 4(%1) \n\t" \
"lw $a1, 8(%1) \n\t" \
"lw $a2,12(%1) \n\t" \
"lw $a3,16(%1) \n\t" \
"lw $a4,20(%1) \n\t" \
"lw $a5,24(%1) \n\t" \
"lw $a6,28(%1) \n\t" \
"lw $a7,32(%1) \n\t" \
VALGRIND_CALL_NOREDIR_T9 \
"move %0, $a0 \n\t" \
"addiu $sp, $sp, 16 \n\t" \
: /*out*/ "=r" (_res) \
: /*in*/ "r" (&_argvec[0]) \
: /*trash*/ "memory", __CALLER_SAVED_REGS \
); \
lval = (__typeof__(lval)) _res; \
} while (0)
#define CALL_FN_W_10W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6, \
arg7,arg8,arg9,arg10) \
do { \
volatile OrigFn _orig = (orig); \
volatile unsigned long _argvec[11]; \
volatile unsigned long _res; \
_argvec[0] = (unsigned long)_orig.nraddr; \
_argvec[1] = (unsigned long)(arg1); \
_argvec[2] = (unsigned long)(arg2); \
_argvec[3] = (unsigned long)(arg3); \
_argvec[4] = (unsigned long)(arg4); \
_argvec[5] = (unsigned long)(arg5); \
_argvec[6] = (unsigned long)(arg6); \
_argvec[7] = (unsigned long)(arg7); \
_argvec[8] = (unsigned long)(arg8); \
_argvec[9] = (unsigned long)(arg9); \
_argvec[10] = (unsigned long)(arg10); \
__asm__ volatile( \
"addiu $sp, $sp, -16 \n\t" \
"lw $t9,36(%1) \n\t" \
"sw $t9, 0($sp) \n\t" \
"lw $t9,40(%1) \n\t" \
"sw $t9, 4($sp) \n\t" \
"lw $t9, 0(%1) \n\t" \
"lw $a0, 4(%1) \n\t" \
"lw $a1, 8(%1) \n\t" \
"lw $a2,12(%1) \n\t" \
"lw $a3,16(%1) \n\t" \
"lw $a4,20(%1) \n\t" \
"lw $a5,24(%1) \n\t" \
"lw $a6,28(%1) \n\t" \
"lw $a7,32(%1) \n\t" \
VALGRIND_CALL_NOREDIR_T9 \
"move %0, $a0 \n\t" \
"addiu $sp, $sp, 16 \n\t" \
: /*out*/ "=r" (_res) \
: /*in*/ "r" (&_argvec[0]) \
: /*trash*/ "memory", __CALLER_SAVED_REGS \
); \
lval = (__typeof__(lval)) _res; \
} while (0)
#define CALL_FN_W_11W(lval, orig, arg1,arg2,arg3,arg4,arg5, \
arg6,arg7,arg8,arg9,arg10, \
arg11) \
do { \
volatile OrigFn _orig = (orig); \
volatile unsigned long _argvec[12]; \
volatile unsigned long _res; \
_argvec[0] = (unsigned long)_orig.nraddr; \
_argvec[1] = (unsigned long)(arg1); \
_argvec[2] = (unsigned long)(arg2); \
_argvec[3] = (unsigned long)(arg3); \
_argvec[4] = (unsigned long)(arg4); \
_argvec[5] = (unsigned long)(arg5); \
_argvec[6] = (unsigned long)(arg6); \
_argvec[7] = (unsigned long)(arg7); \
_argvec[8] = (unsigned long)(arg8); \
_argvec[9] = (unsigned long)(arg9); \
_argvec[10] = (unsigned long)(arg10); \
_argvec[11] = (unsigned long)(arg11); \
__asm__ volatile( \
"addiu $sp, $sp, -16 \n\t" \
"lw $t9,36(%1) \n\t" \
"sw $t9, 0($sp) \n\t" \
"lw $t9,40(%1) \n\t" \
"sw $t9, 4($sp) \n\t" \
"lw $t9,44(%1) \n\t" \
"sw $t9, 8($sp) \n\t" \
"lw $t9, 0(%1) \n\t" \
"lw $a0, 4(%1) \n\t" \
"lw $a1, 8(%1) \n\t" \
"lw $a2,12(%1) \n\t" \
"lw $a3,16(%1) \n\t" \
"lw $a4,20(%1) \n\t" \
"lw $a5,24(%1) \n\t" \
"lw $a6,28(%1) \n\t" \
"lw $a7,32(%1) \n\t" \
VALGRIND_CALL_NOREDIR_T9 \
"move %0, $a0 \n\t" \
"addiu $sp, $sp, 16 \n\t" \
: /*out*/ "=r" (_res) \
: /*in*/ "r" (&_argvec[0]) \
: /*trash*/ "memory", __CALLER_SAVED_REGS \
); \
lval = (__typeof__(lval)) _res; \
} while (0)
#define CALL_FN_W_12W(lval, orig, arg1,arg2,arg3,arg4,arg5, \
arg6,arg7,arg8,arg9,arg10, \
arg11,arg12) \
do { \
volatile OrigFn _orig = (orig); \
volatile unsigned long _argvec[13]; \
volatile unsigned long _res; \
_argvec[0] = (unsigned long)_orig.nraddr; \
_argvec[1] = (unsigned long)(arg1); \
_argvec[2] = (unsigned long)(arg2); \
_argvec[3] = (unsigned long)(arg3); \
_argvec[4] = (unsigned long)(arg4); \
_argvec[5] = (unsigned long)(arg5); \
_argvec[6] = (unsigned long)(arg6); \
_argvec[7] = (unsigned long)(arg7); \
_argvec[8] = (unsigned long)(arg8); \
_argvec[9] = (unsigned long)(arg9); \
_argvec[10] = (unsigned long)(arg10); \
_argvec[11] = (unsigned long)(arg11); \
_argvec[12] = (unsigned long)(arg12); \
__asm__ volatile( \
"addiu $sp, $sp, -16 \n\t" \
"lw $t9,36(%1) \n\t" \
"sw $t9, 0($sp) \n\t" \
"lw $t9,40(%1) \n\t" \
"sw $t9, 4($sp) \n\t" \
"lw $t9,44(%1) \n\t" \
"sw $t9, 8($sp) \n\t" \
"lw $t9,48(%1) \n\t" \
"sw $t9,12($sp) \n\t" \
"lw $t9, 0(%1) \n\t" \
"lw $a0, 4(%1) \n\t" \
"lw $a1, 8(%1) \n\t" \
"lw $a2,12(%1) \n\t" \
"lw $a3,16(%1) \n\t" \
"lw $a4,20(%1) \n\t" \
"lw $a5,24(%1) \n\t" \
"lw $a6,28(%1) \n\t" \
"lw $a7,32(%1) \n\t" \
VALGRIND_CALL_NOREDIR_T9 \
"move %0, $a0 \n\t" \
"addiu $sp, $sp, 16 \n\t" \
: /*out*/ "=r" (_res) \
: /*in*/ "r" (&_argvec[0]) \
: /*trash*/ "memory", __CALLER_SAVED_REGS \
); \
lval = (__typeof__(lval)) _res; \
} while (0)
#endif /* PLAT_nanomips_linux */
/* ------------------------- mips64-linux ------------------------- */
#if defined(PLAT_mips64_linux)
/* These regs are trashed by the hidden call. */
#define __CALLER_SAVED_REGS "$2", "$3", "$4", "$5", "$6", \
"$7", "$8", "$9", "$10", "$11", "$12", "$13", "$14", "$15", "$24", \
"$25", "$31"
/* These CALL_FN_ macros assume that on mips64-linux,
sizeof(long long) == 8. */
#define MIPS64_LONG2REG_CAST(x) ((long long)(long)x)
#define CALL_FN_W_v(lval, orig) \
do { \
volatile OrigFn _orig = (orig); \
volatile unsigned long long _argvec[1]; \
volatile unsigned long long _res; \
_argvec[0] = MIPS64_LONG2REG_CAST(_orig.nraddr); \
__asm__ volatile( \
"ld $25, 0(%1)\n\t" /* target->t9 */ \
VALGRIND_CALL_NOREDIR_T9 \
"move %0, $2\n" \
: /*out*/ "=r" (_res) \
: /*in*/ "0" (&_argvec[0]) \
: /*trash*/ "memory", __CALLER_SAVED_REGS \
); \
lval = (__typeof__(lval)) (long)_res; \
} while (0)
#define CALL_FN_W_W(lval, orig, arg1) \
do { \
volatile OrigFn _orig = (orig); \
volatile unsigned long long _argvec[2]; \
volatile unsigned long long _res; \
_argvec[0] = MIPS64_LONG2REG_CAST(_orig.nraddr); \
_argvec[1] = MIPS64_LONG2REG_CAST(arg1); \
__asm__ volatile( \
"ld $4, 8(%1)\n\t" /* arg1*/ \
"ld $25, 0(%1)\n\t" /* target->t9 */ \
VALGRIND_CALL_NOREDIR_T9 \
"move %0, $2\n" \
: /*out*/ "=r" (_res) \
: /*in*/ "r" (&_argvec[0]) \
: /*trash*/ "memory", __CALLER_SAVED_REGS \
); \
lval = (__typeof__(lval)) (long)_res; \
} while (0)
#define CALL_FN_W_WW(lval, orig, arg1,arg2) \
do { \
volatile OrigFn _orig = (orig); \
volatile unsigned long long _argvec[3]; \
volatile unsigned long long _res; \
_argvec[0] = _orig.nraddr; \
_argvec[1] = MIPS64_LONG2REG_CAST(arg1); \
_argvec[2] = MIPS64_LONG2REG_CAST(arg2); \
__asm__ volatile( \
"ld $4, 8(%1)\n\t" \
"ld $5, 16(%1)\n\t" \
"ld $25, 0(%1)\n\t" /* target->t9 */ \
VALGRIND_CALL_NOREDIR_T9 \
"move %0, $2\n" \
: /*out*/ "=r" (_res) \
: /*in*/ "r" (&_argvec[0]) \
: /*trash*/ "memory", __CALLER_SAVED_REGS \
); \
lval = (__typeof__(lval)) (long)_res; \
} while (0)
#define CALL_FN_W_WWW(lval, orig, arg1,arg2,arg3) \
do { \
volatile OrigFn _orig = (orig); \
volatile unsigned long long _argvec[4]; \
volatile unsigned long long _res; \
_argvec[0] = _orig.nraddr; \
_argvec[1] = MIPS64_LONG2REG_CAST(arg1); \
_argvec[2] = MIPS64_LONG2REG_CAST(arg2); \
_argvec[3] = MIPS64_LONG2REG_CAST(arg3); \
__asm__ volatile( \
"ld $4, 8(%1)\n\t" \
"ld $5, 16(%1)\n\t" \
"ld $6, 24(%1)\n\t" \
"ld $25, 0(%1)\n\t" /* target->t9 */ \
VALGRIND_CALL_NOREDIR_T9 \
"move %0, $2\n" \
: /*out*/ "=r" (_res) \
: /*in*/ "r" (&_argvec[0]) \
: /*trash*/ "memory", __CALLER_SAVED_REGS \
); \
lval = (__typeof__(lval)) (long)_res; \
} while (0)
#define CALL_FN_W_WWWW(lval, orig, arg1,arg2,arg3,arg4) \
do { \
volatile OrigFn _orig = (orig); \
volatile unsigned long long _argvec[5]; \
volatile unsigned long long _res; \
_argvec[0] = MIPS64_LONG2REG_CAST(_orig.nraddr); \
_argvec[1] = MIPS64_LONG2REG_CAST(arg1); \
_argvec[2] = MIPS64_LONG2REG_CAST(arg2); \
_argvec[3] = MIPS64_LONG2REG_CAST(arg3); \
_argvec[4] = MIPS64_LONG2REG_CAST(arg4); \
__asm__ volatile( \
"ld $4, 8(%1)\n\t" \
"ld $5, 16(%1)\n\t" \
"ld $6, 24(%1)\n\t" \
"ld $7, 32(%1)\n\t" \
"ld $25, 0(%1)\n\t" /* target->t9 */ \
VALGRIND_CALL_NOREDIR_T9 \
"move %0, $2\n" \
: /*out*/ "=r" (_res) \
: /*in*/ "r" (&_argvec[0]) \
: /*trash*/ "memory", __CALLER_SAVED_REGS \
); \
lval = (__typeof__(lval)) (long)_res; \
} while (0)
#define CALL_FN_W_5W(lval, orig, arg1,arg2,arg3,arg4,arg5) \
do { \
volatile OrigFn _orig = (orig); \
volatile unsigned long long _argvec[6]; \
volatile unsigned long long _res; \
_argvec[0] = MIPS64_LONG2REG_CAST(_orig.nraddr); \
_argvec[1] = MIPS64_LONG2REG_CAST(arg1); \
_argvec[2] = MIPS64_LONG2REG_CAST(arg2); \
_argvec[3] = MIPS64_LONG2REG_CAST(arg3); \
_argvec[4] = MIPS64_LONG2REG_CAST(arg4); \
_argvec[5] = MIPS64_LONG2REG_CAST(arg5); \
__asm__ volatile( \
"ld $4, 8(%1)\n\t" \
"ld $5, 16(%1)\n\t" \
"ld $6, 24(%1)\n\t" \
"ld $7, 32(%1)\n\t" \
"ld $8, 40(%1)\n\t" \
"ld $25, 0(%1)\n\t" /* target->t9 */ \
VALGRIND_CALL_NOREDIR_T9 \
"move %0, $2\n" \
: /*out*/ "=r" (_res) \
: /*in*/ "r" (&_argvec[0]) \
: /*trash*/ "memory", __CALLER_SAVED_REGS \
); \
lval = (__typeof__(lval)) (long)_res; \
} while (0)
#define CALL_FN_W_6W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6) \
do { \
volatile OrigFn _orig = (orig); \
volatile unsigned long long _argvec[7]; \
volatile unsigned long long _res; \
_argvec[0] = MIPS64_LONG2REG_CAST(_orig.nraddr); \
_argvec[1] = MIPS64_LONG2REG_CAST(arg1); \
_argvec[2] = MIPS64_LONG2REG_CAST(arg2); \
_argvec[3] = MIPS64_LONG2REG_CAST(arg3); \
_argvec[4] = MIPS64_LONG2REG_CAST(arg4); \
_argvec[5] = MIPS64_LONG2REG_CAST(arg5); \
_argvec[6] = MIPS64_LONG2REG_CAST(arg6); \
__asm__ volatile( \
"ld $4, 8(%1)\n\t" \
"ld $5, 16(%1)\n\t" \
"ld $6, 24(%1)\n\t" \
"ld $7, 32(%1)\n\t" \
"ld $8, 40(%1)\n\t" \
"ld $9, 48(%1)\n\t" \
"ld $25, 0(%1)\n\t" /* target->t9 */ \
VALGRIND_CALL_NOREDIR_T9 \
"move %0, $2\n" \
: /*out*/ "=r" (_res) \
: /*in*/ "r" (&_argvec[0]) \
: /*trash*/ "memory", __CALLER_SAVED_REGS \
); \
lval = (__typeof__(lval)) (long)_res; \
} while (0)
#define CALL_FN_W_7W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6, \
arg7) \
do { \
volatile OrigFn _orig = (orig); \
volatile unsigned long long _argvec[8]; \
volatile unsigned long long _res; \
_argvec[0] = MIPS64_LONG2REG_CAST(_orig.nraddr); \
_argvec[1] = MIPS64_LONG2REG_CAST(arg1); \
_argvec[2] = MIPS64_LONG2REG_CAST(arg2); \
_argvec[3] = MIPS64_LONG2REG_CAST(arg3); \
_argvec[4] = MIPS64_LONG2REG_CAST(arg4); \
_argvec[5] = MIPS64_LONG2REG_CAST(arg5); \
_argvec[6] = MIPS64_LONG2REG_CAST(arg6); \
_argvec[7] = MIPS64_LONG2REG_CAST(arg7); \
__asm__ volatile( \
"ld $4, 8(%1)\n\t" \
"ld $5, 16(%1)\n\t" \
"ld $6, 24(%1)\n\t" \
"ld $7, 32(%1)\n\t" \
"ld $8, 40(%1)\n\t" \
"ld $9, 48(%1)\n\t" \
"ld $10, 56(%1)\n\t" \
"ld $25, 0(%1) \n\t" /* target->t9 */ \
VALGRIND_CALL_NOREDIR_T9 \
"move %0, $2\n" \
: /*out*/ "=r" (_res) \
: /*in*/ "r" (&_argvec[0]) \
: /*trash*/ "memory", __CALLER_SAVED_REGS \
); \
lval = (__typeof__(lval)) (long)_res; \
} while (0)
#define CALL_FN_W_8W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6, \
arg7,arg8) \
do { \
volatile OrigFn _orig = (orig); \
volatile unsigned long long _argvec[9]; \
volatile unsigned long long _res; \
_argvec[0] = MIPS64_LONG2REG_CAST(_orig.nraddr); \
_argvec[1] = MIPS64_LONG2REG_CAST(arg1); \
_argvec[2] = MIPS64_LONG2REG_CAST(arg2); \
_argvec[3] = MIPS64_LONG2REG_CAST(arg3); \
_argvec[4] = MIPS64_LONG2REG_CAST(arg4); \
_argvec[5] = MIPS64_LONG2REG_CAST(arg5); \
_argvec[6] = MIPS64_LONG2REG_CAST(arg6); \
_argvec[7] = MIPS64_LONG2REG_CAST(arg7); \
_argvec[8] = MIPS64_LONG2REG_CAST(arg8); \
__asm__ volatile( \
"ld $4, 8(%1)\n\t" \
"ld $5, 16(%1)\n\t" \
"ld $6, 24(%1)\n\t" \
"ld $7, 32(%1)\n\t" \
"ld $8, 40(%1)\n\t" \
"ld $9, 48(%1)\n\t" \
"ld $10, 56(%1)\n\t" \
"ld $11, 64(%1)\n\t" \
"ld $25, 0(%1) \n\t" /* target->t9 */ \
VALGRIND_CALL_NOREDIR_T9 \
"move %0, $2\n" \
: /*out*/ "=r" (_res) \
: /*in*/ "r" (&_argvec[0]) \
: /*trash*/ "memory", __CALLER_SAVED_REGS \
); \
lval = (__typeof__(lval)) (long)_res; \
} while (0)
#define CALL_FN_W_9W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6, \
arg7,arg8,arg9) \
do { \
volatile OrigFn _orig = (orig); \
volatile unsigned long long _argvec[10]; \
volatile unsigned long long _res; \
_argvec[0] = MIPS64_LONG2REG_CAST(_orig.nraddr); \
_argvec[1] = MIPS64_LONG2REG_CAST(arg1); \
_argvec[2] = MIPS64_LONG2REG_CAST(arg2); \
_argvec[3] = MIPS64_LONG2REG_CAST(arg3); \
_argvec[4] = MIPS64_LONG2REG_CAST(arg4); \
_argvec[5] = MIPS64_LONG2REG_CAST(arg5); \
_argvec[6] = MIPS64_LONG2REG_CAST(arg6); \
_argvec[7] = MIPS64_LONG2REG_CAST(arg7); \
_argvec[8] = MIPS64_LONG2REG_CAST(arg8); \
_argvec[9] = MIPS64_LONG2REG_CAST(arg9); \
__asm__ volatile( \
"dsubu $29, $29, 8\n\t" \
"ld $4, 72(%1)\n\t" \
"sd $4, 0($29)\n\t" \
"ld $4, 8(%1)\n\t" \
"ld $5, 16(%1)\n\t" \
"ld $6, 24(%1)\n\t" \
"ld $7, 32(%1)\n\t" \
"ld $8, 40(%1)\n\t" \
"ld $9, 48(%1)\n\t" \
"ld $10, 56(%1)\n\t" \
"ld $11, 64(%1)\n\t" \
"ld $25, 0(%1)\n\t" /* target->t9 */ \
VALGRIND_CALL_NOREDIR_T9 \
"daddu $29, $29, 8\n\t" \
"move %0, $2\n" \
: /*out*/ "=r" (_res) \
: /*in*/ "r" (&_argvec[0]) \
: /*trash*/ "memory", __CALLER_SAVED_REGS \
); \
lval = (__typeof__(lval)) (long)_res; \
} while (0)
#define CALL_FN_W_10W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6, \
arg7,arg8,arg9,arg10) \
do { \
volatile OrigFn _orig = (orig); \
volatile unsigned long long _argvec[11]; \
volatile unsigned long long _res; \
_argvec[0] = MIPS64_LONG2REG_CAST(_orig.nraddr); \
_argvec[1] = MIPS64_LONG2REG_CAST(arg1); \
_argvec[2] = MIPS64_LONG2REG_CAST(arg2); \
_argvec[3] = MIPS64_LONG2REG_CAST(arg3); \
_argvec[4] = MIPS64_LONG2REG_CAST(arg4); \
_argvec[5] = MIPS64_LONG2REG_CAST(arg5); \
_argvec[6] = MIPS64_LONG2REG_CAST(arg6); \
_argvec[7] = MIPS64_LONG2REG_CAST(arg7); \
_argvec[8] = MIPS64_LONG2REG_CAST(arg8); \
_argvec[9] = MIPS64_LONG2REG_CAST(arg9); \
_argvec[10] = MIPS64_LONG2REG_CAST(arg10); \
__asm__ volatile( \
"dsubu $29, $29, 16\n\t" \
"ld $4, 72(%1)\n\t" \
"sd $4, 0($29)\n\t" \
"ld $4, 80(%1)\n\t" \
"sd $4, 8($29)\n\t" \
"ld $4, 8(%1)\n\t" \
"ld $5, 16(%1)\n\t" \
"ld $6, 24(%1)\n\t" \
"ld $7, 32(%1)\n\t" \
"ld $8, 40(%1)\n\t" \
"ld $9, 48(%1)\n\t" \
"ld $10, 56(%1)\n\t" \
"ld $11, 64(%1)\n\t" \
"ld $25, 0(%1)\n\t" /* target->t9 */ \
VALGRIND_CALL_NOREDIR_T9 \
"daddu $29, $29, 16\n\t" \
"move %0, $2\n" \
: /*out*/ "=r" (_res) \
: /*in*/ "r" (&_argvec[0]) \
: /*trash*/ "memory", __CALLER_SAVED_REGS \
); \
lval = (__typeof__(lval)) (long)_res; \
} while (0)
#define CALL_FN_W_11W(lval, orig, arg1,arg2,arg3,arg4,arg5, \
arg6,arg7,arg8,arg9,arg10, \
arg11) \
do { \
volatile OrigFn _orig = (orig); \
volatile unsigned long long _argvec[12]; \
volatile unsigned long long _res; \
_argvec[0] = MIPS64_LONG2REG_CAST(_orig.nraddr); \
_argvec[1] = MIPS64_LONG2REG_CAST(arg1); \
_argvec[2] = MIPS64_LONG2REG_CAST(arg2); \
_argvec[3] = MIPS64_LONG2REG_CAST(arg3); \
_argvec[4] = MIPS64_LONG2REG_CAST(arg4); \
_argvec[5] = MIPS64_LONG2REG_CAST(arg5); \
_argvec[6] = MIPS64_LONG2REG_CAST(arg6); \
_argvec[7] = MIPS64_LONG2REG_CAST(arg7); \
_argvec[8] = MIPS64_LONG2REG_CAST(arg8); \
_argvec[9] = MIPS64_LONG2REG_CAST(arg9); \
_argvec[10] = MIPS64_LONG2REG_CAST(arg10); \
_argvec[11] = MIPS64_LONG2REG_CAST(arg11); \
__asm__ volatile( \
"dsubu $29, $29, 24\n\t" \
"ld $4, 72(%1)\n\t" \
"sd $4, 0($29)\n\t" \
"ld $4, 80(%1)\n\t" \
"sd $4, 8($29)\n\t" \
"ld $4, 88(%1)\n\t" \
"sd $4, 16($29)\n\t" \
"ld $4, 8(%1)\n\t" \
"ld $5, 16(%1)\n\t" \
"ld $6, 24(%1)\n\t" \
"ld $7, 32(%1)\n\t" \
"ld $8, 40(%1)\n\t" \
"ld $9, 48(%1)\n\t" \
"ld $10, 56(%1)\n\t" \
"ld $11, 64(%1)\n\t" \
"ld $25, 0(%1)\n\t" /* target->t9 */ \
VALGRIND_CALL_NOREDIR_T9 \
"daddu $29, $29, 24\n\t" \
"move %0, $2\n" \
: /*out*/ "=r" (_res) \
: /*in*/ "r" (&_argvec[0]) \
: /*trash*/ "memory", __CALLER_SAVED_REGS \
); \
lval = (__typeof__(lval)) (long)_res; \
} while (0)
#define CALL_FN_W_12W(lval, orig, arg1,arg2,arg3,arg4,arg5, \
arg6,arg7,arg8,arg9,arg10, \
arg11,arg12) \
do { \
volatile OrigFn _orig = (orig); \
volatile unsigned long long _argvec[13]; \
volatile unsigned long long _res; \
_argvec[0] = MIPS64_LONG2REG_CAST(_orig.nraddr); \
_argvec[1] = MIPS64_LONG2REG_CAST(arg1); \
_argvec[2] = MIPS64_LONG2REG_CAST(arg2); \
_argvec[3] = MIPS64_LONG2REG_CAST(arg3); \
_argvec[4] = MIPS64_LONG2REG_CAST(arg4); \
_argvec[5] = MIPS64_LONG2REG_CAST(arg5); \
_argvec[6] = MIPS64_LONG2REG_CAST(arg6); \
_argvec[7] = MIPS64_LONG2REG_CAST(arg7); \
_argvec[8] = MIPS64_LONG2REG_CAST(arg8); \
_argvec[9] = MIPS64_LONG2REG_CAST(arg9); \
_argvec[10] = MIPS64_LONG2REG_CAST(arg10); \
_argvec[11] = MIPS64_LONG2REG_CAST(arg11); \
_argvec[12] = MIPS64_LONG2REG_CAST(arg12); \
__asm__ volatile( \
"dsubu $29, $29, 32\n\t" \
"ld $4, 72(%1)\n\t" \
"sd $4, 0($29)\n\t" \
"ld $4, 80(%1)\n\t" \
"sd $4, 8($29)\n\t" \
"ld $4, 88(%1)\n\t" \
"sd $4, 16($29)\n\t" \
"ld $4, 96(%1)\n\t" \
"sd $4, 24($29)\n\t" \
"ld $4, 8(%1)\n\t" \
"ld $5, 16(%1)\n\t" \
"ld $6, 24(%1)\n\t" \
"ld $7, 32(%1)\n\t" \
"ld $8, 40(%1)\n\t" \
"ld $9, 48(%1)\n\t" \
"ld $10, 56(%1)\n\t" \
"ld $11, 64(%1)\n\t" \
"ld $25, 0(%1)\n\t" /* target->t9 */ \
VALGRIND_CALL_NOREDIR_T9 \
"daddu $29, $29, 32\n\t" \
"move %0, $2\n" \
: /*out*/ "=r" (_res) \
: /*in*/ "r" (&_argvec[0]) \
: /*trash*/ "memory", __CALLER_SAVED_REGS \
); \
lval = (__typeof__(lval)) (long)_res; \
} while (0)
#endif /* PLAT_mips64_linux */
/* ------------------------------------------------------------------ */
/* ARCHITECTURE INDEPENDENT MACROS for CLIENT REQUESTS. */
/* */
/* ------------------------------------------------------------------ */
/* Some request codes. There are many more of these, but most are not
exposed to end-user view. These are the public ones, all of the
form 0x1000 + small_number.
Core ones are in the range 0x00000000--0x0000ffff. The non-public
ones start at 0x2000.
*/
/* These macros are used by tools -- they must be public, but don't
embed them into other programs. */
#define VG_USERREQ_TOOL_BASE(a,b) \
((unsigned int)(((a)&0xff) << 24 | ((b)&0xff) << 16))
#define VG_IS_TOOL_USERREQ(a, b, v) \
(VG_USERREQ_TOOL_BASE(a,b) == ((v) & 0xffff0000))
/* !! ABIWARNING !! ABIWARNING !! ABIWARNING !! ABIWARNING !!
This enum comprises an ABI exported by Valgrind to programs
which use client requests. DO NOT CHANGE THE NUMERIC VALUES OF THESE
ENTRIES, NOR DELETE ANY -- add new ones at the end of the most
relevant group. */
typedef
enum { VG_USERREQ__RUNNING_ON_VALGRIND = 0x1001,
VG_USERREQ__DISCARD_TRANSLATIONS = 0x1002,
/* These allow any function to be called from the simulated
CPU but run on the real CPU. Nb: the first arg passed to
the function is always the ThreadId of the running
thread! So CLIENT_CALL0 actually requires a 1 arg
function, etc. */
VG_USERREQ__CLIENT_CALL0 = 0x1101,
VG_USERREQ__CLIENT_CALL1 = 0x1102,
VG_USERREQ__CLIENT_CALL2 = 0x1103,
VG_USERREQ__CLIENT_CALL3 = 0x1104,
/* Can be useful in regression testing suites -- eg. can
send Valgrind's output to /dev/null and still count
errors. */
VG_USERREQ__COUNT_ERRORS = 0x1201,
/* Allows the client program and/or gdbserver to execute a monitor
command. */
VG_USERREQ__GDB_MONITOR_COMMAND = 0x1202,
/* Allows the client program to change a dynamic command line
option. */
VG_USERREQ__CLO_CHANGE = 0x1203,
/* These are useful and can be interpreted by any tool that
tracks malloc() et al, by using vg_replace_malloc.c. */
VG_USERREQ__MALLOCLIKE_BLOCK = 0x1301,
VG_USERREQ__RESIZEINPLACE_BLOCK = 0x130b,
VG_USERREQ__FREELIKE_BLOCK = 0x1302,
/* Memory pool support. */
VG_USERREQ__CREATE_MEMPOOL = 0x1303,
VG_USERREQ__DESTROY_MEMPOOL = 0x1304,
VG_USERREQ__MEMPOOL_ALLOC = 0x1305,
VG_USERREQ__MEMPOOL_FREE = 0x1306,
VG_USERREQ__MEMPOOL_TRIM = 0x1307,
VG_USERREQ__MOVE_MEMPOOL = 0x1308,
VG_USERREQ__MEMPOOL_CHANGE = 0x1309,
VG_USERREQ__MEMPOOL_EXISTS = 0x130a,
/* Allow printfs to valgrind log. */
/* The first two pass the va_list argument by value, which
assumes it is the same size as or smaller than a UWord,
             which generally isn't the case. Hence they are deprecated.
The second two pass the vargs by reference and so are
immune to this problem. */
/* both :: char* fmt, va_list vargs (DEPRECATED) */
VG_USERREQ__PRINTF = 0x1401,
VG_USERREQ__PRINTF_BACKTRACE = 0x1402,
/* both :: char* fmt, va_list* vargs */
VG_USERREQ__PRINTF_VALIST_BY_REF = 0x1403,
VG_USERREQ__PRINTF_BACKTRACE_VALIST_BY_REF = 0x1404,
/* Stack support. */
VG_USERREQ__STACK_REGISTER = 0x1501,
VG_USERREQ__STACK_DEREGISTER = 0x1502,
VG_USERREQ__STACK_CHANGE = 0x1503,
/* Wine support */
VG_USERREQ__LOAD_PDB_DEBUGINFO = 0x1601,
/* Querying of debug info. */
VG_USERREQ__MAP_IP_TO_SRCLOC = 0x1701,
/* Disable/enable error reporting level. Takes a single
Word arg which is the delta to this thread's error
disablement indicator. Hence 1 disables or further
disables errors, and -1 moves back towards enablement.
Other values are not allowed. */
VG_USERREQ__CHANGE_ERR_DISABLEMENT = 0x1801,
/* Some requests used for Valgrind internal, such as
self-test or self-hosting. */
/* Initialise IR injection */
VG_USERREQ__VEX_INIT_FOR_IRI = 0x1901,
/* Used by Inner Valgrind to inform Outer Valgrind where to
find the list of inner guest threads */
VG_USERREQ__INNER_THREADS = 0x1902
} Vg_ClientRequest;
#if !defined(__GNUC__)
# define __extension__ /* */
#endif
/* Returns the number of Valgrinds this code is running under. That
is, 0 if running natively, 1 if running under Valgrind, 2 if
running under Valgrind which is running under another Valgrind,
etc. */
#define RUNNING_ON_VALGRIND \
(unsigned)VALGRIND_DO_CLIENT_REQUEST_EXPR(0 /* if not */, \
VG_USERREQ__RUNNING_ON_VALGRIND, \
0, 0, 0, 0, 0) \
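/* Example (illustrative; n_iterations is a hypothetical program variable):
       if (RUNNING_ON_VALGRIND) {
           n_iterations /= 10;
       }
   shrinks a workload when the program notices it is being simulated.  A
   value greater than 1 indicates a nested ("inner") Valgrind setup. */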
/* Discard translation of code in the range [_qzz_addr .. _qzz_addr +
_qzz_len - 1]. Useful if you are debugging a JITter or some such,
since it provides a way to make sure valgrind will retranslate the
invalidated area. Returns no value. */
#define VALGRIND_DISCARD_TRANSLATIONS(_qzz_addr,_qzz_len) \
VALGRIND_DO_CLIENT_REQUEST_STMT(VG_USERREQ__DISCARD_TRANSLATIONS, \
_qzz_addr, _qzz_len, 0, 0, 0)
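/* Example (illustrative; code_buf, new_code and code_len are hypothetical
   variables of a JIT that rewrites machine code in place):
       memcpy(code_buf, new_code, code_len);
       VALGRIND_DISCARD_TRANSLATIONS(code_buf, code_len);
   This ensures Valgrind retranslates the rewritten code instead of running
   stale cached translations. */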
#define VALGRIND_INNER_THREADS(_qzz_addr) \
VALGRIND_DO_CLIENT_REQUEST_STMT(VG_USERREQ__INNER_THREADS, \
_qzz_addr, 0, 0, 0, 0)
/* These requests are for getting Valgrind itself to print something.
Possibly with a backtrace. This is a really ugly hack. The return value
is the number of characters printed, excluding the "**<pid>** " part at the
start and the backtrace (if present). */
#if defined(__GNUC__) || defined(__INTEL_COMPILER) && !defined(_MSC_VER)
/* Modern GCC will optimize the static routine out if unused,
and unused attribute will shut down warnings about it. */
static int VALGRIND_PRINTF(const char *format, ...)
__attribute__((format(__printf__, 1, 2), __unused__));
#endif
static int
#if defined(_MSC_VER)
__inline
#endif
VALGRIND_PRINTF(const char *format, ...)
{
#if defined(NVALGRIND)
(void)format;
return 0;
#else /* NVALGRIND */
#if defined(_MSC_VER) || defined(__MINGW64__)
uintptr_t _qzz_res;
#else
unsigned long _qzz_res;
#endif
va_list vargs;
va_start(vargs, format);
#if defined(_MSC_VER) || defined(__MINGW64__)
_qzz_res = VALGRIND_DO_CLIENT_REQUEST_EXPR(0,
VG_USERREQ__PRINTF_VALIST_BY_REF,
(uintptr_t)format,
(uintptr_t)&vargs,
0, 0, 0);
#else
_qzz_res = VALGRIND_DO_CLIENT_REQUEST_EXPR(0,
VG_USERREQ__PRINTF_VALIST_BY_REF,
(unsigned long)format,
(unsigned long)&vargs,
0, 0, 0);
#endif
va_end(vargs);
return (int)_qzz_res;
#endif /* NVALGRIND */
}
#if defined(__GNUC__) || defined(__INTEL_COMPILER) && !defined(_MSC_VER)
static int VALGRIND_PRINTF_BACKTRACE(const char *format, ...)
__attribute__((format(__printf__, 1, 2), __unused__));
#endif
static int
#if defined(_MSC_VER)
__inline
#endif
VALGRIND_PRINTF_BACKTRACE(const char *format, ...)
{
#if defined(NVALGRIND)
(void)format;
return 0;
#else /* NVALGRIND */
#if defined(_MSC_VER) || defined(__MINGW64__)
uintptr_t _qzz_res;
#else
unsigned long _qzz_res;
#endif
va_list vargs;
va_start(vargs, format);
#if defined(_MSC_VER) || defined(__MINGW64__)
_qzz_res = VALGRIND_DO_CLIENT_REQUEST_EXPR(0,
VG_USERREQ__PRINTF_BACKTRACE_VALIST_BY_REF,
(uintptr_t)format,
(uintptr_t)&vargs,
0, 0, 0);
#else
_qzz_res = VALGRIND_DO_CLIENT_REQUEST_EXPR(0,
VG_USERREQ__PRINTF_BACKTRACE_VALIST_BY_REF,
(unsigned long)format,
(unsigned long)&vargs,
0, 0, 0);
#endif
va_end(vargs);
return (int)_qzz_res;
#endif /* NVALGRIND */
}
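/* Example (illustrative; n_records and stage are hypothetical variables):
       VALGRIND_PRINTF("processed %d records\n", n_records);
       VALGRIND_PRINTF_BACKTRACE("unexpected state in stage %s\n", stage);
   Both print to the Valgrind log; the second also emits a stack trace. */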
/* These requests allow control to move from the simulated CPU to the
real CPU, calling an arbitrary function.
Note that the current ThreadId is inserted as the first argument.
So this call:
VALGRIND_NON_SIMD_CALL2(f, arg1, arg2)
requires f to have this signature:
Word f(Word tid, Word arg1, Word arg2)
where "Word" is a word-sized type.
Note that these client requests are not entirely reliable. For example,
if you call a function with them that subsequently calls printf(),
there's a high chance Valgrind will crash. Generally, your prospects of
these working are made higher if the called function does not refer to
any global variables, and does not refer to any libc or other functions
(printf et al). Any kind of entanglement with libc or dynamic linking is
likely to have a bad outcome, for tricky reasons which we've grappled
with a lot in the past.
*/
#define VALGRIND_NON_SIMD_CALL0(_qyy_fn) \
VALGRIND_DO_CLIENT_REQUEST_EXPR(0 /* default return */, \
VG_USERREQ__CLIENT_CALL0, \
_qyy_fn, \
0, 0, 0, 0)
#define VALGRIND_NON_SIMD_CALL1(_qyy_fn, _qyy_arg1) \
VALGRIND_DO_CLIENT_REQUEST_EXPR(0 /* default return */, \
VG_USERREQ__CLIENT_CALL1, \
_qyy_fn, \
_qyy_arg1, 0, 0, 0)
#define VALGRIND_NON_SIMD_CALL2(_qyy_fn, _qyy_arg1, _qyy_arg2) \
VALGRIND_DO_CLIENT_REQUEST_EXPR(0 /* default return */, \
VG_USERREQ__CLIENT_CALL2, \
_qyy_fn, \
_qyy_arg1, _qyy_arg2, 0, 0)
#define VALGRIND_NON_SIMD_CALL3(_qyy_fn, _qyy_arg1, _qyy_arg2, _qyy_arg3) \
VALGRIND_DO_CLIENT_REQUEST_EXPR(0 /* default return */, \
VG_USERREQ__CLIENT_CALL3, \
_qyy_fn, \
_qyy_arg1, _qyy_arg2, \
_qyy_arg3, 0)
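/* Example (illustrative sketch; uses long as a stand-in for a word-sized
   integer type on the target ABI):
       static long add_on_real_cpu(long tid, long a, long b) {
           (void)tid;
           return a + b;
       }
       long sum = VALGRIND_NON_SIMD_CALL2(add_on_real_cpu, 3, 4);
   Note the extra leading ThreadId parameter, and recall from the comment
   above that the called function should avoid libc (printf et al). */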
/* Counts the number of errors that have been recorded by a tool. Nb:
the tool must record the errors with VG_(maybe_record_error)() or
VG_(unique_error)() for them to be counted. */
#define VALGRIND_COUNT_ERRORS \
(unsigned)VALGRIND_DO_CLIENT_REQUEST_EXPR( \
0 /* default return */, \
VG_USERREQ__COUNT_ERRORS, \
0, 0, 0, 0, 0)
/* Several Valgrind tools (Memcheck, Massif, Helgrind, DRD) rely on knowing
when heap blocks are allocated in order to give accurate results. This
happens automatically for the standard allocator functions such as
malloc(), calloc(), realloc(), memalign(), new, new[], free(), delete,
delete[], etc.
But if your program uses a custom allocator, this doesn't automatically
happen, and Valgrind will not do as well. For example, if you allocate
   superblocks with mmap() and then allocate chunks of the superblocks, all
Valgrind's observations will be at the mmap() level and it won't know that
the chunks should be considered separate entities. In Memcheck's case,
that means you probably won't get heap block overrun detection (because
there won't be redzones marked as unaddressable) and you definitely won't
get any leak detection.
The following client requests allow a custom allocator to be annotated so
that it can be handled accurately by Valgrind.
VALGRIND_MALLOCLIKE_BLOCK marks a region of memory as having been allocated
by a malloc()-like function. For Memcheck (an illustrative case), this
does two things:
- It records that the block has been allocated. This means any addresses
within the block mentioned in error messages will be
identified as belonging to the block. It also means that if the block
isn't freed it will be detected by the leak checker.
- It marks the block as being addressable and undefined (if 'is_zeroed' is
not set), or addressable and defined (if 'is_zeroed' is set). This
controls how accesses to the block by the program are handled.
'addr' is the start of the usable block (ie. after any
redzone), 'sizeB' is its size. 'rzB' is the redzone size if the allocator
can apply redzones -- these are blocks of padding at the start and end of
each block. Adding redzones is recommended as it makes it much more likely
Valgrind will spot block overruns. `is_zeroed' indicates if the memory is
zeroed (or filled with another predictable value), as is the case for
calloc().
VALGRIND_MALLOCLIKE_BLOCK should be put immediately after the point where a
heap block -- that will be used by the client program -- is allocated.
It's best to put it at the outermost level of the allocator if possible;
for example, if you have a function my_alloc() which calls
internal_alloc(), and the client request is put inside internal_alloc(),
stack traces relating to the heap block will contain entries for both
my_alloc() and internal_alloc(), which is probably not what you want.
For Memcheck users: if you use VALGRIND_MALLOCLIKE_BLOCK to carve out
custom blocks from within a heap block, B, that has been allocated with
malloc/calloc/new/etc, then block B will be *ignored* during leak-checking
-- the custom blocks will take precedence.
VALGRIND_FREELIKE_BLOCK is the partner to VALGRIND_MALLOCLIKE_BLOCK. For
Memcheck, it does two things:
- It records that the block has been deallocated. This assumes that the
block was annotated as having been allocated via
VALGRIND_MALLOCLIKE_BLOCK. Otherwise, an error will be issued.
- It marks the block as being unaddressable.
VALGRIND_FREELIKE_BLOCK should be put immediately after the point where a
heap block is deallocated.
VALGRIND_RESIZEINPLACE_BLOCK informs a tool about reallocation. For
Memcheck, it does four things:
- It records that the size of a block has been changed. This assumes that
the block was annotated as having been allocated via
VALGRIND_MALLOCLIKE_BLOCK. Otherwise, an error will be issued.
- If the block shrunk, it marks the freed memory as being unaddressable.
- If the block grew, it marks the new area as undefined and defines a red
zone past the end of the new block.
- The V-bits of the overlap between the old and the new block are preserved.
VALGRIND_RESIZEINPLACE_BLOCK should be put after allocation of the new block
and before deallocation of the old block.
In many cases, these three client requests will not be enough to get your
allocator working well with Memcheck. More specifically, if your allocator
writes to freed blocks in any way then a VALGRIND_MAKE_MEM_UNDEFINED call
will be necessary to mark the memory as addressable just before the zeroing
occurs, otherwise you'll get a lot of invalid write errors. For example,
you'll need to do this if your allocator recycles freed blocks, but it
zeroes them before handing them back out (via VALGRIND_MALLOCLIKE_BLOCK).
Alternatively, if your allocator reuses freed blocks for allocator-internal
data structures, VALGRIND_MAKE_MEM_UNDEFINED calls will also be necessary.
Really, what's happening is a blurring of the lines between the client
program and the allocator... after VALGRIND_FREELIKE_BLOCK is called, the
memory should be considered unaddressable to the client program, but the
allocator knows more than the rest of the client program and so may be able
to safely access it. Extra client requests are necessary for Valgrind to
understand the distinction between the allocator and the rest of the
program.
Ignored if addr == 0.
*/
#define VALGRIND_MALLOCLIKE_BLOCK(addr, sizeB, rzB, is_zeroed) \
VALGRIND_DO_CLIENT_REQUEST_STMT(VG_USERREQ__MALLOCLIKE_BLOCK, \
addr, sizeB, rzB, is_zeroed, 0)
/* See the comment for VALGRIND_MALLOCLIKE_BLOCK for details.
Ignored if addr == 0.
*/
#define VALGRIND_RESIZEINPLACE_BLOCK(addr, oldSizeB, newSizeB, rzB) \
VALGRIND_DO_CLIENT_REQUEST_STMT(VG_USERREQ__RESIZEINPLACE_BLOCK, \
addr, oldSizeB, newSizeB, rzB, 0)
/* See the comment for VALGRIND_MALLOCLIKE_BLOCK for details.
Ignored if addr == 0.
*/
#define VALGRIND_FREELIKE_BLOCK(addr, rzB) \
VALGRIND_DO_CLIENT_REQUEST_STMT(VG_USERREQ__FREELIKE_BLOCK, \
addr, rzB, 0, 0, 0)
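/* Example (illustrative sketch; carve_from_superblock and
   return_to_superblock are hypothetical helpers of a custom allocator):
       void* my_alloc(size_t n) {
           void* p = carve_from_superblock(n);
           VALGRIND_MALLOCLIKE_BLOCK(p, n, 0, 0);     rzB=0, is_zeroed=0
           return p;
       }
       void my_free(void* p) {
           VALGRIND_FREELIKE_BLOCK(p, 0);
           return_to_superblock(p);
       }
   Placing the requests at the outermost allocator entry points keeps the
   resulting stack traces short, as recommended above. */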
/* Create a memory pool. */
#define VALGRIND_CREATE_MEMPOOL(pool, rzB, is_zeroed) \
VALGRIND_DO_CLIENT_REQUEST_STMT(VG_USERREQ__CREATE_MEMPOOL, \
pool, rzB, is_zeroed, 0, 0)
/* Create a memory pool with some flags specifying extended behaviour.
When flags is zero, the behaviour is identical to VALGRIND_CREATE_MEMPOOL.
The flag VALGRIND_MEMPOOL_METAPOOL specifies that the pieces of memory
associated with the pool using VALGRIND_MEMPOOL_ALLOC will be used
by the application as superblocks to dole out MALLOC_LIKE blocks using
   VALGRIND_MALLOCLIKE_BLOCK. In other words, a meta pool is a two-level
   pool: the first level is the blocks described by VALGRIND_MEMPOOL_ALLOC.
   The second level blocks are described using VALGRIND_MALLOCLIKE_BLOCK.
   Note that the association between the pool and the second level blocks
   is implicit: second level blocks will be located inside first level
   blocks. It is necessary to use the VALGRIND_MEMPOOL_METAPOOL flag
   for such two-level pools, as otherwise valgrind will detect overlapping
   memory blocks, and will abort execution (e.g. during leak search).
Such a meta pool can also be marked as an 'auto free' pool using the flag
VALGRIND_MEMPOOL_AUTO_FREE, which must be OR-ed together with the
VALGRIND_MEMPOOL_METAPOOL. For an 'auto free' pool, VALGRIND_MEMPOOL_FREE
will automatically free the second level blocks that are contained
inside the first level block freed with VALGRIND_MEMPOOL_FREE.
In other words, calling VALGRIND_MEMPOOL_FREE will cause implicit calls
to VALGRIND_FREELIKE_BLOCK for all the second level blocks included
in the first level block.
Note: it is an error to use the VALGRIND_MEMPOOL_AUTO_FREE flag
without the VALGRIND_MEMPOOL_METAPOOL flag.
*/
#define VALGRIND_MEMPOOL_AUTO_FREE 1
#define VALGRIND_MEMPOOL_METAPOOL 2
#define VALGRIND_CREATE_MEMPOOL_EXT(pool, rzB, is_zeroed, flags) \
VALGRIND_DO_CLIENT_REQUEST_STMT(VG_USERREQ__CREATE_MEMPOOL, \
pool, rzB, is_zeroed, flags, 0)
/* Destroy a memory pool. */
#define VALGRIND_DESTROY_MEMPOOL(pool) \
VALGRIND_DO_CLIENT_REQUEST_STMT(VG_USERREQ__DESTROY_MEMPOOL, \
pool, 0, 0, 0, 0)
/* Associate a piece of memory with a memory pool. */
#define VALGRIND_MEMPOOL_ALLOC(pool, addr, size) \
VALGRIND_DO_CLIENT_REQUEST_STMT(VG_USERREQ__MEMPOOL_ALLOC, \
pool, addr, size, 0, 0)
/* Disassociate a piece of memory from a memory pool. */
#define VALGRIND_MEMPOOL_FREE(pool, addr) \
VALGRIND_DO_CLIENT_REQUEST_STMT(VG_USERREQ__MEMPOOL_FREE, \
pool, addr, 0, 0, 0)
/* Disassociate any pieces outside a particular range. */
#define VALGRIND_MEMPOOL_TRIM(pool, addr, size) \
VALGRIND_DO_CLIENT_REQUEST_STMT(VG_USERREQ__MEMPOOL_TRIM, \
pool, addr, size, 0, 0)
/* Resize and/or move a piece associated with a memory pool. */
#define VALGRIND_MOVE_MEMPOOL(poolA, poolB) \
VALGRIND_DO_CLIENT_REQUEST_STMT(VG_USERREQ__MOVE_MEMPOOL, \
poolA, poolB, 0, 0, 0)
/* Resize and/or move a piece associated with a memory pool. */
#define VALGRIND_MEMPOOL_CHANGE(pool, addrA, addrB, size) \
VALGRIND_DO_CLIENT_REQUEST_STMT(VG_USERREQ__MEMPOOL_CHANGE, \
pool, addrA, addrB, size, 0)
/* Return 1 if a mempool exists, else 0. */
#define VALGRIND_MEMPOOL_EXISTS(pool) \
(unsigned)VALGRIND_DO_CLIENT_REQUEST_EXPR(0, \
VG_USERREQ__MEMPOOL_EXISTS, \
pool, 0, 0, 0, 0)
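/* Example (illustrative sketch; pool, chunk and chunk_size are hypothetical
   variables -- 'pool' is just an anchor address, typically the address of
   the allocator's pool descriptor):
       VALGRIND_CREATE_MEMPOOL(pool, 16, 0);              16-byte redzones
       VALGRIND_MEMPOOL_ALLOC(pool, chunk, chunk_size);   per allocation
       VALGRIND_MEMPOOL_FREE(pool, chunk);                per deallocation
       VALGRIND_DESTROY_MEMPOOL(pool);                    at pool teardown
*/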
/* Mark a piece of memory as being a stack. Returns a stack id.
start is the lowest addressable stack byte, end is the highest
addressable stack byte. */
#define VALGRIND_STACK_REGISTER(start, end) \
(unsigned)VALGRIND_DO_CLIENT_REQUEST_EXPR(0, \
VG_USERREQ__STACK_REGISTER, \
start, end, 0, 0, 0)
/* Unmark the piece of memory associated with a stack id as being a
stack. */
#define VALGRIND_STACK_DEREGISTER(id) \
VALGRIND_DO_CLIENT_REQUEST_STMT(VG_USERREQ__STACK_DEREGISTER, \
id, 0, 0, 0, 0)
/* Change the start and end address of the stack id.
start is the new lowest addressable stack byte, end is the new highest
addressable stack byte. */
#define VALGRIND_STACK_CHANGE(id, start, end) \
VALGRIND_DO_CLIENT_REQUEST_STMT(VG_USERREQ__STACK_CHANGE, \
id, start, end, 0, 0)
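/* Example (illustrative sketch; a coroutine or fiber library registering a
   manually allocated stack, with hypothetical variables stack_base and
   stack_size, stack_base being a char*):
       unsigned id = VALGRIND_STACK_REGISTER(stack_base,
                                             stack_base + stack_size - 1);
       ... run code on that stack ...
       VALGRIND_STACK_DEREGISTER(id);
*/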
/* Load PDB debug info for Wine PE image_map. */
#define VALGRIND_LOAD_PDB_DEBUGINFO(fd, ptr, total_size, delta) \
VALGRIND_DO_CLIENT_REQUEST_STMT(VG_USERREQ__LOAD_PDB_DEBUGINFO, \
fd, ptr, total_size, delta, 0)
/* Map a code address to a source file name and line number. buf64
must point to a 64-byte buffer in the caller's address space. The
result will be dumped in there and is guaranteed to be zero
terminated. If no info is found, the first byte is set to zero. */
#define VALGRIND_MAP_IP_TO_SRCLOC(addr, buf64) \
(unsigned)VALGRIND_DO_CLIENT_REQUEST_EXPR(0, \
VG_USERREQ__MAP_IP_TO_SRCLOC, \
addr, buf64, 0, 0, 0)
/* Disable error reporting for this thread. Behaves in a stack like
way, so you can safely call this multiple times provided that
VALGRIND_ENABLE_ERROR_REPORTING is called the same number of times
to re-enable reporting. The first call of this macro disables
reporting. Subsequent calls have no effect except to increase the
number of VALGRIND_ENABLE_ERROR_REPORTING calls needed to re-enable
reporting. Child threads do not inherit this setting from their
parents -- they are always created with reporting enabled. */
#define VALGRIND_DISABLE_ERROR_REPORTING \
VALGRIND_DO_CLIENT_REQUEST_STMT(VG_USERREQ__CHANGE_ERR_DISABLEMENT, \
1, 0, 0, 0, 0)
/* Re-enable error reporting, as per comments on
VALGRIND_DISABLE_ERROR_REPORTING. */
#define VALGRIND_ENABLE_ERROR_REPORTING \
VALGRIND_DO_CLIENT_REQUEST_STMT(VG_USERREQ__CHANGE_ERR_DISABLEMENT, \
-1, 0, 0, 0, 0)
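/* Example (illustrative; call_known_noisy_function is a hypothetical call
   whose reports the program chooses to suppress).  Calls nest, so each
   DISABLE must be balanced by an ENABLE:
       VALGRIND_DISABLE_ERROR_REPORTING;
       call_known_noisy_function();
       VALGRIND_ENABLE_ERROR_REPORTING;
*/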
/* Execute a monitor command from the client program.
If a connection is opened with GDB, the output will be sent
according to the output mode set for vgdb.
If no connection is opened, output will go to the log output.
Returns 1 if command not recognised, 0 otherwise. */
#define VALGRIND_MONITOR_COMMAND(command) \
VALGRIND_DO_CLIENT_REQUEST_EXPR(0, VG_USERREQ__GDB_MONITOR_COMMAND, \
command, 0, 0, 0, 0)
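/* Example (illustrative; the command string is interpreted by the tool in
   use, so availability depends on that tool -- Memcheck, for instance,
   accepts a leak-check command):
       VALGRIND_MONITOR_COMMAND("leak_check full");
*/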
/* Change the value of a dynamic command line option.
Note that unknown or not dynamically changeable options
will cause a warning message to be output. */
#define VALGRIND_CLO_CHANGE(option) \
VALGRIND_DO_CLIENT_REQUEST_STMT(VG_USERREQ__CLO_CHANGE, \
option, 0, 0, 0, 0)
#undef PLAT_x86_darwin
#undef PLAT_amd64_darwin
#undef PLAT_x86_win32
#undef PLAT_amd64_win64
#undef PLAT_x86_linux
#undef PLAT_amd64_linux
#undef PLAT_ppc32_linux
#undef PLAT_ppc64be_linux
#undef PLAT_ppc64le_linux
#undef PLAT_arm_linux
#undef PLAT_s390x_linux
#undef PLAT_mips32_linux
#undef PLAT_mips64_linux
#undef PLAT_nanomips_linux
#undef PLAT_x86_solaris
#undef PLAT_amd64_solaris
#endif /* __VALGRIND_H */
```
|
==========================================================================================================================
SOURCE CODE FILE: __init__.py
LINES: 1
SIZE: 0.00 KB
PATH: scripts\freecad_env\Lib\site-packages\torch\utils\bottleneck\__init__.py
ENCODING: utf-8
```py
```
|
==========================================================================================================================
SOURCE CODE FILE: __main__.py
LINES: 6
SIZE: 7.25 KB
PATH: scripts\freecad_env\Lib\site-packages\torch\utils\bottleneck\__main__.py
ENCODING: utf-8
```py
# mypy: allow-untyped-defs
import argparse
import cProfile
import pstats
import sys
import os
import torch
from torch.autograd import profiler
from torch.utils.collect_env import get_env_info
def redirect_argv(new_argv):
sys.argv[:] = new_argv[:]
def compiled_with_cuda(sysinfo):
if sysinfo.cuda_compiled_version:
return f'compiled w/ CUDA {sysinfo.cuda_compiled_version}'
return 'not compiled w/ CUDA'
env_summary = """
--------------------------------------------------------------------------------
Environment Summary
--------------------------------------------------------------------------------
PyTorch {pytorch_version}{debug_str} {cuda_compiled}
Running with Python {py_version} and {cuda_runtime}
`{pip_version} list` truncated output:
{pip_list_output}
""".strip()
def run_env_analysis():
print('Running environment analysis...')
info = get_env_info()
result: dict[str, str] = {}
debug_str = ''
if info.is_debug_build:
debug_str = ' DEBUG'
cuda_avail = ''
if info.is_cuda_available:
cuda = info.cuda_runtime_version
if cuda is not None:
cuda_avail = 'CUDA ' + cuda
else:
            cuda_avail = 'CUDA unavailable'
pip_version = info.pip_version
pip_list_output = info.pip_packages
if pip_list_output is None:
pip_list_output = 'Unable to fetch'
result = {
'debug_str': debug_str,
'pytorch_version': info.torch_version,
'cuda_compiled': compiled_with_cuda(info),
'py_version': f'{sys.version_info[0]}.{sys.version_info[1]}',
'cuda_runtime': cuda_avail,
'pip_version': pip_version,
'pip_list_output': pip_list_output,
}
return env_summary.format(**result)
def run_cprofile(code, globs, launch_blocking=False):
print('Running your script with cProfile')
prof = cProfile.Profile()
prof.enable()
exec(code, globs, None)
prof.disable()
return prof
cprof_summary = """
--------------------------------------------------------------------------------
cProfile output
--------------------------------------------------------------------------------
""".strip()
def print_cprofile_summary(prof, sortby='tottime', topk=15):
print(cprof_summary)
cprofile_stats = pstats.Stats(prof).sort_stats(sortby)
cprofile_stats.print_stats(topk)
def run_autograd_prof(code, globs):
def run_prof(use_cuda=False):
with profiler.profile(use_cuda=use_cuda) as prof:
exec(code, globs, None)
return prof
print('Running your script with the autograd profiler...')
result = [run_prof(use_cuda=False)]
if torch.cuda.is_available():
result.append(run_prof(use_cuda=True))
else:
result.append(None)
return result
autograd_prof_summary = """
--------------------------------------------------------------------------------
autograd profiler output ({mode} mode)
--------------------------------------------------------------------------------
{description}
{cuda_warning}
{output}
""".strip()
def print_autograd_prof_summary(prof, mode, sortby='cpu_time', topk=15):
valid_sortby = ['cpu_time', 'cuda_time', 'cpu_time_total', 'cuda_time_total', 'count']
if sortby not in valid_sortby:
        warn = ('WARNING: invalid sorting option for autograd profiler results: {}\n'
                'Expected one of `cpu_time`, `cuda_time`, `cpu_time_total`, '
                '`cuda_time_total`, or `count`. Defaulting to `cpu_time`.')
print(warn.format(sortby))
sortby = 'cpu_time'
if mode == 'CUDA':
cuda_warning = ('\n\tBecause the autograd profiler uses the CUDA event API,\n'
'\tthe CUDA time column reports approximately max(cuda_time, cpu_time).\n'
'\tPlease ignore this output if your code does not use CUDA.\n')
else:
cuda_warning = ''
sorted_events = sorted(prof.function_events,
key=lambda x: getattr(x, sortby), reverse=True)
topk_events = sorted_events[:topk]
result = {
'mode': mode,
'description': f'top {topk} events sorted by {sortby}',
'output': torch.autograd.profiler_util._build_table(topk_events),
'cuda_warning': cuda_warning
}
print(autograd_prof_summary.format(**result))
descript = """
`bottleneck` is a tool that can be used as an initial step for debugging
bottlenecks in your program.
It summarizes runs of your script with the Python profiler and PyTorch\'s
autograd profiler. Because your script will be profiled, please ensure that it
exits in a finite amount of time.
For more complicated uses of the profilers, please see
https://docs.python.org/3/library/profile.html and
https://pytorch.org/docs/main/autograd.html#profiler for more information.
""".strip()
def parse_args():
parser = argparse.ArgumentParser(description=descript)
parser.add_argument('scriptfile', type=str,
help='Path to the script to be run. '
'Usually run with `python path/to/script`.')
parser.add_argument('args', type=str, nargs=argparse.REMAINDER,
help='Command-line arguments to be passed to the script.')
return parser.parse_args()
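# Illustrative invocation of this tool (the script path and arguments are
# placeholders):
#   python -m torch.utils.bottleneck /path/to/source/script.py [args]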
def cpu_time_total(autograd_prof):
return sum(event.cpu_time_total for event in autograd_prof.function_events)
def main():
args = parse_args()
# Customizable constants.
scriptfile = args.scriptfile
scriptargs = [] if args.args is None else args.args
scriptargs.insert(0, scriptfile)
cprofile_sortby = 'tottime'
cprofile_topk = 15
autograd_prof_sortby = 'cpu_time_total'
autograd_prof_topk = 15
redirect_argv(scriptargs)
sys.path.insert(0, os.path.dirname(scriptfile))
with open(scriptfile, 'rb') as stream:
code = compile(stream.read(), scriptfile, 'exec')
globs = {
'__file__': scriptfile,
'__name__': '__main__',
'__package__': None,
'__cached__': None,
}
print(descript)
env_summary = run_env_analysis()
if torch.cuda.is_available():
torch.cuda.init()
cprofile_prof = run_cprofile(code, globs)
autograd_prof_cpu, autograd_prof_cuda = run_autograd_prof(code, globs)
print(env_summary)
print_cprofile_summary(cprofile_prof, cprofile_sortby, cprofile_topk)
if not torch.cuda.is_available():
print_autograd_prof_summary(autograd_prof_cpu, 'CPU', autograd_prof_sortby, autograd_prof_topk)
return
# Print both the result of the CPU-mode and CUDA-mode autograd profilers
# if their execution times are very different.
cuda_prof_exec_time = cpu_time_total(autograd_prof_cuda)
if len(autograd_prof_cpu.function_events) > 0:
cpu_prof_exec_time = cpu_time_total(autograd_prof_cpu)
pct_diff = (cuda_prof_exec_time - cpu_prof_exec_time) / cuda_prof_exec_time
if abs(pct_diff) > 0.05:
print_autograd_prof_summary(autograd_prof_cpu, 'CPU', autograd_prof_sortby, autograd_prof_topk)
print_autograd_prof_summary(autograd_prof_cuda, 'CUDA', autograd_prof_sortby, autograd_prof_topk)
if __name__ == '__main__':
main()
```
|
=====================================================================================================================
SOURCE CODE FILE: bundled_inputs.py
LINES: 4
SIZE: 22.51 KB
PATH: scripts\freecad_env\Lib\site-packages\torch\utils\bundled_inputs.py
ENCODING: utf-8
```py
#!/usr/bin/env python3
# mypy: allow-untyped-defs
from typing import Any, TypeVar, Optional, NamedTuple, Union, Callable
from collections.abc import Sequence
import textwrap
import torch
from torch._C import TupleType, ListType
from torch.jit._recursive import wrap_cpp_module
T = TypeVar("T")
MAX_RAW_TENSOR_SIZE = 16
class InflatableArg(NamedTuple):
"""Helper type for bundled inputs.
'value' is the compressed/deflated input that is stored in the model. Value
must be of the same type as the argument to the function that it is a deflated
input for.
    'fmt' is a formattable code string that is executed to inflate the compressed
    data into the appropriate input. It can use 'value' as an input to the format
    string. It must result in a value of the same type as 'value'.
    'fmt_fn' is a formattable function code string that is executed to inflate the
    compressed data into the appropriate input. It must result in a value of the
    same type as 'value'. The function name should be the formattable part of the
    string.
Note: Only top level InflatableArgs can be inflated. i.e. you cannot place
an inflatable arg inside of some other structure. You should instead create
an inflatable arg such that the fmt code string returns the full structure
of your input.
"""
value: Any
fmt: str = "{}"
fmt_fn: str = ""
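# Illustrative sketch (hypothetical shapes): store a tiny tensor and let the
# fmt string inflate it to the full-sized input when the bundled input is
# retrieved; the "{}" placeholder is filled with the expression for the stored
# value:
#   InflatableArg(value=torch.zeros(1), fmt="{}.expand(1, 3, 224, 224)")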
def bundle_inputs(
model: torch.jit.ScriptModule,
inputs: Union[Optional[Sequence[tuple[Any, ...]]], dict[Callable, Optional[Sequence[tuple[Any, ...]]]]],
info: Optional[Union[list[str], dict[Callable, list[str]]]] = None,
*,
_receive_inflate_expr: Optional[list[str]] = None,
) -> torch.jit.ScriptModule:
"""Create and return a copy of the specified model with inputs attached.
The original model is not mutated or changed in any way.
Models with bundled inputs can be invoked in a uniform manner by
benchmarking and code coverage tools.
If inputs is passed in as a list then the inputs will be bundled for 'forward'.
If inputs is instead passed in as a map then all the methods specified in the map
    will have their corresponding inputs bundled. Info should match whichever type is
chosen for the inputs.
The returned model will support the following methods:
`get_all_bundled_inputs_for_<function_name>() -> List[Tuple[Any, ...]]`
Returns a list of tuples suitable for passing to the model like
`for inp in model.get_all_bundled_inputs_for_foo(): model.foo(*inp)`
`get_bundled_inputs_functions_and_info() -> Dict[str, Dict[str: List[str]]]`
Returns a dictionary mapping function names to a metadata dictionary.
This nested dictionary maps preset strings like:
'get_inputs_function_name' -> the name of a function attribute in this model that can be
run to get back a list of inputs corresponding to that function.
'info' -> the user provided extra information about the bundled inputs
If forward has bundled inputs then these following functions will also be defined on the returned module:
`get_all_bundled_inputs() -> List[Tuple[Any, ...]]`
Returns a list of tuples suitable for passing to the model like
`for inp in model.get_all_bundled_inputs(): model(*inp)`
`get_num_bundled_inputs() -> int`
Equivalent to `len(model.get_all_bundled_inputs())`,
but slightly easier to call from C++.
Inputs can be specified in one of two ways:
- The model can define `_generate_bundled_inputs_for_<function_name>`.
If the user chooses this method inputs[<function>] should map to None
- The `inputs` argument to this function can be a dictionary mapping functions to a
list of inputs, of the same form that will be returned by get_all_bundled_inputs_for_<function_name>.
Alternatively if only bundling inputs for forward the map can be omitted and a singular list of inputs
can be provided instead.
The type of the inputs is List[Tuple[Any, ...]]. The outer list corresponds with a
list of inputs, the inner tuple is the list of args that together make up one input.
For inputs of functions that take one arg, this will be a tuple of length one. The Any, ...
is the actual data that makes up the args, e.g. a tensor.
Info is an optional parameter that maps functions to a list of strings providing extra information about that
function's bundled inputs. Alternatively if only bundling inputs for forward the map can be omitted and
a singular list of information can be provided instead. This could be descriptions, expected outputs, etc.
- Ex: info={model.forward : ['man eating icecream', 'an airplane', 'a dog']}
This function will attempt to optimize arguments so that (e.g.)
arguments like `torch.zeros(1000)` will be represented compactly.
Only top-level arguments will be optimized.
Tensors in lists or tuples will not.
"""
if not isinstance(model, torch.jit.ScriptModule):
raise Exception("Only ScriptModule is supported.") # noqa: TRY002
ignored_methods, ignored_attrs = _get_bundled_inputs_attributes_and_methods(model)
clone = torch._C._hack_do_not_use_clone_module_with_class( # type: ignore[attr-defined]
model._c,
ignored_methods,
ignored_attrs,
)
    # The above cloning function returns a torch._C.ScriptModule and we need a torch.jit.ScriptModule.
    # Fortunately there's a function in _recursive that does exactly that conversion.
cloned_module = wrap_cpp_module(clone)
if isinstance(inputs, dict):
assert isinstance(info, dict) or info is None
augment_many_model_functions_with_bundled_inputs(cloned_module, inputs, _receive_inflate_expr, info)
else:
assert isinstance(info, list) or info is None
augment_model_with_bundled_inputs(cloned_module, inputs, _receive_inflate_expr, info)
return cloned_module
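# Illustrative usage sketch (MyModule and the input shape are hypothetical):
#   scripted = torch.jit.script(MyModule())
#   bundled = bundle_inputs(scripted, inputs=[(torch.zeros(1, 3, 224, 224),)])
#   for inp in bundled.get_all_bundled_inputs():
#       bundled(*inp)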
def augment_model_with_bundled_inputs(
model: torch.jit.ScriptModule,
inputs: Optional[Sequence[tuple[Any, ...]]] = None,
_receive_inflate_expr: Optional[list[str]] = None, # For debugging.
info: Optional[list[str]] = None, # Optional argument to provide info about forward or its inputs
skip_size_check=False,
) -> None:
"""Add bundled sample inputs to a model for the forward function.
Models with bundled inputs can be invoked in a uniform manner by
benchmarking and code coverage tools.
Augmented models will support the following methods:
`get_all_bundled_inputs() -> List[Tuple[Any, ...]]`
Returns a list of tuples suitable for passing to the model like
`for inp in model.get_all_bundled_inputs(): model(*inp)`
`get_num_bundled_inputs() -> int`
Equivalent to `len(model.get_all_bundled_inputs())`,
but slightly easier to call from C++.
`get_bundled_inputs_functions_and_info() -> Dict[str, Dict[str: List[str]]]`
Returns a dictionary mapping function names to a metadata dictionary.
This nested dictionary maps preset strings like:
'get_inputs_function_name' -> the name of a function attribute in this model that can be
run to get back a list of inputs corresponding to that function.
'info' -> the user provided extra information about the bundled inputs
Inputs can be specified in one of two ways:
- The model can define `_generate_bundled_inputs_for_forward`.
If the user chooses this method inputs should be None
- `inputs` is a list of inputs of form List[Tuple[Any, ...]]. A list of tuples where the elements
of each tuple are the args that make up one input.
"""
if not isinstance(model, torch.jit.ScriptModule):
raise Exception("Only ScriptModule is supported.") # noqa: TRY002
forward: Callable = model.forward
# Sometimes forward won't have a __name__ attached, so set one just in case
if not hasattr(forward, "__name__"):
forward.__name__ = 'forward'
augment_many_model_functions_with_bundled_inputs(
model,
inputs={forward : inputs},
_receive_inflate_expr=_receive_inflate_expr,
info={forward : info} if info else None,
skip_size_check=skip_size_check,
)
def augment_many_model_functions_with_bundled_inputs(
model: torch.jit.ScriptModule,
inputs: dict[Callable, Optional[Sequence[tuple[Any, ...]]]],
_receive_inflate_expr: Optional[list[str]] = None, # For debugging.
info: Optional[dict[Callable, list[str]]] = None, # Optional argument to provide info about the function or its inputs
skip_size_check=False,
) -> None:
"""Add bundled sample inputs to a model for an arbitrary list of public functions.
Models with bundled inputs can be invoked in a uniform manner by
benchmarking and code coverage tools.
Augmented models will support the following methods:
`get_all_bundled_inputs_for_<function_name>() -> List[Tuple[Any, ...]]`
Returns a list of tuples suitable for passing to the model like
`for inp in model.get_all_bundled_inputs_for_foo(): model.foo(*inp)`
`get_bundled_inputs_functions_and_info() -> Dict[str, Dict[str: List[str]]]`
Returns a dictionary mapping function names to a metadata dictionary.
This nested dictionary maps preset strings like:
'get_inputs_function_name' -> the name of a function attribute in this model that can be
run to get back a list of inputs corresponding to that function.
'info' -> the user provided extra information about the bundled inputs
If forward has bundled inputs then these following functions are also defined:
`get_all_bundled_inputs() -> List[Tuple[Any, ...]]`
Returns a list of tuples suitable for passing to the model like
`for inp in model.get_all_bundled_inputs(): model(*inp)`
`get_num_bundled_inputs() -> int`
Equivalent to `len(model.get_all_bundled_inputs())`,
but slightly easier to call from C++.
Inputs can be specified in one of two ways:
- The model can define `_generate_bundled_inputs_for_<function_name>`.
If the user chooses this method, inputs[<function>] should map to None
- The `inputs` argument to this function can be a dictionary mapping functions to a
list of inputs, of the same form that will be returned by get_all_bundled_inputs_for_<function_name>.
The type of the inputs is List[Tuple[Any, ...]]. The outer list corresponds with a
list of inputs, the inner tuple is the list of args that together make up one input.
For inputs of functions that take one arg, this will be a tuple of length one. The Any, ...
is the actual data that makes up the args, e.g. a tensor.
Info is an optional parameter that maps functions to a list of strings providing extra information about that
function's bundled inputs. This could be descriptions, expected outputs, etc.
- Ex: info={model.forward : ['man eating icecream', 'an airplane', 'a dog']}
This function will attempt to optimize arguments so that (e.g.)
arguments like `torch.zeros(1000)` will be represented compactly.
Only top-level arguments will be optimized.
Tensors in lists or tuples will not.
"""
if not isinstance(model, torch.jit.ScriptModule):
raise Exception("Only ScriptModule is supported.") # noqa: TRY002
if not inputs:
raise Exception("Please provide inputs for at least 1 function") # noqa: TRY002
if hasattr(model, "get_all_bundled_inputs") or hasattr(model, "get_bundled_inputs_functions_and_info"):
raise Exception( # noqa: TRY002
"Models can only be augmented with bundled inputs once. "
"This Model seems to have already been augmented with "
"bundled inputs. Please start afresh with one that "
"doesn't have bundled inputs.",
)
get_bundled_inputs_functions_and_info_template = ""
for function, input_list in inputs.items():
if hasattr(function, "__name__"):
function_name = function.__name__
else:
if hasattr(function, "name"):
function_name = function.name # type: ignore[attr-defined]
else:
raise Exception( # noqa: TRY002
'At least one of your functions has no "name" attribute. Please ensure all of them have one, e.g. m.foo.name = "foo"')
if input_list is not None and not isinstance(input_list, Sequence):
raise TypeError(f"Error inputs for function {function_name} is not a Sequence")
function_arg_types = [arg.type for arg in function.schema.arguments[1:]] # type: ignore[attr-defined]
deflated_inputs_type: ListType = ListType(TupleType(function_arg_types))
model._c._register_attribute(f"_bundled_inputs_deflated_{function_name}", deflated_inputs_type, [])
if hasattr(model, "_generate_bundled_inputs_for_" + function_name):
if input_list is not None:
raise Exception( # noqa: TRY002
f"inputs[{function_name}] is not None, but _generate_bundled_inputs_for_{function_name} is already defined"
)
# Model author already defined _generate_bundled_inputs_for_<function_name>.
elif input_list is None or len(input_list) == 0:
raise Exception( # noqa: TRY002
f"inputs for {function_name} must be specified if "
f"_generate_bundled_inputs_for_{function_name} is not already defined"
)
else:
# Iterate over the inputs and args in each input.
# Accumulate `deflated_inputs` as (possibly) compressed values
# and `parts` to be joined into the expression that unpacks them.
deflated_inputs = []
parts = []
for inp_idx, args in enumerate(input_list):
if not isinstance(args, tuple) and not isinstance(args, list): # type: ignore[arg-type]
raise TypeError(
f"Error bundled input for function {function_name} idx: {inp_idx} is not a Tuple or a List"
)
deflated_args = []
parts.append("(")
for arg_idx, arg in enumerate(args):
inflate_helper_fn_name = _get_inflate_helper_fn_name(arg_idx, inp_idx, function_name)
deflated, inflater, helper_definition = _inflate_expr(
arg,
f"deflated[{inp_idx}][{arg_idx}]",
inflate_helper_fn_name,
skip_size_check=skip_size_check,
)
deflated_args.append(deflated)
parts.append(f" {inflater},")
if helper_definition:
model.define(textwrap.dedent(helper_definition))
deflated_inputs.append(tuple(deflated_args))
parts.append("),")
parts.append("")
expr = "\n".join(parts)
# Back-channel return this expr for debugging.
if _receive_inflate_expr is not None:
_receive_inflate_expr.append(expr)
setattr(model, f"_bundled_inputs_deflated_{function_name}", deflated_inputs)
definition = textwrap.dedent("""
def _generate_bundled_inputs_for_{name}(self):
deflated = self._bundled_inputs_deflated_{name}
return [
{expr}
]
""").format(expr=expr, name=function_name)
model.define(definition)
# Define get_all_bundled_inputs_for_<function_name> that caches the generated inputs.
model.define(textwrap.dedent("""
def get_all_bundled_inputs_for_{name}(self):
all_inputs = self._generate_bundled_inputs_for_{name}()
assert all_inputs is not None
return all_inputs
""").format(name=function_name))
# Add to the high level helper methods
inputs_info = repr(info[function]) if info and function in info else '[]'
get_bundled_inputs_functions_and_info_template += f"""
temp_dict : Dict[str,List[str]] = {{}}
info: List[str] = {inputs_info}
temp_dict['info'] = info
temp_dict['get_inputs_function_name'] = ['get_all_bundled_inputs_for_{function_name}']
all_inputs['{function_name}'] = temp_dict
"""
# To ensure backwards compatibility and a streamlined api for forward these wrappers are provided
if function_name == 'forward':
model.define(textwrap.dedent("""
def get_all_bundled_inputs(self):
return self.get_all_bundled_inputs_for_forward()
"""))
model.define(textwrap.dedent("""
def get_num_bundled_inputs(self):
return len(self.get_all_bundled_inputs_for_forward())
"""))
# Define some high level helper methods that act on all bundled inputs
model.define(textwrap.dedent(f"""
def get_bundled_inputs_functions_and_info(self):
all_inputs : Dict[str, Dict[str,List[str]]] = {{}}
{get_bundled_inputs_functions_and_info_template}
return all_inputs
"""))
def _inflate_expr(
arg: T, ref: str, inflate_helper_fn_name: str, skip_size_check: bool = False
) -> tuple[Union[T, torch.Tensor], str, Optional[str]]:
# Allow custom inflation expressions for any object.
# For example, calling custom image-decoding ops.
# Or just use "{}" as the format string to ignore size limits.
if isinstance(arg, InflatableArg):
if arg.fmt_fn:
if arg.fmt not in ["{}", ""]:
raise Exception( # noqa: TRY002
f"Bundled input argument at position '{ref}' has "
f"both arg.fmt_fn => \n{arg.fmt_fn} "
f"\n and arg.fmt => {arg.fmt}. "
"Please choose `arg.fmt` if the deflater is straightforward or "
"`arg.fmt_fn` if you need a function."
)
helper_definition = arg.fmt_fn.format(inflate_helper_fn_name)
expr = f"self.{inflate_helper_fn_name}({ref})"
return arg.value, expr, helper_definition
else:
return arg.value, arg.fmt.format(ref), None
if isinstance(arg, torch.Tensor):
# Small-storage tensors can just be saved directly.
if arg._typed_storage().size() <= MAX_RAW_TENSOR_SIZE or skip_size_check:
return arg, ref, None
# Small contiguous tensors can be cloned to have small storage.
# TODO: Should we do this even for non-contiguous tensors?
if arg.is_contiguous() and arg.numel() <= MAX_RAW_TENSOR_SIZE:
return arg.clone(), ref, None
# Example inputs commonly come from torch.zeros, torch.ones, or torch.full.
# These can be represented compactly.
for fmt in [torch.contiguous_format, torch.channels_last]:
if arg.is_contiguous(memory_format=fmt) and (arg == arg.flatten()[0]).all().item():
return (arg.flatten()[0].clone().expand(*arg.size()),
f"{ref}.contiguous(memory_format={fmt})", None)
# Prevent big tensors from being bundled by default.
# TODO: Provide more useful diagnostics.
raise Exception( # noqa: TRY002
f"Bundled input argument at position '{ref}' is "
f"a tensor with storage size {arg._typed_storage().size()}. "
f"You probably don't want to bundle this as an input. "
)
else:
return arg, ref, None
def _get_bundled_inputs_attributes_and_methods(script_module: torch.jit.ScriptModule) -> tuple[list[str], list[str]]:
methods: list[str] = []
attributes: list[str] = []
# Has bundled inputs for forward
if hasattr(script_module, 'get_all_bundled_inputs'):
methods.append('get_all_bundled_inputs')
methods.append('get_num_bundled_inputs')
methods.append('run_on_bundled_input')
if hasattr(script_module, 'get_bundled_inputs_functions_and_info'):
methods.append('get_bundled_inputs_functions_and_info')
all_info = script_module.get_bundled_inputs_functions_and_info()
for function_name in all_info:
methods.append("get_all_bundled_inputs_for_" + function_name)
methods.append("_generate_bundled_inputs_for_" + function_name)
attributes.append("_bundled_inputs_deflated_" + function_name)
bundled_inputs_fn = getattr(
script_module,
f"get_all_bundled_inputs_for_{function_name}"
)
num_bundled_inputs: int = len(bundled_inputs_fn())
# Check inflate helper functions for each function, argument and bundled input
func = getattr(script_module, function_name)
for arg_idx in range(len(func.schema.arguments) - 1):
for input_idx in range(num_bundled_inputs):
helper_fn_name = _get_inflate_helper_fn_name(
arg_idx=arg_idx,
input_idx=input_idx,
function_name=function_name
)
# if the arg has an InflatableArg with fmt_fn, add the helper function name
if hasattr(script_module, helper_fn_name):
methods.append(helper_fn_name)
return (methods, attributes)
def _get_inflate_helper_fn_name(
arg_idx: int,
input_idx: int,
function_name: str,
) -> str:
return f"_inflate_helper_for_{function_name}_input_{input_idx}_arg_{arg_idx}"
def bundle_randn(*size, dtype=None):
"""Generate a tensor that will be inflated with torch.randn."""
stub = torch.zeros(1, dtype=dtype).expand(*size)
return InflatableArg(value=stub, fmt="torch.randn_like({})")
def bundle_large_tensor(t):
"""Wrap a tensor to allow bundling regardless of size."""
return InflatableArg(value=t, fmt="{}")
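# Illustrative usage sketch (editorial addition, not part of the original
# module): a minimal, hypothetical example of attaching bundled inputs to a
# scripted module and replaying them. The tiny model and the helper name
# below are assumptions made purely for illustration.
def _example_bundle_and_replay():
    class _TinyModel(torch.nn.Module):
        def forward(self, x):
            return x + 1

    scripted = torch.jit.script(_TinyModel())
    # Each bundled input is a tuple holding the args for one forward() call.
    augment_model_with_bundled_inputs(
        scripted,
        inputs=[
            (torch.zeros(4),),      # small tensor, stored directly
            (bundle_randn(8, 8),),  # stub tensor, inflated with torch.randn
        ],
        info=["all zeros", "random 8x8"],
    )
    # Replay every bundled input through the model.
    for inp in scripted.get_all_bundled_inputs():
        scripted(*inp)
    return scripted.get_bundled_inputs_functions_and_info()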
```
|
=================================================================================================================
SOURCE CODE FILE: checkpoint.py
LINES: 15
SIZE: 66.04 KB
PATH: scripts\freecad_env\Lib\site-packages\torch\utils\checkpoint.py
ENCODING: utf-8
```py
# mypy: allow-untyped-defs
import contextlib
import platform
import uuid
import warnings
import weakref
from collections import defaultdict
from typing import * # noqa: F403
import enum
from weakref import ReferenceType
import torch
import torch.fx.traceback as fx_traceback
from torch._functorch._aot_autograd.functional_utils import is_fun
from torch.utils._pytree import tree_map
from torch.testing._internal.logging_tensor import capture_logs, LoggingTensorMode
from torch.utils._python_dispatch import TorchDispatchMode
__all__ = [
"checkpoint",
"checkpoint_sequential",
"CheckpointError",
"CheckpointFunction",
"check_backward_validity",
"detach_variable",
"get_device_states",
"set_device_states",
"noop_context_fn",
"set_checkpoint_early_stop",
"DefaultDeviceType",
"set_checkpoint_debug_enabled",
"CheckpointPolicy",
"SelectiveCheckpointContext",
"create_selective_checkpoint_contexts",
"SAC_IGNORED_OPS",
]
_DEFAULT_DETERMINISM_MODE = "default"
_checkpoint_debug_enabled: Optional[bool] = None
@contextlib.contextmanager
def set_checkpoint_debug_enabled(enabled: Optional[bool]):
"""
Context manager that sets whether checkpoint should print additional debug
information when running. See the ``debug`` flag for
:func:`~torch.utils.checkpoint.checkpoint` for more information. Note that
when set, this context manager overrides the value of ``debug`` passed to
checkpoint. To defer to the local setting, pass ``None`` to this context.
Args:
enabled (bool): Whether checkpoint should print debug information.
Default is 'None'.
"""
global _checkpoint_debug_enabled
try:
prev = _checkpoint_debug_enabled
_checkpoint_debug_enabled = enabled
yield
finally:
_checkpoint_debug_enabled = prev
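# Illustrative usage sketch (editorial addition): forcing debug behavior for
# every checkpoint in a region regardless of the per-call ``debug`` argument.
# The helper name and the inner ``fn`` are hypothetical.
def _example_debug_checkpoint(x):
    def fn(t):
        return t.sin().cos()

    # Checkpoints entered while this context is active behave as if
    # debug=True: operator logs are captured so that, if recomputation
    # mismatches the original forward, the resulting error includes traces.
    with set_checkpoint_debug_enabled(True):
        return checkpoint(fn, x, use_reentrant=False)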
def detach_variable(inputs: Tuple[Any, ...]) -> Tuple[torch.Tensor, ...]:
if isinstance(inputs, tuple):
out = []
for inp in inputs:
if not isinstance(inp, torch.Tensor):
out.append(inp)
continue
x = inp.detach()
x.requires_grad = inp.requires_grad
out.append(x)
return tuple(out)
else:
raise RuntimeError(
"Only tuple of tensors is supported. Got Unsupported input type: ",
type(inputs).__name__,
)
def check_backward_validity(inputs: Iterable[Any]) -> None:
if not any(inp.requires_grad for inp in inputs if isinstance(inp, torch.Tensor)):
warnings.warn(
"None of the inputs have requires_grad=True. Gradients will be None"
)
def _get_device_module(device="cuda"):
if device == "meta":
return torch.device("meta")
device_module = getattr(torch, device)
return device_module
class DefaultDeviceType:
r"""
A class that manages the default device type for checkpointing.
If no non-CPU tensors are present, the default device type will
be used. The default value is 'cuda'. The device type is used in
the checkpointing process when determining which device states
to save and restore for recomputation.
"""
_default_device_type = "cuda"
@staticmethod
def set_device_type(device: str = "cuda"):
"""
Set the default device type for checkpointing.
Args:
device (str): The device type to be set as default. Default is 'cuda'.
"""
DefaultDeviceType._default_device_type = device
@staticmethod
def get_device_type() -> str:
"""
Get the current default device type for checkpointing.
Returns:
str: The current default device type.
"""
return DefaultDeviceType._default_device_type
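# Editorial note (illustrative, hedged): when none of the tensor args seen by
# checkpoint live on a non-CPU device, this default decides which device
# module's RNG state is stashed. A hypothetical sketch for a build that ships
# a different accelerator backend (e.g. ``torch.xpu``, if available):
#
#     DefaultDeviceType.set_device_type("xpu")
#     assert DefaultDeviceType.get_device_type() == "xpu"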
def _infer_device_type(*args):
device_types = []
def add_device_types(arg):
nonlocal device_types
if isinstance(arg, torch.Tensor) and not arg.device.type == "cpu":
device_types.append(arg.device.type)
tree_map(add_device_types, args)
device_types_set = set(device_types)
if len(device_types_set) > 1:
warnings.warn(
"Tensor arguments, excluding CPU tensors, are detected on at least two types of devices. "
"Device state will only be saved for devices of a single device type, and the remaining "
"devices will be ignored. Consequently, if any checkpointed functions involve randomness, "
"this may result in incorrect gradients. (Note that if CUDA devices are among the devices "
"detected, it will be prioritized; otherwise, the first device encountered will be selected.)"
f"\nDevice types: {sorted(device_types_set)} first device type: {device_types[0]}"
)
if len(device_types) == 0:
return DefaultDeviceType.get_device_type()
elif "cuda" in device_types_set:
return "cuda"
else:
return device_types[0]
# We can't know if the run_fn will internally move some args to different devices,
# which would require logic to preserve rng states for those devices as well.
# We could paranoically stash and restore ALL the rng states for all visible devices,
# but that seems very wasteful for most cases. Compromise: Stash the RNG state for
# the device of all Tensor args.
#
# To consider: maybe get_device_states and set_device_states should reside in torch/random.py?
def get_device_states(*args) -> Tuple[List[int], List[torch.Tensor]]:
# This will not error out if "arg" is a CPU tensor or a non-tensor type because
# the conditionals short-circuit.
fwd_device_ids = []
def add_device_ids(arg):
nonlocal fwd_device_ids
if isinstance(arg, torch.Tensor) and arg.device.type not in {"cpu", "meta"}:
fwd_device_ids.append(arg.get_device())
tree_map(add_device_ids, args)
fwd_device_states = []
device_module = _get_device_module(_infer_device_type(*args))
for device_id in fwd_device_ids:
with device_module.device(device_id):
fwd_device_states.append(device_module.get_rng_state())
return fwd_device_ids, fwd_device_states
def set_device_states(devices, states, *, device_type=None) -> None:
"""Sets random number generator states for the specified devices.
Args:
devices: Device ids to set states for.
states: States to set.
device_type: ``device_type`` of the devices to set states for. Default
is the device returned by a call to ``DefaultDeviceType.get_device_type()``,
which is ``cuda`` if not changed by calling ``DefaultDeviceType::set_device_type()``.
"""
if device_type is None:
device_type = DefaultDeviceType.get_device_type()
if device_type == "meta":
return
device_module = _get_device_module(device_type)
for device, state in zip(devices, states):
with device_module.device(device):
device_module.set_rng_state(state)
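# Illustrative sketch (editorial addition): the stash/replay pattern these
# helpers support. Per-device RNG state is captured for the devices of the
# tensor args, the region is run once, the states are rewound, and the region
# is run again; both runs should then see identical randomness. The helper
# name ``_example_rng_replay`` is hypothetical.
def _example_rng_replay(run_fn, *args):
    cpu_state = torch.get_rng_state()
    devices, states = get_device_states(*args)
    first = run_fn(*args)
    # Rewind CPU and per-device RNG state, then rerun the region.
    torch.set_rng_state(cpu_state)
    set_device_states(devices, states, device_type=_infer_device_type(*args))
    second = run_fn(*args)
    return first, second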
def _get_autocast_kwargs(device_type="cuda"):
if torch.amp.is_autocast_available(device_type):
device_autocast_kwargs = {
"enabled": torch.is_autocast_enabled(device_type),
"dtype": torch.get_autocast_dtype(device_type),
"cache_enabled": torch.is_autocast_cache_enabled(),
}
else:
device_autocast_kwargs = None
cpu_autocast_kwargs = {
"enabled": torch.is_autocast_enabled('cpu'),
"dtype": torch.get_autocast_dtype('cpu'),
"cache_enabled": torch.is_autocast_cache_enabled(),
}
return device_autocast_kwargs, cpu_autocast_kwargs
class CheckpointFunction(torch.autograd.Function):
@staticmethod
def forward(ctx, run_function, preserve_rng_state, *args):
check_backward_validity(args)
ctx.run_function = run_function
ctx.preserve_rng_state = preserve_rng_state
# Accommodates the (remote) possibility that autocast is enabled for cpu AND gpu.
ctx.device_type = _infer_device_type(*args)
ctx.device_autocast_kwargs, ctx.cpu_autocast_kwargs = _get_autocast_kwargs(
ctx.device_type
)
if preserve_rng_state:
ctx.fwd_cpu_state = torch.get_rng_state()
# Don't eagerly initialize the cuda context by accident.
# (If the user intends that the context is initialized later, within their
# run_function, we SHOULD actually stash the cuda state here. Unfortunately,
# we have no way to anticipate this will happen before we run the function.)
ctx.had_device_in_fwd = False
device_module = _get_device_module(ctx.device_type)
if getattr(device_module, "_initialized", False):
ctx.had_device_in_fwd = True
ctx.fwd_devices, ctx.fwd_device_states = get_device_states(*args)
# Save non-tensor inputs in ctx, keep a placeholder None for tensors
# to be filled out during the backward.
ctx.inputs = []
ctx.tensor_indices = []
tensor_inputs = []
for i, arg in enumerate(args):
if torch.is_tensor(arg):
tensor_inputs.append(arg)
ctx.tensor_indices.append(i)
ctx.inputs.append(None)
else:
ctx.inputs.append(arg)
ctx.save_for_backward(*tensor_inputs)
with torch.no_grad():
outputs = run_function(*args)
return outputs
@staticmethod
def backward(ctx, *args):
if not torch.autograd._is_checkpoint_valid():
raise RuntimeError(
"When use_reentrant=True, torch.utils.checkpoint is incompatible"
" with .grad() or passing an `inputs` parameter to .backward()."
" To resolve this error, you can either set use_reentrant=False,"
" or call .backward() without passing the `inputs` argument."
)
# Copy the list to avoid modifying original list.
inputs = list(ctx.inputs)
tensor_indices = ctx.tensor_indices
tensors = ctx.saved_tensors
# Fill in inputs with appropriate saved tensors.
for i, idx in enumerate(tensor_indices):
inputs[idx] = tensors[i]
# Stash the surrounding rng state, and mimic the state that was
# present at this time during forward. Restore the surrounding state
# when we're done.
rng_devices = []
if ctx.preserve_rng_state and ctx.had_device_in_fwd:
rng_devices = ctx.fwd_devices
with torch.random.fork_rng(
devices=rng_devices, enabled=ctx.preserve_rng_state, device_type=ctx.device_type
):
if ctx.preserve_rng_state:
torch.set_rng_state(ctx.fwd_cpu_state)
if ctx.had_device_in_fwd:
set_device_states(ctx.fwd_devices, ctx.fwd_device_states, device_type=ctx.device_type)
detached_inputs = detach_variable(tuple(inputs))
device_autocast_ctx = torch.amp.autocast(
device_type=ctx.device_type, **ctx.device_autocast_kwargs
) if torch.amp.is_autocast_available(ctx.device_type) else contextlib.nullcontext()
with torch.enable_grad(), device_autocast_ctx, torch.amp.autocast("cpu", **ctx.cpu_autocast_kwargs): # type: ignore[attr-defined]
outputs = ctx.run_function(*detached_inputs)
if isinstance(outputs, torch.Tensor):
outputs = (outputs,)
# run backward() with only tensor that requires grad
outputs_with_grad = []
args_with_grad = []
for i in range(len(outputs)):
if torch.is_tensor(outputs[i]) and outputs[i].requires_grad:
outputs_with_grad.append(outputs[i])
args_with_grad.append(args[i])
if len(outputs_with_grad) == 0:
raise RuntimeError(
"none of output has requires_grad=True,"
" this checkpoint() is not necessary"
)
torch.autograd.backward(outputs_with_grad, args_with_grad)
grads = tuple(
inp.grad if isinstance(inp, torch.Tensor) else None
for inp in detached_inputs
)
return (None, None) + grads
def noop_context_fn():
return contextlib.nullcontext(), contextlib.nullcontext()
# TorchDynamo does not step inside utils.checkpoint function. The flow
# looks like this
# 1) TorchDynamo tries to wrap utils.checkpoint in a HigherOrderOp by
# speculatively checking if the forward function is safe to trace.
# 2) If yes, then Dynamo-generated Fx graph has the wrapped higher
# order op. As a result, TorchDynamo does not look inside utils.checkpoint.
# 3) If not, then TorchDynamo falls back to eager by performing a graph
# break. And here, the following disable wrapper ensures that
# TorchDynamo does not trigger again on the frames created by
# utils.checkpoint innards.
@torch._disable_dynamo
def checkpoint(
function,
*args,
use_reentrant: Optional[bool] = None,
context_fn: Callable[[], Tuple[ContextManager, ContextManager]] = noop_context_fn,
determinism_check: str = _DEFAULT_DETERMINISM_MODE,
debug: bool = False,
**kwargs
):
r"""Checkpoint a model or part of the model.
Activation checkpointing is a technique that trades compute for memory.
Instead of keeping tensors needed for backward alive until they are used in
gradient computation during backward, forward computation in checkpointed
regions omits saving tensors for backward and recomputes them during the
backward pass. Activation checkpointing can be applied to any part of a
model.
There are currently two checkpointing implementations available, determined
by the :attr:`use_reentrant` parameter. It is recommended that you use
``use_reentrant=False``. Please refer to the note below for a discussion of
their differences.
.. warning::
If the :attr:`function` invocation during the backward pass differs
from the forward pass, e.g., due to a global variable, the checkpointed
version may not be equivalent, potentially causing an
error being raised or leading to silently incorrect gradients.
.. warning::
The ``use_reentrant`` parameter should be passed explicitly. In version
2.4 we will raise an exception if ``use_reentrant`` is not passed.
If you are using the ``use_reentrant=True`` variant, please refer to the
note below for important considerations and potential limitations.
.. note::
The reentrant variant of checkpoint (``use_reentrant=True``) and
the non-reentrant variant of checkpoint (``use_reentrant=False``)
differ in the following ways:
* Non-reentrant checkpoint stops recomputation as soon as all needed
intermediate activations have been recomputed. This feature is enabled
by default, but can be disabled with :func:`set_checkpoint_early_stop`.
Reentrant checkpoint always recomputes :attr:`function` in its
entirety during the backward pass.
* The reentrant variant does not record the autograd graph during the
forward pass, as it runs with the forward pass under
:func:`torch.no_grad`. The non-reentrant version does record the
autograd graph, allowing one to perform backward on the graph within
checkpointed regions.
* The reentrant checkpoint only supports the
:func:`torch.autograd.backward` API for the backward pass without its
`inputs` argument, while the non-reentrant version supports all ways
of performing the backward pass.
* At least one input and output must have ``requires_grad=True`` for the
reentrant variant. If this condition is unmet, the checkpointed part
of the model will not have gradients. The non-reentrant version does
not have this requirement.
* The reentrant version does not consider tensors in nested structures
(e.g., custom objects, lists, dicts, etc) as participating in
autograd, while the non-reentrant version does.
* The reentrant checkpoint does not support checkpointed regions with
detached tensors from the computational graph, whereas the
non-reentrant version does. For the reentrant variant, if the
checkpointed segment contains tensors detached using ``detach()`` or
with :func:`torch.no_grad`, the backward pass will raise an error.
This is because ``checkpoint`` makes all the outputs require gradients
and this causes issues when a tensor is defined to have no gradient in
the model. To avoid this, detach the tensors outside of the
``checkpoint`` function.
Args:
function: describes what to run in the forward pass of the model or
part of the model. It should also know how to handle the inputs
passed as the tuple. For example, in LSTM, if user passes
``(activation, hidden)``, :attr:`function` should correctly use the
first input as ``activation`` and the second input as ``hidden``
preserve_rng_state(bool, optional): Omit stashing and restoring
the RNG state during each checkpoint. Note that under torch.compile,
this flag doesn't take effect and we always preserve RNG state.
Default: ``True``
use_reentrant(bool):
specify whether to use the activation checkpoint variant that
requires reentrant autograd. This parameter should be passed
explicitly. In version 2.5 we will raise an exception if
``use_reentrant`` is not passed. If ``use_reentrant=False``,
``checkpoint`` will use an implementation that does not require
reentrant autograd. This allows ``checkpoint`` to support additional
functionality, such as working as expected with
``torch.autograd.grad`` and support for keyword arguments input into
the checkpointed function.
context_fn(Callable, optional): A callable returning a tuple of two
context managers. The function and its recomputation will be run
under the first and second context managers respectively.
This argument is only supported if ``use_reentrant=False``.
determinism_check(str, optional): A string specifying the determinism
check to perform. By default it is set to ``"default"`` which
compares the shapes, dtypes, and devices of the recomputed tensors
against those of the saved tensors. To turn off this check, specify
``"none"``. Currently these are the only two supported values.
Please open an issue if you would like to see more determinism
checks. This argument is only supported if ``use_reentrant=False``;
if ``use_reentrant=True``, the determinism check is always disabled.
debug(bool, optional): If ``True``, error messages will also include
a trace of the operators run during the original forward computation
as well as the recomputation. This argument is only supported if
``use_reentrant=False``.
args: tuple containing inputs to the :attr:`function`
Returns:
Output of running :attr:`function` on :attr:`*args`
"""
if use_reentrant is None:
warnings.warn(
"torch.utils.checkpoint: the use_reentrant parameter should be "
"passed explicitly. In version 2.5 we will raise an exception "
"if use_reentrant is not passed. use_reentrant=False is "
"recommended, but if you need to preserve the current default "
"behavior, you can pass use_reentrant=True. Refer to docs for more "
"details on the differences between the two variants.",
stacklevel=2
)
use_reentrant = True
# Hack to mix *args with **kwargs in a python 2.7-compliant way
preserve = kwargs.pop("preserve_rng_state", True)
if kwargs and use_reentrant:
raise ValueError(
"Unexpected keyword arguments: " + ",".join(arg for arg in kwargs)
)
if use_reentrant:
if context_fn is not noop_context_fn or debug is not False:
raise ValueError(
"Passing `context_fn` or `debug` is only supported when "
"use_reentrant=False."
)
return CheckpointFunction.apply(function, preserve, *args)
else:
gen = _checkpoint_without_reentrant_generator(
function, preserve, context_fn, determinism_check, debug, *args, **kwargs
)
# Runs pre-forward logic
next(gen)
ret = function(*args, **kwargs)
# Runs post-forward logic
try:
next(gen)
except StopIteration:
return ret
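# Illustrative usage sketch (editorial addition) for the non-reentrant
# variant documented above; ``_example_checkpointed_block`` and ``block``
# are hypothetical names used only for this example.
def _example_checkpointed_block(x):
    def block(t):
        # Activations produced here are not kept alive after the forward
        # pass; they are recomputed when backward reaches this region.
        return t.sin().cos().exp()

    # ``x`` should have requires_grad=True for the recomputation to matter.
    return checkpoint(block, x, use_reentrant=False)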
def checkpoint_sequential(functions, segments, input, use_reentrant=None, **kwargs):
r"""Checkpoint a sequential model to save memory.
Sequential models execute a list of modules/functions in order
(sequentially). Therefore, we can divide such a model in various segments
and checkpoint each segment. All segments except the last will not store
the intermediate activations. The inputs of each checkpointed segment will
be saved for re-running the segment in the backward pass.
.. warning::
The ``use_reentrant`` parameter should be passed explicitly. In version
2.4 we will raise an exception if ``use_reentrant`` is not passed.
If you are using the ``use_reentrant=True`` variant, please see
:func:`~torch.utils.checkpoint.checkpoint` for
the important considerations and limitations of this variant. It is
recommended that you use ``use_reentrant=False``.
.. warning::
Since PyTorch 1.4, checkpoint_sequential allows only a single Tensor as the
input and as each intermediate output, just like :class:`torch.nn.Sequential`.
Args:
functions: A :class:`torch.nn.Sequential` or the list of modules or
functions (comprising the model) to run sequentially.
segments: Number of chunks to create in the model
input: A Tensor that is input to :attr:`functions`
preserve_rng_state(bool, optional): Omit stashing and restoring
the RNG state during each checkpoint.
Default: ``True``
use_reentrant(bool):
specify whether to use the activation checkpoint variant that
requires reentrant autograd. This parameter should be passed
explicitly. In version 2.5 we will raise an exception if
``use_reentrant`` is not passed. If ``use_reentrant=False``,
``checkpoint`` will use an implementation that does not require
reentrant autograd. This allows ``checkpoint`` to support additional
functionality, such as working as expected with
``torch.autograd.grad`` and support for keyword arguments input into
the checkpointed function.
Returns:
Output of running :attr:`functions` sequentially on :attr:`*inputs`
Example:
>>> # xdoctest: +SKIP("stub")
>>> model = nn.Sequential(...)
>>> input_var = checkpoint_sequential(model, chunks, input_var)
"""
if use_reentrant is None:
warnings.warn(
"torch.utils.checkpoint.checkpoint_sequential: the use_reentrant "
"parameter should be passed explicitly. "
"In version 2.5 we will raise an exception if use_reentrant "
"is not passed. use_reentrant=False is "
"recommended, but if you need to preserve the current default "
"behavior, you can pass use_reentrant=True. Refer to docs for more "
"details on the differences between the two variants."
)
use_reentrant = True
# Hack for keyword-only parameter in a python 2.7-compliant way
preserve = kwargs.pop("preserve_rng_state", True)
if kwargs:
raise ValueError(
"Unexpected keyword arguments: " + ",".join(arg for arg in kwargs)
)
def run_function(start, end, functions):
def forward(input):
for j in range(start, end + 1):
input = functions[j](input)
return input
return forward
if isinstance(functions, torch.nn.Sequential):
functions = list(functions.children())
segment_size = len(functions) // segments
# the last chunk has to be non-volatile
end = -1
for start in range(0, segment_size * (segments - 1), segment_size):
end = start + segment_size - 1
input = checkpoint(
run_function(start, end, functions),
input,
use_reentrant=use_reentrant,
preserve_rng_state=preserve,
)
return run_function(end + 1, len(functions) - 1, functions)(input)
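# Illustrative usage sketch (editorial addition): splitting a small
# Sequential into two checkpointed segments. The layer sizes and chunk
# count are arbitrary assumptions; ``x`` is expected to have shape (N, 16)
# and requires_grad=True for a meaningful backward.
def _example_checkpoint_sequential(x):
    model = torch.nn.Sequential(
        torch.nn.Linear(16, 16),
        torch.nn.ReLU(),
        torch.nn.Linear(16, 16),
        torch.nn.ReLU(),
    )
    return checkpoint_sequential(model, 2, x, use_reentrant=False)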
def _internal_assert(cond):
if not cond:
raise AssertionError(
"Something went unexpectedly wrong in activation checkpoint. "
"Please report this bug by filing an issue to PyTorch."
)
# NOTE [ Nestable Checkpoint ]
#
# The semantics of nested checkpoint can be defined by two basic rules.
# Following the two rules leads to an important implication that is central
# to motivating the design.
#
# Rule 1. Saved tensors are managed by inner-most checkpoint only and hidden
# from any outer layers of checkpoint.
#
# Rule 2. The inputs of inner checkpoints are treated as tensors saved to its
# parent checkpoint.
#
# Implication: To recompute any given saved tensor, we need to recompute all of
# the checkpoints wrapping it.
#
# Why is this implied? To unpack a saved tensor X during backward we need to
# recompute the inner-most checkpoint (#1), and in order to recompute that
# checkpoint I need to have its inputs, which are managed by that checkpoint's
# parent (#2), which thus also needs to be recomputed first. Continue this line
# of reasoning and we realize that in order to unpack X, all checkpoints that
# were active at the time X was saved need to be recomputed. (unless we have
# already done so in that backward for some other saved tensor).
#
# In practice, we use a noop autograd Function to save inputs as saved tensors.
# During unpack calling ctx.saved_tensor triggers the parent checkpoint to
# recompute.
#
# Rule 3. We should start recomputation as if there are no checkpoints currently
# active. Checkpoints encountered during recomputation are still
# respected.
#
# When we start recomputation, we push the saved variable hook meant for
# recomputation on the stack. See examples in Rule 6 for more context.
#
# * * * *
#
# Beyond the basic semantics specific to nested checkpoint, we impose several
# more constraints that may apply to checkpointing in general.
#
# Rule 4. Lifetime of recomputed tensors
#
# Recomputed tensors are considered specific to particular invocations
# of backward and are always cleared immediately as they are unpacked.
# In particular, we require this to happen even if retain_graph=True.
#
# [ Implementation details of Rule 4 ]
#
# If we were okay with recomputed tensors staying alive after backward is run
# with retain_graph=True, we would store recomputed variables as the values of a
# WeakKeyDictionary and pack strong references to the keys, so that as we
# backward, those packed keys would be cleared as long as retain_graph=False.
# Clearing the packed key clears the corresponding entry in the WKD.
#
# If we wish recomputed variables to be immediately cleared as we unpack them in
# the retain_graph=True case, we cannot rely on the packed keys to be cleared by
# backward automatically. Instead of packing the strong reference to the key
# directly, we pack a container object, which we manually clear as we unpack.
#
# An important detail is that if a second backward happens, the second
# recomputation needs to reset the container with a newly created key.
#
# Rule 5. Stop recomputation as soon as we've recomputed the saved tensors we
# know we need.
#
# [ Implementation details of Rule 5 ]
#
# During recomputation, raise an exception if the number of recomputed tensors
# matches the number of tensors that we expected to recompute. We wrap the
# recomputation call with a try-catch to catch this specific exception. See
# Rule #6 below for some examples.
#
# Rule 6. We support doing backward inside checkpoint context
#
# [ retain_graph is True]
#
# def fn(x):
# y = x.sin()
# z = y.cos()
# gx, = torch.autograd.grad(z, x, retain_graph=True)
# return gx, z
#
# out = checkpoint(fn)(inp)
# out.backward()
#
# Because z is saved by cos while checkpoint is enabled, it would not be
# actually saved, and so the .grad() call inside must trigger a recomputation.
#
# During recomputation the "inner pack hook" has two responsibilities:
#
# 1) As usual, populating the WeakKeyDictionary storing recomputed tensors
# 2) Pack the actual tensor (detached) so that one may perform backward on the
# recomputed graph. The tensors saved to this graph will live until the end
# of recomputation, or die earlier if someone performs backward with
# retain_graph=False.
#
# More generally performing backward on the recomputed graph occurs in the
# following cases:
# - If backward is performed inside forward,
# - During the original forward IF early-stop is disabled
# - During the original backward
# - If there are multiple .grad()/.backward() calls, we would perform backward
# on the recomputed graph even if early-stop is enabled (see the example below)
#
# [ retain_graph is False ]
#
# The example below shows what happens if during recomputation we find that some
# of the tensors we are trying to recompute have already been cleared.
#
# Spoiler: we don't do anything special, we just skip over them!
#
# def fn(x):
# y = x.sin() # (1)
# z = y.cos() # (2)
# gx, = torch.autograd.grad(z, x) # (3)
# return x.cos() * gx # (4)
#
# out = checkpoint(fn)(inp)
# out.backward() # (5)
#
# 1, 2. Don't save x and y since we are inside a checkpoint.
# 3. Trigger a recompute of fn since x and y weren't saved.
# And depending on whether early stop is enabled, either stop at (2) or
# continue running the function.
# Because we are running backward with retain_graph=False, we clear x and y's
# holders.
# 4. Don't save x since we are inside a checkpoint.
# 5. Calling backward triggers another recompute of fn. During recompute, we see
# that x and y have already been cleared in the original graph as indicated
# by holder=None. We skip over them. We still save x at (4) (since its holder
# is still alive.)
_enable_checkpoint_early_stop = True
@contextlib.contextmanager
def set_checkpoint_early_stop(enable: bool):
"""Context manager that sets whether checkpoint should stop recomputation early.
By default, non-reentrant checkpoint stops recomputation as soon as it
has computed all needed Tensors. This context manager can be used to disable
that feature if it is problematic for your specific application.
This context manager only needs to be active when forward is run. It does
not need to be active during backward.
Example::
>>> # xdoctest: +SKIP(failing)
>>> message = "saved tensors default hooks are disabled"
>>> with set_checkpoint_early_stop(False):
... # Any checkpoint under this context manager will respect this
... # context manager, even if its backward is performed outside.
... out = checkpoint(fn, inputs)
...
>>> out.backward()
"""
global _enable_checkpoint_early_stop
try:
prev = _enable_checkpoint_early_stop
_enable_checkpoint_early_stop = enable
yield
finally:
_enable_checkpoint_early_stop = prev
class _Handle:
pass
class _Holder:
def __init__(self):
self.handles: Dict[int, Optional[_Handle]] = {}
class _NoopSaveInputs(torch.autograd.Function):
@staticmethod
def forward(*args):
return torch.empty((0,))
@staticmethod
def setup_context(ctx: Any, inputs: Tuple[Any, ...], output: Any) -> None:
# Only tensors can be saved with ctx.save_for_backward, everything else
# is captured by get_args, which is saved directly on ctx
tensor_indices, tensors = zip(
*[(i, o) for i, o in enumerate(inputs) if isinstance(o, torch.Tensor)]
)
idx2saved_idx = {b: a for a, b in enumerate(tensor_indices)}
# args but with tensors replaced with None as placeholders
args = [None if isinstance(o, torch.Tensor) else o for o in inputs]
def get_args(saved_tensors):
# restore the placeholders with the original tensors grabbed from
# ctx.saved_tensors (which may be saved on a parent checkpoint if
# this checkpoint is nested, and that would trigger a recursive
# unpack!)
ret = [
saved_tensors[idx2saved_idx[i]] if i in tensor_indices else o
for i, o in enumerate(args)
]
# grab the tail since we also saved the dummy to avoid having to explicitly
# handle the case where there are no tensor inputs
return ret[1:]
ctx.get_args = get_args
ctx.save_for_backward(*tensors)
@staticmethod
def backward(ctx, *grad_outputs):
raise AssertionError("Did not expect to backward on this graph")
class _CheckpointFrame:
def __init__(self, recompute_fn, early_stop, unpack_error_cb, metadata_fn):
self.recompute_fn = recompute_fn
self.input_saver = None
self.weak_holders: List[ReferenceType] = []
# We store this as a weakkeydictionary so that in the case of a partial
# backward, the entries in the dict are cleared alongside the Holder
# which will be removed when the SavedVariable is cleared.
self.recomputed: DefaultDict[
int, weakref.WeakKeyDictionary[_Handle, torch.Tensor]
] = defaultdict(weakref.WeakKeyDictionary)
# We need both recomp_counter and recomputed since they can diverge
# https://github.com/pytorch/pytorch/pull/90105#discussion_r1135889885
self.recomp_counter: DefaultDict[int, int] = defaultdict(int)
self.is_recomputed: DefaultDict[int, bool] = defaultdict(bool)
# See Rule 5
self.early_stop = early_stop
# Debugging
self.metadata_fn = metadata_fn
self.unpack_error_cb = unpack_error_cb
self.x_metadatas = []
self.forward_completed = False
self.ignore_saved_mismatch = False
def check_recomputed_tensors_match(self, gid):
if self.ignore_saved_mismatch:
# TODO: we can probably make this check stricter by checking that
# the metadata of the first tensors still match.
return
# NOTE [ Error handling for checkpoint ]
#
# At a high level, we need to check that the tensors saved
# during original forward matches tensors saved during recompute
# This means handling 3 cases:
#
# 1. During recompute, more tensors were saved.
#
# Usually this case is hidden by the _StopRecomputationError;
# if early stop is not enabled we will have errored already
# because there aren't enough weak_holders, and we want that
# error to be a nice one. See the _recomputation_hook
# for details.
if not len(self.weak_holders) == self.recomp_counter[gid]:
# 2. During recompute, fewer tensors were saved
#
# We know that every time we save something during the original
# forward we append to weak_holders, and every time we save a
# tensor during recompute we increment recomp_counter.
raise CheckpointError(
"torch.utils.checkpoint: A different number of tensors was saved "
"during the original forward and recomputation.\n"
f"Number of tensors saved during forward: {len(self.weak_holders)}\n"
f"Number of tensors saved during recomputation: {self.recomp_counter[gid]}"
)
# 3. During recompute, the same tensors were saved, but they
# have different metadata
nb_meta_different = []
for idx, weak_holder in enumerate(self.weak_holders):
holder = weak_holder()
if holder is None:
continue
# We've seen all holders since we iterate over them in order
# For every holder that is still alive now, it must've been
# alive when we saw it during recompute, therefore, the
# gid must be set.
_internal_assert(gid in holder.handles)
# We know this is the first unpack, so it couldn't have been set
# to None yet.
_internal_assert(holder.handles[gid] is not None)
# We always set these together in the recomputation hook
_internal_assert(holder.handles[gid] in self.recomputed[gid])
# see pack hook, x_metadata is 1:1 with weak_holders.
x_meta = self.x_metadatas[idx]
recomputed_x = self.recomputed[gid][holder.handles[gid]]
if x_meta != self.metadata_fn(recomputed_x):
nb_meta_different.append((idx, x_meta, self.metadata_fn(recomputed_x)))
if len(nb_meta_different) > 0:
mismatched_tensors = ""
for idx, x_meta, recomputed_meta in nb_meta_different:
mismatched_tensors += (
f"tensor at position {idx}:\n"
f"saved metadata: {x_meta}\n"
f"recomputed metadata: {recomputed_meta}\n"
)
raise CheckpointError(
"torch.utils.checkpoint: Recomputed values for the following tensors "
"have different metadata than during the forward pass.\n"
f"{mismatched_tensors}"
)
_checkpoint_error_template = """ \
An error happened while unpacking tensors; dumping logs of latest computation
because you passed `debug=True` to `torch.utils.checkpoint.checkpoint()`.
Scroll all the way down for guidance on how to navigate these logs.
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~+
| 1. Stack traces of the operators that ran in the original forward |
+------------------------------------------------------------------------------+
{forward_traces}
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~+
| 2. Stack traces of the operators that ran during recomputation |
+------------------------------------------------------------------------------+
{recompute_traces}
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~+
| 3. Log of operators in the original forward and recomputation |
+------------------------------------------------------------------------------+
(Scroll up to correlate stack traces with each operation listed below. This
helps identify their source in the code.)
IMPORTANT: Differences in "detach" calls between the original forward and the
recomputation are expected. They are introduced by the checkpointing
mechanism and can be ignored.
Operations executed during the original forward:
{forward_ops}
Operations executed during recomputation:
{recompute_ops}
+------------------------------------------------------------------------------+
ERROR: Detected non-determinism while running activation checkpointing
You are seeing this error because you passed `debug=True` to checkpoint and
the tensors saved during the original forward differ from those saved
during recomputation. This can happen if different operators were run in the
original forward and in the recomputation.
To identify where the mismatch may be coming from, you can do the following:
1) Compare the operators ran during original forward and recomputation to
see where they differ. These operators are printed above in the order they
were executed.
2) Review the stack trace for each operator to locate its invocation source.
Each operator's stack trace is printed in their execution order.
Note that the logs can be quite long. Here's how they are structured:
(Tip: you can Ctrl-f for these headers)
1. Stack traces of the operators that ran in the original forward
2. Stack traces of the operators that ran during recomputation
3. Log of operators in the original forward and recomputation
4. Error message <--- You are here
--------------------------------------------------------------------------------
"""
class CheckpointError(RuntimeError):
pass
def _get_debug_context_and_cb() -> Tuple[Callable[[], Any], Callable[[CheckpointError], None]]:
# This function returns the context_fn and error_cb to be used by the
# checkpointing mechanism. error_cb is invoked when an error is detected
# during unpack.
# record_context_cpp is not supported on non-Linux, non-x86_64 platforms
cpp_tb = platform.machine() == 'x86_64' and platform.system() == 'Linux'
class CaptureLogs:
def __init__(self):
self.logs = None
self.tbs = None
def get_context_manager(self):
@contextlib.contextmanager
def logging_mode():
with LoggingTensorMode(), \
capture_logs(True, python_tb=True, script_tb=True, cpp_tb=cpp_tb) as logs_and_tb:
self.logs, self.tbs = logs_and_tb
yield logs_and_tb
return logging_mode()
capture_logs_fwd = CaptureLogs()
capture_logs_recompute = CaptureLogs()
def unpack_error_cb(e: CheckpointError):
def get_str_tb(label, capture_logs):
out = ""
total_len = len(capture_logs.logs)
for i, (log, tb) in enumerate(zip(capture_logs.logs, capture_logs.tbs)):
out += f"{log} ({i + 1} of {total_len} in {label})\n\n"
found_torch_dispatch = False
for line in tb:
# Start printing stack trace only after __torch_dispatch__ is found
is_torch_dispatch = line['name'] == '__torch_dispatch__'
if not found_torch_dispatch and not is_torch_dispatch:
continue
elif is_torch_dispatch:
found_torch_dispatch = True
continue
out += f"{line['filename']}:{line['line']}:{line['name']}\n"
out += "\n\n"
return out
assert capture_logs_fwd.logs is not None
assert capture_logs_recompute.logs is not None
raise CheckpointError(
_checkpoint_error_template.format(
forward_traces=get_str_tb("original", capture_logs_fwd),
recompute_traces=get_str_tb("recompute", capture_logs_recompute),
forward_ops="\n".join(capture_logs_fwd.logs),
recompute_ops="\n".join(capture_logs_recompute.logs)
)
) from e
def context_fn():
return capture_logs_fwd.get_context_manager(), capture_logs_recompute.get_context_manager()
return context_fn, unpack_error_cb
def _default_meta_extractor(x: torch.Tensor) -> Dict[str, Any]:
# These properties are fast to check, easy to understand
return {
"shape": x.shape,
"dtype": x.dtype,
"device": x.device
}
_allowed_determinism_checks_to_fns: Dict[str, Callable[[torch.Tensor], Any]] = {
_DEFAULT_DETERMINISM_MODE: _default_meta_extractor,
"none": lambda _: None,
}
# See Rule 5
class _StopRecomputationError(Exception):
pass
class _recomputation_hook(torch.autograd.graph.saved_tensors_hooks):
def __init__(self, target_frame_ref: ReferenceType, gid: int):
def pack_hook(x):
x = x.detach() if x.requires_grad else x
target_frame = target_frame_ref()
assert target_frame is not None # appease mypy
recomp_idx = target_frame.recomp_counter[gid]
target_frame.recomp_counter[gid] += 1
if recomp_idx >= len(target_frame.weak_holders):
assert not target_frame.early_stop
if not target_frame.forward_completed:
# We run into this case when early stop is not enabled and we
# call grad within checkpoint.
# We need to set this flag, so we don't error out later when
# we check if the number of tensors saved during forward and
# recomputation match.
target_frame.ignore_saved_mismatch = True
return x
raise CheckpointError(
"torch.utils.checkpoint: trying to save more tensors during "
"recomputation than during the original forward pass."
)
holder = target_frame.weak_holders[recomp_idx]()
# This holder may have been cleared because someone may have called
# backward within forward. If so, we don't need to save.
if holder is not None:
_internal_assert(holder.handles.get(gid, None) is None)
holder.handles[gid] = _Handle()
target_frame.recomputed[gid][holder.handles[gid]] = x
if target_frame.early_stop and target_frame.recomp_counter[gid] == len(
target_frame.weak_holders
):
raise _StopRecomputationError
# See Rule 6: [ retain_graph is True ] above
return x
def unpack_hook(x):
# See Rule 6: [ retain_graph is True ] above for an example of when
# the graph created during recomputation could be backwarded.
return x
super().__init__(pack_hook, unpack_hook)
class _checkpoint_hook(torch.autograd.graph.saved_tensors_hooks):
def __init__(self, frame):
def pack_hook(x):
# See Rule 4 above
holder = _Holder()
frame.weak_holders.append(weakref.ref(holder))
# Save metadata to detect non-determinism
if frame.metadata_fn is not None:
with torch.no_grad():
frame.x_metadatas.append(frame.metadata_fn(x))
return holder
def unpack_hook(holder):
gid = torch._C._current_graph_task_id()
if gid == -1:
# generate a temporary id if we trigger unpack outside of a backward call
gid = int(uuid.uuid4())
if not frame.is_recomputed[gid]:
ctx = frame.input_saver.grad_fn
args = ctx.get_args(ctx.saved_tensors)
try:
with _recomputation_hook(
weakref.ref(frame), gid
), torch.autograd.enable_grad():
frame.recompute_fn(*args)
except _StopRecomputationError:
pass
frame.is_recomputed[gid] = True
frame.check_recomputed_tensors_match(gid)
_internal_assert(gid in holder.handles)
if holder.handles[gid] is None:
raise CheckpointError(
"torch.utils.checkpoint: Unpack is being triggered for a tensor that was already "
"unpacked once. If you are calling ctx.saved_tensors in backward, make sure to do "
"so only once. Otherwise please open an issue with details on your use case."
)
_internal_assert(holder.handles[gid] in frame.recomputed[gid])
ret = frame.recomputed[gid][holder.handles[gid]]
holder.handles[gid] = None
return ret
if frame.unpack_error_cb is not None:
def unpack_hook_with_error_cb(holder):
try:
return unpack_hook(holder)
except CheckpointError as e:
frame.unpack_error_cb(e)
super().__init__(pack_hook, unpack_hook_with_error_cb)
else:
super().__init__(pack_hook, unpack_hook)
def _is_compiling(func, args, kwargs):
# Check if we are under AOTAutograd tracing
# There should probably be a better way to do this...
# TODO: unify _is_compiling across all compile stacks
for arg in args:
if isinstance(arg, torch.Tensor) and is_fun(arg):
return True
return False
class _VersionWrapper:
# Check that cached tensors are not mutated.
def __init__(self, val):
self.val: Union[torch.Tensor, Any] = val
self.version: Optional[int] = val._version if isinstance(val, torch.Tensor) else None
def get_val(self, allow_cache_entry_mutation):
if self.version is not None and not allow_cache_entry_mutation:
if self.val._version != self.version:
# Can we give the user a stack trace of where the mutation happened?
raise RuntimeError(
"Tensor cached during selective activation checkpoint has been mutated"
)
return self.val
def _maybe_detach(x, any_ret_has_alias_info):
# We detach for two separate reasons:
# - For view ops, we need to ensure that when the tensor is returned from
# CachedDispatchMode, as_view sees that the AutogradMeta is nullptr
# - Avoid reference cycles
# For case 1, it is not enough to check whether x has differentiable dtype
# because non-differentiable dtype can have non-nullptr AutogradMeta, e.g.
# when the tensor is a view.
if isinstance(x, torch.Tensor) and (x.is_floating_point() or x.is_complex() or any_ret_has_alias_info):
with torch._C._SetExcludeDispatchKeyGuard(torch._C.DispatchKey.ADInplaceOrView, False):
# Ensure that view performed beneath autograd properly propagates
# version counter. TODO: Use reentrant_dispatch instead of
# manually manipulating dispatch keys. Using reentrant_dispatch
# would respect inference_mode, though that is not relevant for
# this case.
x = x.detach()
return x
class SelectiveCheckpointContext:
"""
Context passed to policy function during selective checkpointing.
This class is used to pass relevant metadata to the policy function during
selective checkpointing. The metadata includes whether the current invocation
of the policy function is during recomputation or not.
Example:
>>> # xdoctest: +SKIP(stub)
>>>
>>> def policy_fn(ctx, op, *args, **kwargs):
>>> print(ctx.is_recompute)
>>>
>>> context_fn = functools.partial(create_selective_checkpoint_contexts, policy_fn)
>>>
>>> out = torch.utils.checkpoint.checkpoint(
>>> fn, x, y,
>>> use_reentrant=False,
>>> context_fn=context_fn,
>>> )
"""
def __init__(self, *, is_recompute):
self.is_recompute = is_recompute
class CheckpointPolicy(enum.Enum):
"""
Enum for specifying the policy for checkpointing during backpropagation.
The following policies are supported:
- ``{MUST,PREFER}_SAVE``: The operation's output will be saved during the forward
pass and will not be recomputed during the backward pass
- ``{MUST,PREFER}_RECOMPUTE``: The operation's output will not be saved during the
forward pass and will be recomputed during the backward pass
Use ``MUST_*`` over ``PREFER_*`` to indicate that the policy should not be overridden
by other subsystems like `torch.compile`.
.. note::
A policy function that always returns ``PREFER_RECOMPUTE`` is
equivalent to vanilla checkpointing.
A policy function that returns ``PREFER_SAVE`` for every op is
NOT equivalent to not using checkpointing. Using such a policy would
save additional tensors not limited to ones that are actually needed for
gradient computation.
"""
MUST_SAVE = 0
PREFER_SAVE = 1
MUST_RECOMPUTE = 2
PREFER_RECOMPUTE = 3
def _policy_from_bool(b):
# For backward compatibility
return CheckpointPolicy.MUST_SAVE if b else CheckpointPolicy.PREFER_RECOMPUTE
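# Illustrative sketch (editorial addition) of a selective-checkpoint policy:
# save matmul outputs during forward and recompute everything else. The
# helper names below are hypothetical and not part of the public API.
def _example_sac_policy(ctx, op, *args, **kwargs):
    if op == torch.ops.aten.mm.default:
        return CheckpointPolicy.MUST_SAVE
    return CheckpointPolicy.PREFER_RECOMPUTE


def _example_sac_checkpoint(fn, *args):
    import functools
    # create_selective_checkpoint_contexts (defined below) turns the policy
    # into the pair of context managers that checkpoint expects.
    context_fn = functools.partial(
        create_selective_checkpoint_contexts, _example_sac_policy
    )
    return checkpoint(fn, *args, use_reentrant=False, context_fn=context_fn)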
SAC_IGNORED_OPS = {
# AC inserts different number of detach during forward and recompute.
torch.ops.aten.detach.default,
# AC's determinism check invokes additional metadata ops during forward.
# With subclasses involved, these metadata ops become dispatchable; this
# can result in incorrectness if these ops are selected to be cached.
torch.ops.prim.device.default,
} | set(torch._subclasses.functional_tensor.FunctionalTensor.metadata_fns)
class _CachingTorchDispatchMode(TorchDispatchMode):
# Used together with _CachedTorchDispatchMode to implement SAC.
def __init__(self, policy_fn, storage):
self.policy_fn = policy_fn
self.storage = storage
def __torch_dispatch__(self, func, types, args=(), kwargs=None):
if func in SAC_IGNORED_OPS:
return func(*args, **kwargs)
kwargs = {} if kwargs is None else kwargs
policy = self.policy_fn(SelectiveCheckpointContext(is_recompute=False),
func, *args, **kwargs)
if isinstance(policy, bool):
policy = _policy_from_bool(policy)
is_compiling = _is_compiling(func, args, kwargs)
if is_compiling:
# Overwrite each node's "recompute" tag to add in the user annotation.
fx_traceback.current_meta["recompute"] = policy
out = func(*args, **kwargs)
any_ret_has_alias_info = any(ret.alias_info is not None for ret in func._schema.returns)
if policy in (CheckpointPolicy.MUST_SAVE, CheckpointPolicy.PREFER_SAVE) or is_compiling:
self.storage[func].append(tree_map(lambda x: _VersionWrapper(_maybe_detach(x, any_ret_has_alias_info)), out))
return out
class _CachedTorchDispatchMode(TorchDispatchMode):
# Used together with _CachingTorchDispatchMode to implement SAC.
def __init__(self, policy_fn, storage, allow_cache_entry_mutation):
self.policy_fn = policy_fn
self.storage = storage
self.allow_cache_entry_mutation = allow_cache_entry_mutation
def __torch_dispatch__(self, func, types, args=(), kwargs=None):
if func in SAC_IGNORED_OPS:
return func(*args, **kwargs)
kwargs = {} if kwargs is None else kwargs
policy = self.policy_fn(SelectiveCheckpointContext(is_recompute=True),
func, *args, **kwargs)
if isinstance(policy, bool):
policy = _policy_from_bool(policy)
is_compiling = _is_compiling(func, args, kwargs)
if policy in (CheckpointPolicy.MUST_SAVE, CheckpointPolicy.PREFER_SAVE) or is_compiling:
storage = self.storage.get(func)
if storage is None:
raise RuntimeError(f"{func} encountered during backward, but not found in storage")
if len(storage) == 0:
raise RuntimeError(
"Trying to backward an extra time. You are only allowed to backward once "
"on any region computed under selective activation checkpoint."
)
out = tree_map(lambda x: x.get_val(self.allow_cache_entry_mutation), storage.pop(0))
else:
out = func(*args, **kwargs)
return out
def create_selective_checkpoint_contexts(policy_fn_or_list, allow_cache_entry_mutation=False):
"""
Helper to avoid recomputing certain ops during activation checkpointing.
Use this with `torch.utils.checkpoint.checkpoint` to control which
operations are recomputed during the backward pass.
Args:
policy_fn_or_list (Callable or List):
- If a policy function is provided, it should accept a
:class:`SelectiveCheckpointContext`, the :class:`OpOverload`, args and
kwargs to the op, and return a :class:`CheckpointPolicy` enum value
indicating whether the execution of the op should be recomputed or not.
- If a list of operations is provided, it is equivalent to a policy
returning `CheckpointPolicy.MUST_SAVE` for the specified
operations and `CheckpointPolicy.PREFER_RECOMPUTE` for all other
operations.
allow_cache_entry_mutation (bool, optional): By default, an error is
raised if any tensors cached by selective activation checkpoint are
mutated in order to ensure correctness. If set to `True`, this check
is disabled.
Returns:
A tuple of two context managers.
Example:
>>> # xdoctest: +REQUIRES(LINUX)
>>> import functools
>>>
>>> x = torch.rand(10, 10, requires_grad=True)
>>> y = torch.rand(10, 10, requires_grad=True)
>>>
>>> ops_to_save = [
>>> torch.ops.aten.mm.default,
>>> ]
>>>
>>> def policy_fn(ctx, op, *args, **kwargs):
>>> if op in ops_to_save:
>>> return CheckpointPolicy.MUST_SAVE
>>> else:
>>> return CheckpointPolicy.PREFER_RECOMPUTE
>>>
>>> context_fn = functools.partial(create_selective_checkpoint_contexts, policy_fn)
>>>
>>> # or equivalently
>>> context_fn = functools.partial(create_selective_checkpoint_contexts, ops_to_save)
>>>
>>> def fn(x, y):
>>> return torch.sigmoid(torch.matmul(torch.matmul(x, y), y)) * y
>>>
>>> out = torch.utils.checkpoint.checkpoint(
>>> fn, x, y,
>>> use_reentrant=False,
>>> context_fn=context_fn,
>>> )
"""
# NB: If grad_mode is disabled, checkpoint would not run forward under
# context_fn anyway, so proceed as usual.
if isinstance(policy_fn_or_list, list):
for op in policy_fn_or_list:
if not isinstance(op, torch._ops.OpOverload):
_extra_msg = (
"Please update the OpOverloadPacket to a specific OpOverload."
"For example, if you have `torch.ops.aten.mm`, change it to `torch.ops.aten.mm.default`."
) if isinstance(op, torch._ops.OpOverloadPacket) else ""
raise ValueError(
f"Expected op in `op_list` to be an OpOverload but got: {op} "
f"of type {type(op)}. {_extra_msg}"
)
def policy_fn(ctx, op, *args, **kwargs):
if op in policy_fn_or_list:
return CheckpointPolicy.MUST_SAVE
else:
return CheckpointPolicy.PREFER_RECOMPUTE
elif callable(policy_fn_or_list):
policy_fn = policy_fn_or_list
else:
raise TypeError("policy_fn_or_list must be either a function or a list of ops.")
storage: Dict[Any, List[Any]] = defaultdict(list)
return (
_CachingTorchDispatchMode(policy_fn, storage),
_CachedTorchDispatchMode(policy_fn, storage, allow_cache_entry_mutation),
)
# NB: this helper wraps fn before calling checkpoint_impl. kwargs and
# saving/restoring of global state is handled here.
def _checkpoint_without_reentrant_generator(
fn,
preserve_rng_state=True,
context_fn: Callable[[], Tuple[ContextManager, ContextManager]] = noop_context_fn,
determinism_check: str = _DEFAULT_DETERMINISM_MODE,
debug: bool = False,
*args,
**kwargs
):
"""Checkpointing without reentrant autograd.
Args:
fn: describes what to run in the forward pass of the model or
part of the model. It should also know how to handle the inputs
passed as the tuple. For example, in LSTM, if the user passes
``(activation, hidden)``, :attr:`fn` should correctly use the
first input as ``activation`` and the second input as ``hidden``
preserve_rng_state(bool, optional): If ``False``, omit stashing and
restoring the RNG state during each checkpoint.
Default: ``True``
context_fn(Callable, optional): A callable returning a tuple of two
context managers. The function and its recomputation will be run
under the first and second context managers respectively.
determinism_check(str, optional): A string specifying the determinism
check to perform. By default it is set to ``"default"`` which
compares the shapes, dtypes, and devices of the recomputed tensors
against those of the saved tensors. To turn off this check, specify
``"none"``. Currently these are the only two supported values.
Please open an issue if you would like to see more determinism
checks.
debug(bool, optional): If ``True``, error messages will also include
a trace of the operators run during the original forward computation
as well as the recomputation.
*args: Arguments to pass in to the given ``fn``.
**kwargs: Keyword arguments to pass into the given ``fn``.
"""
unpack_error_cb = None
if (_checkpoint_debug_enabled if _checkpoint_debug_enabled is not None else debug):
if context_fn != noop_context_fn:
raise ValueError(
"debug=True is incompatible with non-default context_fn"
)
context_fn, unpack_error_cb = _get_debug_context_and_cb()
if determinism_check in _allowed_determinism_checks_to_fns:
metadata_fn = _allowed_determinism_checks_to_fns[determinism_check]
else:
raise ValueError(
f"determinism_check should be one of {list(_allowed_determinism_checks_to_fns.keys())}, "
f"but got {determinism_check}"
)
device_type = _infer_device_type(*args)
device_module = _get_device_module(device_type)
forward_context, recompute_context = context_fn()
if _is_compiling(fn, args, kwargs) and context_fn != noop_context_fn:
assert (
isinstance(forward_context, TorchDispatchMode) and
isinstance(recompute_context, TorchDispatchMode)
), \
"In torch.compile mode, `context_fn` arg passed to `torch.utils.checkpoint` " + \
"must generate a tuple of two `TorchDispatchMode`s."
# Accommodates the (remote) possibility that autocast is enabled for cpu AND gpu.
device_autocast_kwargs, cpu_autocast_kwargs = _get_autocast_kwargs(device_type=device_type)
if preserve_rng_state:
fwd_cpu_state = torch.get_rng_state()
# Don't eagerly initialize the cuda context by accident.
# (If the user intends that the context is initialized later, within their
# run_function, we SHOULD actually stash the cuda state here. Unfortunately,
# we have no way to anticipate this will happen before we run the function.
# If they do so, we raise an error.)
had_device_in_fwd = False
if getattr(device_module, "_initialized", False):
had_device_in_fwd = True
fwd_devices, fwd_device_states = get_device_states(*args)
def recompute_fn(*inputs):
kwargs, *args = inputs
# This will be called later during recomputation. This wrapping enables
# the necessary global state to be captured.
rng_devices = []
if preserve_rng_state and had_device_in_fwd:
rng_devices = fwd_devices
with torch.random.fork_rng(
devices=rng_devices, enabled=preserve_rng_state, device_type=device_type
):
if preserve_rng_state:
torch.set_rng_state(fwd_cpu_state)
if had_device_in_fwd:
set_device_states(fwd_devices, fwd_device_states, device_type=device_type)
device_autocast_ctx = torch.amp.autocast(
device_type=device_type, **device_autocast_kwargs
) if torch.amp.is_autocast_available(device_type) else contextlib.nullcontext()
with device_autocast_ctx, torch.amp.autocast("cpu", **cpu_autocast_kwargs), recompute_context: # type: ignore[attr-defined]
fn(*args, **kwargs)
new_frame = _CheckpointFrame(
recompute_fn,
_enable_checkpoint_early_stop,
unpack_error_cb,
metadata_fn
)
dummy = torch.empty((0,), requires_grad=True)
new_frame.input_saver = _NoopSaveInputs.apply(dummy, kwargs, *args)
# When ambient grad_mode is False
if new_frame.input_saver.grad_fn is None:
yield
return
with _checkpoint_hook(new_frame), forward_context:
yield
new_frame.forward_completed = True
if getattr(device_module, "_initialized", False) and \
preserve_rng_state and not had_device_in_fwd: # type: ignore[possibly-undefined]
# Device was not initialized before running the forward, so we didn't
# stash the device state.
raise RuntimeError(
"PyTorch's device state was initialized in the forward pass "
"of a Checkpoint, which is not allowed. Please open an issue "
"if you need this feature."
)
return
```
|
==================================================================================================================
SOURCE CODE FILE: collect_env.py
LINES: 18
SIZE: 24.46 KB
PATH: scripts\freecad_env\Lib\site-packages\torch\utils\collect_env.py
ENCODING: utf-8
```py
# mypy: allow-untyped-defs
# Unlike the rest of PyTorch, this file must be Python 2 compliant.
# This script outputs relevant system environment info
# Run it with `python collect_env.py` or `python -m torch.utils.collect_env`
import datetime
import json
import locale
import re
import subprocess
import sys
import os
from collections import namedtuple
try:
import torch
TORCH_AVAILABLE = True
except (ImportError, NameError, AttributeError, OSError):
TORCH_AVAILABLE = False
# System Environment Information
SystemEnv = namedtuple('SystemEnv', [
'torch_version',
'is_debug_build',
'cuda_compiled_version',
'gcc_version',
'clang_version',
'cmake_version',
'os',
'libc_version',
'python_version',
'python_platform',
'is_cuda_available',
'cuda_runtime_version',
'cuda_module_loading',
'nvidia_driver_version',
'nvidia_gpu_models',
'cudnn_version',
'pip_version', # 'pip' or 'pip3'
'pip_packages',
'conda_packages',
'hip_compiled_version',
'hip_runtime_version',
'miopen_runtime_version',
'caching_allocator_config',
'is_xnnpack_available',
'cpu_info',
])
COMMON_PATTERNS = [
"torch",
"numpy",
"triton",
"optree",
]
NVIDIA_PATTERNS = [
"cuda-cudart",
"cuda-cupti",
"cuda-libraries",
"cuda-opencl",
"cuda-nvrtc",
"cuda-runtime",
"cublas",
"cudnn",
"cufft",
"curand",
"cusolver",
"cusparse",
"nccl",
"nvjitlink",
"nvtx",
]
CONDA_PATTERNS = [
"cudatoolkit",
"soumith",
"mkl",
"magma",
]
PIP_PATTERNS = [
"mypy",
"flake8",
"onnx",
]
def run(command):
"""Return (return-code, stdout, stderr)."""
shell = isinstance(command, str)
p = subprocess.Popen(command, stdout=subprocess.PIPE,
stderr=subprocess.PIPE, shell=shell)
raw_output, raw_err = p.communicate()
rc = p.returncode
if get_platform() == 'win32':
enc = 'oem'
else:
enc = locale.getpreferredencoding()
output = raw_output.decode(enc)
err = raw_err.decode(enc)
return rc, output.strip(), err.strip()
def run_and_read_all(run_lambda, command):
"""Run command using run_lambda; reads and returns entire output if rc is 0."""
rc, out, _ = run_lambda(command)
if rc != 0:
return None
return out
def run_and_parse_first_match(run_lambda, command, regex):
"""Run command using run_lambda, returns the first regex match if it exists."""
rc, out, _ = run_lambda(command)
if rc != 0:
return None
match = re.search(regex, out)
if match is None:
return None
return match.group(1)
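# Editor's usage sketch (outputs are hypothetical and platform-dependent):
#
#   rc, out, err = run('gcc --version')
#   # rc == 0, out begins with e.g. 'gcc (Ubuntu 11.4.0-1ubuntu1) 11.4.0'
#   version = run_and_parse_first_match(run, 'gcc --version', r'gcc (.*)')
#   # version == '(Ubuntu 11.4.0-1ubuntu1) 11.4.0' on such a system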
def run_and_return_first_line(run_lambda, command):
"""Run command using run_lambda and returns first line if output is not empty."""
rc, out, _ = run_lambda(command)
if rc != 0:
return None
return out.split('\n')[0]
def get_conda_packages(run_lambda, patterns=None):
if patterns is None:
patterns = CONDA_PATTERNS + COMMON_PATTERNS + NVIDIA_PATTERNS
conda = os.environ.get('CONDA_EXE', 'conda')
out = run_and_read_all(run_lambda, "{} list".format(conda))
if out is None:
return out
return "\n".join(
line
for line in out.splitlines()
if not line.startswith("#")
and any(name in line for name in patterns)
)
def get_gcc_version(run_lambda):
return run_and_parse_first_match(run_lambda, 'gcc --version', r'gcc (.*)')
def get_clang_version(run_lambda):
return run_and_parse_first_match(run_lambda, 'clang --version', r'clang version (.*)')
def get_cmake_version(run_lambda):
return run_and_parse_first_match(run_lambda, 'cmake --version', r'cmake (.*)')
def get_nvidia_driver_version(run_lambda):
if get_platform() == 'darwin':
cmd = 'kextstat | grep -i cuda'
return run_and_parse_first_match(run_lambda, cmd,
r'com[.]nvidia[.]CUDA [(](.*?)[)]')
smi = get_nvidia_smi()
return run_and_parse_first_match(run_lambda, smi, r'Driver Version: (.*?) ')
def get_gpu_info(run_lambda):
if get_platform() == 'darwin' or (TORCH_AVAILABLE and hasattr(torch.version, 'hip') and torch.version.hip is not None):
if TORCH_AVAILABLE and torch.cuda.is_available():
if torch.version.hip is not None:
prop = torch.cuda.get_device_properties(0)
if hasattr(prop, "gcnArchName"):
gcnArch = " ({})".format(prop.gcnArchName)
else:
gcnArch = "NoGCNArchNameOnOldPyTorch"
else:
gcnArch = ""
return torch.cuda.get_device_name(None) + gcnArch
return None
smi = get_nvidia_smi()
uuid_regex = re.compile(r' \(UUID: .+?\)')
rc, out, _ = run_lambda(smi + ' -L')
if rc != 0:
return None
# Anonymize GPUs by removing their UUID
return re.sub(uuid_regex, '', out)
def get_running_cuda_version(run_lambda):
return run_and_parse_first_match(run_lambda, 'nvcc --version', r'release .+ V(.*)')
def get_cudnn_version(run_lambda):
"""Return a list of libcudnn.so; it's hard to tell which one is being used."""
if get_platform() == 'win32':
system_root = os.environ.get('SYSTEMROOT', 'C:\\Windows')
cuda_path = os.environ.get('CUDA_PATH', "%CUDA_PATH%")
where_cmd = os.path.join(system_root, 'System32', 'where')
cudnn_cmd = '{} /R "{}\\bin" cudnn*.dll'.format(where_cmd, cuda_path)
elif get_platform() == 'darwin':
# CUDA libraries and drivers can be found in /usr/local/cuda/. See
# https://docs.nvidia.com/cuda/cuda-installation-guide-mac-os-x/index.html#install
# https://docs.nvidia.com/deeplearning/sdk/cudnn-install/index.html#installmac
# Use CUDNN_LIBRARY when cudnn library is installed elsewhere.
cudnn_cmd = 'ls /usr/local/cuda/lib/libcudnn*'
else:
cudnn_cmd = 'ldconfig -p | grep libcudnn | rev | cut -d" " -f1 | rev'
rc, out, _ = run_lambda(cudnn_cmd)
# find will return 1 if there are permission errors or if not found
if len(out) == 0 or (rc != 1 and rc != 0):
l = os.environ.get('CUDNN_LIBRARY')
if l is not None and os.path.isfile(l):
return os.path.realpath(l)
return None
files_set = set()
for fn in out.split('\n'):
fn = os.path.realpath(fn) # eliminate symbolic links
if os.path.isfile(fn):
files_set.add(fn)
if not files_set:
return None
# Alphabetize the result because the order is non-deterministic otherwise
files = sorted(files_set)
if len(files) == 1:
return files[0]
result = '\n'.join(files)
return 'Probably one of the following:\n{}'.format(result)
def get_nvidia_smi():
# Note: nvidia-smi is currently available only on Windows and Linux
smi = 'nvidia-smi'
if get_platform() == 'win32':
system_root = os.environ.get('SYSTEMROOT', 'C:\\Windows')
program_files_root = os.environ.get('PROGRAMFILES', 'C:\\Program Files')
legacy_path = os.path.join(program_files_root, 'NVIDIA Corporation', 'NVSMI', smi)
new_path = os.path.join(system_root, 'System32', smi)
smis = [new_path, legacy_path]
for candidate_smi in smis:
if os.path.exists(candidate_smi):
smi = '"{}"'.format(candidate_smi)
break
return smi
# example outputs of CPU infos
# * linux
# Architecture: x86_64
# CPU op-mode(s): 32-bit, 64-bit
# Address sizes: 46 bits physical, 48 bits virtual
# Byte Order: Little Endian
# CPU(s): 128
# On-line CPU(s) list: 0-127
# Vendor ID: GenuineIntel
# Model name: Intel(R) Xeon(R) Platinum 8375C CPU @ 2.90GHz
# CPU family: 6
# Model: 106
# Thread(s) per core: 2
# Core(s) per socket: 32
# Socket(s): 2
# Stepping: 6
# BogoMIPS: 5799.78
# Flags: fpu vme de pse tsc msr pae mce cx8 apic sep mtrr pge mca cmov pat pse36 clflush mmx fxsr
# sse sse2 ss ht syscall nx pdpe1gb rdtscp lm constant_tsc arch_perfmon rep_good nopl
# xtopology nonstop_tsc cpuid aperfmperf tsc_known_freq pni pclmulqdq monitor ssse3 fma cx16
# pcid sse4_1 sse4_2 x2apic movbe popcnt tsc_deadline_timer aes xsave avx f16c rdrand
# hypervisor lahf_lm abm 3dnowprefetch invpcid_single ssbd ibrs ibpb stibp ibrs_enhanced
# fsgsbase tsc_adjust bmi1 avx2 smep bmi2 erms invpcid avx512f avx512dq rdseed adx smap
# avx512ifma clflushopt clwb avx512cd sha_ni avx512bw avx512vl xsaveopt xsavec xgetbv1
# xsaves wbnoinvd ida arat avx512vbmi pku ospke avx512_vbmi2 gfni vaes vpclmulqdq
# avx512_vnni avx512_bitalg tme avx512_vpopcntdq rdpid md_clear flush_l1d arch_capabilities
# Virtualization features:
# Hypervisor vendor: KVM
# Virtualization type: full
# Caches (sum of all):
# L1d: 3 MiB (64 instances)
# L1i: 2 MiB (64 instances)
# L2: 80 MiB (64 instances)
# L3: 108 MiB (2 instances)
# NUMA:
# NUMA node(s): 2
# NUMA node0 CPU(s): 0-31,64-95
# NUMA node1 CPU(s): 32-63,96-127
# Vulnerabilities:
# Itlb multihit: Not affected
# L1tf: Not affected
# Mds: Not affected
# Meltdown: Not affected
# Mmio stale data: Vulnerable: Clear CPU buffers attempted, no microcode; SMT Host state unknown
# Retbleed: Not affected
# Spec store bypass: Mitigation; Speculative Store Bypass disabled via prctl and seccomp
# Spectre v1: Mitigation; usercopy/swapgs barriers and __user pointer sanitization
# Spectre v2: Mitigation; Enhanced IBRS, IBPB conditional, RSB filling, PBRSB-eIBRS SW sequence
# Srbds: Not affected
# Tsx async abort: Not affected
# * win32
# Architecture=9
# CurrentClockSpeed=2900
# DeviceID=CPU0
# Family=179
# L2CacheSize=40960
# L2CacheSpeed=
# Manufacturer=GenuineIntel
# MaxClockSpeed=2900
# Name=Intel(R) Xeon(R) Platinum 8375C CPU @ 2.90GHz
# ProcessorType=3
# Revision=27142
#
# Architecture=9
# CurrentClockSpeed=2900
# DeviceID=CPU1
# Family=179
# L2CacheSize=40960
# L2CacheSpeed=
# Manufacturer=GenuineIntel
# MaxClockSpeed=2900
# Name=Intel(R) Xeon(R) Platinum 8375C CPU @ 2.90GHz
# ProcessorType=3
# Revision=27142
def get_cpu_info(run_lambda):
rc, out, err = 0, '', ''
if get_platform() == 'linux':
rc, out, err = run_lambda('lscpu')
elif get_platform() == 'win32':
rc, out, err = run_lambda(
'powershell.exe "gwmi -Class Win32_Processor | Select-Object -Property Name,Manufacturer,Family,\
Architecture,ProcessorType,DeviceID,CurrentClockSpeed,MaxClockSpeed,L2CacheSize,L2CacheSpeed,Revision\
| ConvertTo-Json"'
)
if rc == 0:
lst = []
try:
obj = json.loads(out)
if type(obj) is list:
for o in obj:
lst.append("----------------------")
lst.extend([f"{k}: {v}" for (k, v) in o.items()])
else:
lst.extend([f"{k}: {v}" for (k, v) in obj.items()])
except ValueError as e:
lst.append(out)
lst.append(str(e))
out = "\n".join(lst)
elif get_platform() == 'darwin':
rc, out, err = run_lambda("sysctl -n machdep.cpu.brand_string")
cpu_info = 'None'
if rc == 0:
cpu_info = out
else:
cpu_info = err
return cpu_info
def get_platform():
if sys.platform.startswith('linux'):
return 'linux'
elif sys.platform.startswith('win32'):
return 'win32'
elif sys.platform.startswith('cygwin'):
return 'cygwin'
elif sys.platform.startswith('darwin'):
return 'darwin'
else:
return sys.platform
def get_mac_version(run_lambda):
return run_and_parse_first_match(run_lambda, 'sw_vers -productVersion', r'(.*)')
def get_windows_version(run_lambda):
ret = run_and_read_all(
run_lambda,
'powershell.exe "gwmi -Class Win32_OperatingSystem | Select-Object -Property Caption,\
OSArchitecture,Version | ConvertTo-Json"',
)
try:
obj = json.loads(ret)
ret = f'{obj["Caption"]} ({obj["Version"]} {obj["OSArchitecture"]})'
except ValueError as e:
ret += f"\n{str(e)}"
return ret
def get_lsb_version(run_lambda):
return run_and_parse_first_match(run_lambda, 'lsb_release -a', r'Description:\t(.*)')
def check_release_file(run_lambda):
return run_and_parse_first_match(run_lambda, 'cat /etc/*-release',
r'PRETTY_NAME="(.*)"')
def get_os(run_lambda):
from platform import machine
platform = get_platform()
if platform == 'win32' or platform == 'cygwin':
return get_windows_version(run_lambda)
if platform == 'darwin':
version = get_mac_version(run_lambda)
if version is None:
return None
return 'macOS {} ({})'.format(version, machine())
if platform == 'linux':
# Ubuntu/Debian based
desc = get_lsb_version(run_lambda)
if desc is not None:
return '{} ({})'.format(desc, machine())
# Try reading /etc/*-release
desc = check_release_file(run_lambda)
if desc is not None:
return '{} ({})'.format(desc, machine())
return '{} ({})'.format(platform, machine())
# Unknown platform
return platform
def get_python_platform():
import platform
return platform.platform()
def get_libc_version():
import platform
if get_platform() != 'linux':
return 'N/A'
return '-'.join(platform.libc_ver())
def get_pip_packages(run_lambda, patterns=None):
"""Return `pip list` output. Note: will also find conda-installed pytorch and numpy packages."""
if patterns is None:
patterns = PIP_PATTERNS + COMMON_PATTERNS + NVIDIA_PATTERNS
pip_version = 'pip3' if sys.version[0] == '3' else 'pip'
os.environ['PIP_DISABLE_PIP_VERSION_CHECK'] = '1'
# People generally have pip as `pip` or `pip3`
# But here it is invoked as `python -mpip`
out = run_and_read_all(run_lambda, [sys.executable, '-mpip', 'list', '--format=freeze'])
filtered_out = '\n'.join(
line
for line in out.splitlines()
if any(name in line for name in patterns)
)
return pip_version, filtered_out
def get_cachingallocator_config():
ca_config = os.environ.get('PYTORCH_CUDA_ALLOC_CONF', '')
return ca_config
def get_cuda_module_loading_config():
if TORCH_AVAILABLE and torch.cuda.is_available():
torch.cuda.init()
config = os.environ.get('CUDA_MODULE_LOADING', '')
return config
else:
return "N/A"
def is_xnnpack_available():
if TORCH_AVAILABLE:
import torch.backends.xnnpack
return str(torch.backends.xnnpack.enabled) # type: ignore[attr-defined]
else:
return "N/A"
def get_env_info():
"""
Collects environment information to aid in debugging.
The returned environment information contains details on torch version, is debug build
or not, cuda compiled version, gcc version, clang version, cmake version, operating
system, libc version, python version, python platform, CUDA availability, CUDA
runtime version, CUDA module loading config, GPU model and configuration, Nvidia
driver version, cuDNN version, pip version and versions of relevant pip and
conda packages, HIP runtime version, MIOpen runtime version,
Caching allocator config, XNNPACK availability and CPU information.
Returns:
SystemEnv (namedtuple): A tuple containing various environment details
and system information.
"""
run_lambda = run
pip_version, pip_list_output = get_pip_packages(run_lambda)
if TORCH_AVAILABLE:
version_str = torch.__version__
debug_mode_str = str(torch.version.debug)
cuda_available_str = str(torch.cuda.is_available())
cuda_version_str = torch.version.cuda
if not hasattr(torch.version, 'hip') or torch.version.hip is None: # cuda version
hip_compiled_version = hip_runtime_version = miopen_runtime_version = 'N/A'
else: # HIP version
def get_version_or_na(cfg, prefix):
_lst = [s.rsplit(None, 1)[-1] for s in cfg if prefix in s]
return _lst[0] if _lst else 'N/A'
cfg = torch._C._show_config().split('\n')
hip_runtime_version = get_version_or_na(cfg, 'HIP Runtime')
miopen_runtime_version = get_version_or_na(cfg, 'MIOpen')
cuda_version_str = 'N/A'
hip_compiled_version = torch.version.hip
else:
version_str = debug_mode_str = cuda_available_str = cuda_version_str = 'N/A'
hip_compiled_version = hip_runtime_version = miopen_runtime_version = 'N/A'
sys_version = sys.version.replace("\n", " ")
conda_packages = get_conda_packages(run_lambda)
return SystemEnv(
torch_version=version_str,
is_debug_build=debug_mode_str,
python_version='{} ({}-bit runtime)'.format(sys_version, sys.maxsize.bit_length() + 1),
python_platform=get_python_platform(),
is_cuda_available=cuda_available_str,
cuda_compiled_version=cuda_version_str,
cuda_runtime_version=get_running_cuda_version(run_lambda),
cuda_module_loading=get_cuda_module_loading_config(),
nvidia_gpu_models=get_gpu_info(run_lambda),
nvidia_driver_version=get_nvidia_driver_version(run_lambda),
cudnn_version=get_cudnn_version(run_lambda),
hip_compiled_version=hip_compiled_version,
hip_runtime_version=hip_runtime_version,
miopen_runtime_version=miopen_runtime_version,
pip_version=pip_version,
pip_packages=pip_list_output,
conda_packages=conda_packages,
os=get_os(run_lambda),
libc_version=get_libc_version(),
gcc_version=get_gcc_version(run_lambda),
clang_version=get_clang_version(run_lambda),
cmake_version=get_cmake_version(run_lambda),
caching_allocator_config=get_cachingallocator_config(),
is_xnnpack_available=is_xnnpack_available(),
cpu_info=get_cpu_info(run_lambda),
)
env_info_fmt = """
PyTorch version: {torch_version}
Is debug build: {is_debug_build}
CUDA used to build PyTorch: {cuda_compiled_version}
ROCM used to build PyTorch: {hip_compiled_version}
OS: {os}
GCC version: {gcc_version}
Clang version: {clang_version}
CMake version: {cmake_version}
Libc version: {libc_version}
Python version: {python_version}
Python platform: {python_platform}
Is CUDA available: {is_cuda_available}
CUDA runtime version: {cuda_runtime_version}
CUDA_MODULE_LOADING set to: {cuda_module_loading}
GPU models and configuration: {nvidia_gpu_models}
Nvidia driver version: {nvidia_driver_version}
cuDNN version: {cudnn_version}
HIP runtime version: {hip_runtime_version}
MIOpen runtime version: {miopen_runtime_version}
Is XNNPACK available: {is_xnnpack_available}
CPU:
{cpu_info}
Versions of relevant libraries:
{pip_packages}
{conda_packages}
""".strip()
def pretty_str(envinfo):
def replace_nones(dct, replacement='Could not collect'):
for key in dct.keys():
if dct[key] is not None:
continue
dct[key] = replacement
return dct
def replace_bools(dct, true='Yes', false='No'):
for key in dct.keys():
if dct[key] is True:
dct[key] = true
elif dct[key] is False:
dct[key] = false
return dct
def prepend(text, tag='[prepend]'):
lines = text.split('\n')
updated_lines = [tag + line for line in lines]
return '\n'.join(updated_lines)
def replace_if_empty(text, replacement='No relevant packages'):
if text is not None and len(text) == 0:
return replacement
return text
def maybe_start_on_next_line(string):
# If `string` is multiline, prepend a \n to it.
if string is not None and len(string.split('\n')) > 1:
return '\n{}\n'.format(string)
return string
mutable_dict = envinfo._asdict()
# If nvidia_gpu_models is multiline, start on the next line
mutable_dict['nvidia_gpu_models'] = \
maybe_start_on_next_line(envinfo.nvidia_gpu_models)
# If the machine doesn't have CUDA, report some fields as 'No CUDA'
dynamic_cuda_fields = [
'cuda_runtime_version',
'nvidia_gpu_models',
'nvidia_driver_version',
]
all_cuda_fields = dynamic_cuda_fields + ['cudnn_version']
all_dynamic_cuda_fields_missing = all(
mutable_dict[field] is None for field in dynamic_cuda_fields)
if TORCH_AVAILABLE and not torch.cuda.is_available() and all_dynamic_cuda_fields_missing:
for field in all_cuda_fields:
mutable_dict[field] = 'No CUDA'
if envinfo.cuda_compiled_version is None:
mutable_dict['cuda_compiled_version'] = 'None'
# Replace True with Yes, False with No
mutable_dict = replace_bools(mutable_dict)
# Replace all None objects with 'Could not collect'
mutable_dict = replace_nones(mutable_dict)
# If either of these are '', replace with 'No relevant packages'
mutable_dict['pip_packages'] = replace_if_empty(mutable_dict['pip_packages'])
mutable_dict['conda_packages'] = replace_if_empty(mutable_dict['conda_packages'])
# Tag conda and pip packages with a prefix
# If they were previously None, they'll show up as ie '[conda] Could not collect'
if mutable_dict['pip_packages']:
mutable_dict['pip_packages'] = prepend(mutable_dict['pip_packages'],
'[{}] '.format(envinfo.pip_version))
if mutable_dict['conda_packages']:
mutable_dict['conda_packages'] = prepend(mutable_dict['conda_packages'],
'[conda] ')
mutable_dict['cpu_info'] = envinfo.cpu_info
return env_info_fmt.format(**mutable_dict)
def get_pretty_env_info():
"""
Returns a pretty string of environment information.
This function retrieves environment information by calling the `get_env_info` function
and then formats the information into a human-readable string. The retrieved environment
information is listed in the document of `get_env_info`.
This function is used in `python collect_env.py` that should be executed when reporting a bug.
Returns:
str: A pretty string of the environment information.
"""
return pretty_str(get_env_info())
def main():
print("Collecting environment information...")
output = get_pretty_env_info()
print(output)
if TORCH_AVAILABLE and hasattr(torch, 'utils') and hasattr(torch.utils, '_crash_handler'):
minidump_dir = torch.utils._crash_handler.DEFAULT_MINIDUMP_DIR
if sys.platform == "linux" and os.path.exists(minidump_dir):
dumps = [os.path.join(minidump_dir, dump) for dump in os.listdir(minidump_dir)]
latest = max(dumps, key=os.path.getctime)
ctime = os.path.getctime(latest)
creation_time = datetime.datetime.fromtimestamp(ctime).strftime('%Y-%m-%d %H:%M:%S')
msg = "\n*** Detected a minidump at {} created on {}, ".format(latest, creation_time) + \
"if this is related to your bug please include it when you file a report ***"
print(msg, file=sys.stderr)
if __name__ == '__main__':
main()
```
|
====================================================================================================================
SOURCE CODE FILE: cpp_backtrace.py
LINES: 1
SIZE: 0.48 KB
PATH: scripts\freecad_env\Lib\site-packages\torch\utils\cpp_backtrace.py
ENCODING: utf-8
```py
# mypy: allow-untyped-defs
from torch._C import _get_cpp_backtrace
def get_cpp_backtrace(frames_to_skip=0, maximum_number_of_frames=64) -> str:
r"""
Return a string containing the C++ stack trace of the current thread.
Args:
frames_to_skip (int): the number of frames to skip from the top of the stack
maximum_number_of_frames (int): the maximum number of frames to return
"""
return _get_cpp_backtrace(frames_to_skip, maximum_number_of_frames)
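# Editor's usage sketch (the exact frame format depends on how PyTorch was
# built and on whether debug symbols are available):
#
#   from torch.utils.cpp_backtrace import get_cpp_backtrace
#   print(get_cpp_backtrace(frames_to_skip=1, maximum_number_of_frames=32))
#   # prints a newline-separated list of C++ frames of the current thread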
```
|
====================================================================================================================
SOURCE CODE FILE: cpp_extension.py
LINES: 11
SIZE: 127.31 KB
PATH: scripts\freecad_env\Lib\site-packages\torch\utils\cpp_extension.py
ENCODING: utf-8
```py
# mypy: allow-untyped-defs
import copy
import glob
import importlib
import importlib.abc
import os
import re
import shlex
import shutil
import setuptools
import subprocess
import sys
import sysconfig
import warnings
import collections
from pathlib import Path
import errno
import torch
import torch._appdirs
from .file_baton import FileBaton
from ._cpp_extension_versioner import ExtensionVersioner
from .hipify import hipify_python
from .hipify.hipify_python import GeneratedFileCleaner
from typing import Optional, Union
from torch.torch_version import TorchVersion, Version
from setuptools.command.build_ext import build_ext
IS_WINDOWS = sys.platform == 'win32'
IS_MACOS = sys.platform.startswith('darwin')
IS_LINUX = sys.platform.startswith('linux')
LIB_EXT = '.pyd' if IS_WINDOWS else '.so'
EXEC_EXT = '.exe' if IS_WINDOWS else ''
CLIB_PREFIX = '' if IS_WINDOWS else 'lib'
CLIB_EXT = '.dll' if IS_WINDOWS else '.so'
SHARED_FLAG = '/DLL' if IS_WINDOWS else '-shared'
_HERE = os.path.abspath(__file__)
_TORCH_PATH = os.path.dirname(os.path.dirname(_HERE))
TORCH_LIB_PATH = os.path.join(_TORCH_PATH, 'lib')
SUBPROCESS_DECODE_ARGS = ('oem',) if IS_WINDOWS else ()
MINIMUM_GCC_VERSION = (5, 0, 0)
MINIMUM_MSVC_VERSION = (19, 0, 24215)
VersionRange = tuple[tuple[int, ...], tuple[int, ...]]
VersionMap = dict[str, VersionRange]
# The following values were taken from the following GitHub gist that
# summarizes the minimum valid major versions of g++/clang++ for each supported
# CUDA version: https://gist.github.com/ax3l/9489132
# Or from include/crt/host_config.h in the CUDA SDK
# The second value is the exclusive(!) upper bound, i.e. min <= version < max
CUDA_GCC_VERSIONS: VersionMap = {
'11.0': (MINIMUM_GCC_VERSION, (10, 0)),
'11.1': (MINIMUM_GCC_VERSION, (11, 0)),
'11.2': (MINIMUM_GCC_VERSION, (11, 0)),
'11.3': (MINIMUM_GCC_VERSION, (11, 0)),
'11.4': ((6, 0, 0), (12, 0)),
'11.5': ((6, 0, 0), (12, 0)),
'11.6': ((6, 0, 0), (12, 0)),
'11.7': ((6, 0, 0), (12, 0)),
}
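# Editor's note (illustrative): each entry is an inclusive lower bound and an
# exclusive upper bound on the host compiler version, e.g.
#
#   lo, hi = CUDA_GCC_VERSIONS['11.4']    # ((6, 0, 0), (12, 0))
#   ok = lo <= (11, 3, 0) < hi            # True: g++ 11.3 is accepted for CUDA 11.4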
MINIMUM_CLANG_VERSION = (3, 3, 0)
CUDA_CLANG_VERSIONS: VersionMap = {
'11.1': (MINIMUM_CLANG_VERSION, (11, 0)),
'11.2': (MINIMUM_CLANG_VERSION, (12, 0)),
'11.3': (MINIMUM_CLANG_VERSION, (12, 0)),
'11.4': (MINIMUM_CLANG_VERSION, (13, 0)),
'11.5': (MINIMUM_CLANG_VERSION, (13, 0)),
'11.6': (MINIMUM_CLANG_VERSION, (14, 0)),
'11.7': (MINIMUM_CLANG_VERSION, (14, 0)),
}
__all__ = ["get_default_build_root", "check_compiler_ok_for_platform", "get_compiler_abi_compatibility_and_version", "BuildExtension",
"CppExtension", "CUDAExtension", "SyclExtension", "include_paths", "library_paths", "load", "load_inline", "is_ninja_available",
"verify_ninja_availability", "remove_extension_h_precompiler_headers", "get_cxx_compiler", "check_compiler_is_gcc"]
# Taken directly from python stdlib < 3.9
# See https://github.com/pytorch/pytorch/issues/48617
def _nt_quote_args(args: Optional[list[str]]) -> list[str]:
"""Quote command-line arguments for DOS/Windows conventions.
Just wraps every argument which contains blanks in double quotes, and
returns a new argument list.
"""
# Cover None-type
if not args:
return []
return [f'"{arg}"' if ' ' in arg else arg for arg in args]
def _find_cuda_home() -> Optional[str]:
"""Find the CUDA install path."""
# Guess #1
cuda_home = os.environ.get('CUDA_HOME') or os.environ.get('CUDA_PATH')
if cuda_home is None:
# Guess #2
nvcc_path = shutil.which("nvcc")
if nvcc_path is not None:
cuda_home = os.path.dirname(os.path.dirname(nvcc_path))
else:
# Guess #3
if IS_WINDOWS:
cuda_homes = glob.glob(
'C:/Program Files/NVIDIA GPU Computing Toolkit/CUDA/v*.*')
if len(cuda_homes) == 0:
cuda_home = ''
else:
cuda_home = cuda_homes[0]
else:
cuda_home = '/usr/local/cuda'
if not os.path.exists(cuda_home):
cuda_home = None
if cuda_home and not torch.cuda.is_available():
print(f"No CUDA runtime is found, using CUDA_HOME='{cuda_home}'",
file=sys.stderr)
return cuda_home
def _find_rocm_home() -> Optional[str]:
"""Find the ROCm install path."""
# Guess #1
rocm_home = os.environ.get('ROCM_HOME') or os.environ.get('ROCM_PATH')
if rocm_home is None:
# Guess #2
hipcc_path = shutil.which('hipcc')
if hipcc_path is not None:
rocm_home = os.path.dirname(os.path.dirname(
os.path.realpath(hipcc_path)))
# can be either <ROCM_HOME>/hip/bin/hipcc or <ROCM_HOME>/bin/hipcc
if os.path.basename(rocm_home) == 'hip':
rocm_home = os.path.dirname(rocm_home)
else:
# Guess #3
fallback_path = '/opt/rocm'
if os.path.exists(fallback_path):
rocm_home = fallback_path
if rocm_home and torch.version.hip is None:
print(f"No ROCm runtime is found, using ROCM_HOME='{rocm_home}'",
file=sys.stderr)
return rocm_home
def _find_sycl_home() -> Optional[str]:
sycl_home = None
icpx_path = shutil.which('icpx')
# Guess 1: developers/users building from source will have icpx in PATH,
# which tells us the SYCL_HOME location.
if icpx_path is not None:
sycl_home = os.path.dirname(os.path.dirname(
os.path.realpath(icpx_path)))
# Guess 2: for users who install PyTorch with XPU support, the SYCL runtime is
# inside intel-sycl-rt, which is automatically installed as a pip dependency.
else:
try:
files = importlib.metadata.files('intel-sycl-rt') or []
for f in files:
if f.name == "libsycl.so":
sycl_home = os.path.dirname(Path(f.locate()).parent.resolve())
break
except importlib.metadata.PackageNotFoundError:
print("Trying to find SYCL_HOME from intel-sycl-rt package, but it is not installed.",
file=sys.stderr)
return sycl_home
def _join_rocm_home(*paths) -> str:
"""
Join paths with ROCM_HOME, or raise an error if ROCM_HOME is not set.
This is basically a lazy way of raising an error for missing $ROCM_HOME
only once we need to get any ROCm-specific path.
"""
if ROCM_HOME is None:
raise OSError('ROCM_HOME environment variable is not set. '
'Please set it to your ROCm install root.')
elif IS_WINDOWS:
raise OSError('Building PyTorch extensions using '
'ROCm and Windows is not supported.')
return os.path.join(ROCM_HOME, *paths)
def _join_sycl_home(*paths) -> str:
"""
Join paths with SYCL_HOME, or raise an error if SYCL_HOME is not found.
This is basically a lazy way of raising an error for missing SYCL_HOME
only once we need to get any SYCL-specific path.
"""
if SYCL_HOME is None:
raise OSError('SYCL runtime is not detected. Please set up the PyTorch '
'prerequisites for Intel GPU following the instructions in '
'https://github.com/pytorch/pytorch?tab=readme-ov-file#intel-gpu-support '
'or install intel-sycl-rt via pip.')
return os.path.join(SYCL_HOME, *paths)
ABI_INCOMPATIBILITY_WARNING = '''
!! WARNING !!
!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
Your compiler ({}) may be ABI-incompatible with PyTorch!
Please use a compiler that is ABI-compatible with GCC 5.0 and above.
See https://gcc.gnu.org/onlinedocs/libstdc++/manual/abi.html.
See https://gist.github.com/goldsborough/d466f43e8ffc948ff92de7486c5216d6
for instructions on how to install GCC 5 or higher.
!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
!! WARNING !!
'''
WRONG_COMPILER_WARNING = '''
!! WARNING !!
!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
Your compiler ({user_compiler}) is not compatible with the compiler PyTorch was
built with for this platform, which is {pytorch_compiler} on {platform}. Please
use {pytorch_compiler} to compile your extension. Alternatively, you may
compile PyTorch from source using {user_compiler}, and then you can also use
{user_compiler} to compile your extension.
See https://github.com/pytorch/pytorch/blob/master/CONTRIBUTING.md for help
with compiling PyTorch from source.
!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
!! WARNING !!
'''
CUDA_MISMATCH_MESSAGE = '''
The detected CUDA version ({0}) does not match the version that was used to compile
PyTorch ({1}). Please make sure to use the same CUDA versions.
'''
CUDA_MISMATCH_WARN = "The detected CUDA version ({0}) has a minor version mismatch with the version that was used to compile PyTorch ({1}). Most likely this shouldn't be a problem."
CUDA_NOT_FOUND_MESSAGE = '''
CUDA was not found on the system, please set the CUDA_HOME or the CUDA_PATH
environment variable or add NVCC to your system PATH. The extension compilation will fail.
'''
ROCM_HOME = _find_rocm_home()
HIP_HOME = _join_rocm_home('hip') if ROCM_HOME else None
IS_HIP_EXTENSION = (ROCM_HOME is not None) and (torch.version.hip is not None)
ROCM_VERSION = None
if torch.version.hip is not None:
ROCM_VERSION = tuple(int(v) for v in torch.version.hip.split('.')[:2])
CUDA_HOME = _find_cuda_home() if torch.cuda._is_compiled() else None
CUDNN_HOME = os.environ.get('CUDNN_HOME') or os.environ.get('CUDNN_PATH')
SYCL_HOME = _find_sycl_home() if torch.xpu._is_compiled() else None
# PyTorch releases have the version pattern major.minor.patch, whereas when
# PyTorch is built from source, we append the git commit hash, which gives
# it the below pattern.
BUILT_FROM_SOURCE_VERSION_PATTERN = re.compile(r'\d+\.\d+\.\d+\w+\+\w+')
COMMON_MSVC_FLAGS = ['/MD', '/wd4819', '/wd4251', '/wd4244', '/wd4267', '/wd4275', '/wd4018', '/wd4190', '/wd4624', '/wd4067', '/wd4068', '/EHsc']
MSVC_IGNORE_CUDAFE_WARNINGS = [
'base_class_has_different_dll_interface',
'field_without_dll_interface',
'dll_interface_conflict_none_assumed',
'dll_interface_conflict_dllexport_assumed'
]
COMMON_NVCC_FLAGS = [
'-D__CUDA_NO_HALF_OPERATORS__',
'-D__CUDA_NO_HALF_CONVERSIONS__',
'-D__CUDA_NO_BFLOAT16_CONVERSIONS__',
'-D__CUDA_NO_HALF2_OPERATORS__',
'--expt-relaxed-constexpr'
]
COMMON_HIP_FLAGS = [
'-fPIC',
'-D__HIP_PLATFORM_AMD__=1',
'-DUSE_ROCM=1',
'-DHIPBLAS_V2',
]
COMMON_HIPCC_FLAGS = [
'-DCUDA_HAS_FP16=1',
'-D__HIP_NO_HALF_OPERATORS__=1',
'-D__HIP_NO_HALF_CONVERSIONS__=1',
]
_COMMON_SYCL_FLAGS = [
'-fsycl',
'-fsycl-targets=spir64_gen,spir64',
]
def _get_sycl_arch_list():
if 'TORCH_XPU_ARCH_LIST' in os.environ:
return os.environ.get('TORCH_XPU_ARCH_LIST')
arch_list = torch.xpu.get_arch_list()
# Dropping dg2-* archs since they lack hardware support for fp64 and require
# special consideration from the user. If needed, these platforms can
# be requested through the TORCH_XPU_ARCH_LIST environment variable.
arch_list = [x for x in arch_list if not x.startswith('dg2-')]
return ','.join(arch_list)
_SYCL_DLINK_FLAGS = [
*_COMMON_SYCL_FLAGS,
'-fsycl-link',
'--offload-compress',
f'-Xs "-device {_get_sycl_arch_list()}"',
]
JIT_EXTENSION_VERSIONER = ExtensionVersioner()
PLAT_TO_VCVARS = {
'win32' : 'x86',
'win-amd64' : 'x86_amd64',
}
min_supported_cpython = "0x03090000" # Python 3.9 hexcode
def get_cxx_compiler():
if IS_WINDOWS:
compiler = os.environ.get('CXX', 'cl')
else:
compiler = os.environ.get('CXX', 'c++')
return compiler
def _is_binary_build() -> bool:
return not BUILT_FROM_SOURCE_VERSION_PATTERN.match(torch.version.__version__)
def _accepted_compilers_for_platform() -> list[str]:
# gnu-c++ and gnu-cc are the conda gcc compilers
return ['clang++', 'clang'] if IS_MACOS else ['g++', 'gcc', 'gnu-c++', 'gnu-cc', 'clang++', 'clang']
def _maybe_write(filename, new_content):
r'''
Equivalent to writing the content into the file but will not touch the file
if it already had the right content (to avoid triggering recompile).
'''
if os.path.exists(filename):
with open(filename) as f:
content = f.read()
if content == new_content:
# The file already contains the right thing!
return
with open(filename, 'w') as source_file:
source_file.write(new_content)
def get_default_build_root() -> str:
"""
Return the path to the root folder under which extensions will be built.
For each extension module built, there will be one folder underneath the
folder returned by this function. For example, if ``p`` is the path
returned by this function and ``ext`` the name of an extension, the build
folder for the extension will be ``p/ext``.
This directory is **user-specific** so that multiple users on the same
machine won't run into permission issues.
"""
return os.path.realpath(torch._appdirs.user_cache_dir(appname='torch_extensions'))
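# Editor's sketch (paths are hypothetical and vary per user and OS): on a
# typical Linux setup this resolves to something like
#
#   get_default_build_root()   # -> '/home/<user>/.cache/torch_extensions'
#
# and, per the docstring above, an extension named 'my_ext' is then built
# under '<build_root>/my_ext'.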
def check_compiler_ok_for_platform(compiler: str) -> bool:
"""
Verify that the compiler is the expected one for the current platform.
Args:
compiler (str): The compiler executable to check.
Returns:
True if the compiler is gcc/g++ on Linux or clang/clang++ on macOS,
and always True for Windows.
"""
if IS_WINDOWS:
return True
compiler_path = shutil.which(compiler)
if compiler_path is None:
return False
# Use os.path.realpath to resolve any symlinks, in particular from 'c++' to e.g. 'g++'.
compiler_path = os.path.realpath(compiler_path)
# Check the compiler name
if any(name in compiler_path for name in _accepted_compilers_for_platform()):
return True
# If compiler wrapper is used try to infer the actual compiler by invoking it with -v flag
env = os.environ.copy()
env['LC_ALL'] = 'C' # Don't localize output
version_string = subprocess.check_output([compiler, '-v'], stderr=subprocess.STDOUT, env=env).decode(*SUBPROCESS_DECODE_ARGS)
if IS_LINUX:
# Check for 'gcc' or 'g++' for sccache wrapper
pattern = re.compile("^COLLECT_GCC=(.*)$", re.MULTILINE)
results = re.findall(pattern, version_string)
if len(results) != 1:
# Clang is also a supported compiler on Linux
# Though on Ubuntu it's sometimes called "Ubuntu clang version"
return 'clang version' in version_string
compiler_path = os.path.realpath(results[0].strip())
# On RHEL/CentOS c++ is a gcc compiler wrapper
if os.path.basename(compiler_path) == 'c++' and 'gcc version' in version_string:
return True
return any(name in compiler_path for name in _accepted_compilers_for_platform())
if IS_MACOS:
# Check for 'clang' or 'clang++'
return version_string.startswith("Apple clang")
return False
def get_compiler_abi_compatibility_and_version(compiler) -> tuple[bool, TorchVersion]:
"""
Determine if the given compiler is ABI-compatible with PyTorch alongside its version.
Args:
compiler (str): The compiler executable name to check (e.g. ``g++``).
Must be executable in a shell process.
Returns:
A tuple that contains a boolean that indicates whether the compiler is (likely) ABI-compatible with PyTorch,
followed by a `TorchVersion` string that contains the compiler version separated by dots.
"""
if not _is_binary_build():
return (True, TorchVersion('0.0.0'))
if os.environ.get('TORCH_DONT_CHECK_COMPILER_ABI') in ['ON', '1', 'YES', 'TRUE', 'Y']:
return (True, TorchVersion('0.0.0'))
# First check if the compiler is one of the expected ones for the particular platform.
if not check_compiler_ok_for_platform(compiler):
warnings.warn(WRONG_COMPILER_WARNING.format(
user_compiler=compiler,
pytorch_compiler=_accepted_compilers_for_platform()[0],
platform=sys.platform))
return (False, TorchVersion('0.0.0'))
if IS_MACOS:
# There is no particular minimum version we need for clang, so we're good here.
return (True, TorchVersion('0.0.0'))
try:
if IS_LINUX:
minimum_required_version = MINIMUM_GCC_VERSION
versionstr = subprocess.check_output([compiler, '-dumpfullversion', '-dumpversion'])
version = versionstr.decode(*SUBPROCESS_DECODE_ARGS).strip().split('.')
else:
minimum_required_version = MINIMUM_MSVC_VERSION
compiler_info = subprocess.check_output(compiler, stderr=subprocess.STDOUT)
match = re.search(r'(\d+)\.(\d+)\.(\d+)', compiler_info.decode(*SUBPROCESS_DECODE_ARGS).strip())
version = ['0', '0', '0'] if match is None else list(match.groups())
except Exception:
_, error, _ = sys.exc_info()
warnings.warn(f'Error checking compiler version for {compiler}: {error}')
return (False, TorchVersion('0.0.0'))
if tuple(map(int, version)) >= minimum_required_version:
return (True, TorchVersion('.'.join(version)))
compiler = f'{compiler} {".".join(version)}'
warnings.warn(ABI_INCOMPATIBILITY_WARNING.format(compiler))
return (False, TorchVersion('.'.join(version)))
def _check_cuda_version(compiler_name: str, compiler_version: TorchVersion) -> None:
if not CUDA_HOME:
raise RuntimeError(CUDA_NOT_FOUND_MESSAGE)
nvcc = os.path.join(CUDA_HOME, 'bin', 'nvcc')
cuda_version_str = subprocess.check_output([nvcc, '--version']).strip().decode(*SUBPROCESS_DECODE_ARGS)
cuda_version = re.search(r'release (\d+[.]\d+)', cuda_version_str)
if cuda_version is None:
return
cuda_str_version = cuda_version.group(1)
cuda_ver = Version(cuda_str_version)
if torch.version.cuda is None:
return
torch_cuda_version = Version(torch.version.cuda)
if cuda_ver != torch_cuda_version:
# major/minor attributes are only available in setuptools>=49.4.0
if getattr(cuda_ver, "major", None) is None:
raise ValueError("setuptools>=49.4.0 is required")
if cuda_ver.major != torch_cuda_version.major:
raise RuntimeError(CUDA_MISMATCH_MESSAGE.format(cuda_str_version, torch.version.cuda))
warnings.warn(CUDA_MISMATCH_WARN.format(cuda_str_version, torch.version.cuda))
if not (sys.platform.startswith('linux') and
os.environ.get('TORCH_DONT_CHECK_COMPILER_ABI') not in ['ON', '1', 'YES', 'TRUE', 'Y'] and
_is_binary_build()):
return
cuda_compiler_bounds: VersionMap = CUDA_CLANG_VERSIONS if compiler_name.startswith('clang') else CUDA_GCC_VERSIONS
if cuda_str_version not in cuda_compiler_bounds:
warnings.warn(f'There are no {compiler_name} version bounds defined for CUDA version {cuda_str_version}')
else:
min_compiler_version, max_excl_compiler_version = cuda_compiler_bounds[cuda_str_version]
# Special case for 11.4.0, which has lower compiler bounds than 11.4.1
if "V11.4.48" in cuda_version_str and cuda_compiler_bounds == CUDA_GCC_VERSIONS:
max_excl_compiler_version = (11, 0)
min_compiler_version_str = '.'.join(map(str, min_compiler_version))
max_excl_compiler_version_str = '.'.join(map(str, max_excl_compiler_version))
version_bound_str = f'>={min_compiler_version_str}, <{max_excl_compiler_version_str}'
if compiler_version < TorchVersion(min_compiler_version_str):
raise RuntimeError(
f'The current installed version of {compiler_name} ({compiler_version}) is less '
f'than the minimum required version by CUDA {cuda_str_version} ({min_compiler_version_str}). '
f'Please make sure to use an adequate version of {compiler_name} ({version_bound_str}).'
)
if compiler_version >= TorchVersion(max_excl_compiler_version_str):
raise RuntimeError(
f'The current installed version of {compiler_name} ({compiler_version}) is greater '
f'than the maximum required version by CUDA {cuda_str_version}. '
f'Please make sure to use an adequate version of {compiler_name} ({version_bound_str}).'
)
def _append_sycl_std_if_no_std_present(cflags):
if not any(flag.startswith('-sycl-std=') for flag in cflags):
cflags.append('-sycl-std=2020')
def _wrap_sycl_host_flags(cflags):
host_cxx = get_cxx_compiler()
host_cflags = [
f'-fsycl-host-compiler={host_cxx}',
shlex.quote(f'-fsycl-host-compiler-options={cflags}'),
]
return host_cflags
class BuildExtension(build_ext):
"""
A custom :mod:`setuptools` build extension.
This :class:`setuptools.build_ext` subclass takes care of passing the
minimum required compiler flags (e.g. ``-std=c++17``) as well as mixed
C++/CUDA/SYCL compilation (and support for CUDA/SYCL files in general).
When using :class:`BuildExtension`, it is allowed to supply a dictionary
for ``extra_compile_args`` (rather than the usual list) that maps from
languages/compilers (the only expected values are ``cxx``, ``nvcc`` or
``sycl``) to a list of additional compiler flags to supply to the compiler.
This makes it possible to supply different flags to the C++, CUDA and SYCL
compiler during mixed compilation.
``use_ninja`` (bool): If ``use_ninja`` is ``True`` (default), then we
attempt to build using the Ninja backend. Ninja greatly speeds up
compilation compared to the standard ``setuptools.build_ext``.
Falls back to the standard distutils backend if Ninja is not available.
.. note::
By default, the Ninja backend uses #CPUS + 2 workers to build the
extension. This may use up too many resources on some systems. One
can control the number of workers by setting the `MAX_JOBS` environment
variable to a non-negative number.
"""
@classmethod
def with_options(cls, **options):
"""Return a subclass with alternative constructor that extends any original keyword arguments to the original constructor with the given options."""
class cls_with_options(cls): # type: ignore[misc, valid-type]
def __init__(self, *args, **kwargs):
kwargs.update(options)
super().__init__(*args, **kwargs)
return cls_with_options
def __init__(self, *args, **kwargs) -> None:
super().__init__(*args, **kwargs)
self.no_python_abi_suffix = kwargs.get("no_python_abi_suffix", False)
self.use_ninja = kwargs.get('use_ninja', True)
if self.use_ninja:
# Test if we can use ninja. Fallback otherwise.
msg = ('Attempted to use ninja as the BuildExtension backend but '
'{}. Falling back to using the slow distutils backend.')
if not is_ninja_available():
warnings.warn(msg.format('we could not find ninja.'))
self.use_ninja = False
def finalize_options(self) -> None:
super().finalize_options()
if self.use_ninja:
self.force = True
def build_extensions(self) -> None:
compiler_name, compiler_version = self._check_abi()
cuda_ext = False
sycl_ext = False
extension_iter = iter(self.extensions)
extension = next(extension_iter, None)
while not (cuda_ext and sycl_ext) and extension:
for source in extension.sources:
_, ext = os.path.splitext(source)
if ext == '.cu':
cuda_ext = True
elif ext == '.sycl':
sycl_ext = True
# This check accounts for the case when CUDA and SYCL sources
# are mixed in the same extension. We can stop checking
# sources once both are found or there are no more sources.
if cuda_ext and sycl_ext:
break
extension = next(extension_iter, None)
if sycl_ext:
assert self.use_ninja, "ninja is required to build sycl extensions."
if cuda_ext and not IS_HIP_EXTENSION:
_check_cuda_version(compiler_name, compiler_version)
for extension in self.extensions:
# Ensure at least an empty list of flags for 'cxx', 'nvcc' and 'sycl' when
# extra_compile_args is a dict. Otherwise, default torch flags do
# not get passed. Necessary when only one of 'cxx', 'nvcc' or 'sycl' is
# passed to extra_compile_args in CUDAExtension or SyclExtension, i.e.
# CUDAExtension(..., extra_compile_args={'cxx': [...]})
# or
# CUDAExtension(..., extra_compile_args={'nvcc': [...]})
if isinstance(extension.extra_compile_args, dict):
for ext in ['cxx', 'nvcc', 'sycl']:
if ext not in extension.extra_compile_args:
extension.extra_compile_args[ext] = []
self._add_compile_flag(extension, '-DTORCH_API_INCLUDE_EXTENSION_H')
if IS_HIP_EXTENSION:
self._hipify_compile_flags(extension)
if extension.py_limited_api:
# compile any extension that has passed in py_limited_api to the
# Extension constructor with the Py_LIMITED_API flag set to our
# min supported CPython version.
# See https://docs.python.org/3/c-api/stable.html#c.Py_LIMITED_API
self._add_compile_flag(extension, f'-DPy_LIMITED_API={min_supported_cpython}')
else:
# pybind11 is not CPython API stable so don't add these flags used when
# compiling pybind11 when pybind11 is not even used. otherwise, the build
# logs are confusing.
# See note [Pybind11 ABI constants]
for name in ["COMPILER_TYPE", "STDLIB", "BUILD_ABI"]:
val = getattr(torch._C, f"_PYBIND11_{name}")
if val is not None and not IS_WINDOWS:
self._add_compile_flag(extension, f'-DPYBIND11_{name}="{val}"')
self._define_torch_extension_name(extension)
self._add_gnu_cpp_abi_flag(extension)
if 'nvcc_dlink' in extension.extra_compile_args:
assert self.use_ninja, f"With dlink=True, ninja is required to build cuda extension {extension.name}."
# Register .cu, .cuh, .hip, .mm and .sycl as valid source extensions.
# NOTE: At the moment .sycl is not a standard extension for SYCL sources recognized
# by the compiler. Here we introduce a torch-level convention that SYCL sources
# should have the .sycl file extension.
self.compiler.src_extensions += ['.cu', '.cuh', '.hip', '.sycl']
if torch.backends.mps.is_built():
self.compiler.src_extensions += ['.mm']
# Save the original _compile method for later.
if self.compiler.compiler_type == 'msvc':
self.compiler._cpp_extensions += ['.cu', '.cuh']
original_compile = self.compiler.compile
original_spawn = self.compiler.spawn
else:
original_compile = self.compiler._compile
def append_std17_if_no_std_present(cflags) -> None:
# NVCC does not allow multiple -std to be passed, so we avoid
# overriding the option if the user explicitly passed it.
cpp_format_prefix = '/{}:' if self.compiler.compiler_type == 'msvc' else '-{}='
cpp_flag_prefix = cpp_format_prefix.format('std')
cpp_flag = cpp_flag_prefix + 'c++17'
if not any(flag.startswith(cpp_flag_prefix) for flag in cflags):
cflags.append(cpp_flag)
def unix_cuda_flags(cflags):
cflags = (COMMON_NVCC_FLAGS +
['--compiler-options', "'-fPIC'"] +
cflags + _get_cuda_arch_flags(cflags))
# NVCC does not allow multiple -ccbin/--compiler-bindir to be passed, so we avoid
# overriding the option if the user explicitly passed it.
_ccbin = os.getenv("CC")
if (
_ccbin is not None
and not any(flag.startswith(('-ccbin', '--compiler-bindir')) for flag in cflags)
):
cflags.extend(['-ccbin', _ccbin])
return cflags
def convert_to_absolute_paths_inplace(paths):
# Helper function. See Note [Absolute include_dirs]
if paths is not None:
for i in range(len(paths)):
if not os.path.isabs(paths[i]):
paths[i] = os.path.abspath(paths[i])
def unix_wrap_single_compile(obj, src, ext, cc_args, extra_postargs, pp_opts) -> None:
# Copy before we make any modifications.
cflags = copy.deepcopy(extra_postargs)
try:
original_compiler = self.compiler.compiler_so
if _is_cuda_file(src):
nvcc = [_join_rocm_home('bin', 'hipcc') if IS_HIP_EXTENSION else _join_cuda_home('bin', 'nvcc')]
self.compiler.set_executable('compiler_so', nvcc)
if isinstance(cflags, dict):
cflags = cflags['nvcc']
if IS_HIP_EXTENSION:
cflags = COMMON_HIPCC_FLAGS + cflags + _get_rocm_arch_flags(cflags)
else:
cflags = unix_cuda_flags(cflags)
elif isinstance(cflags, dict):
cflags = cflags['cxx']
if IS_HIP_EXTENSION:
cflags = COMMON_HIP_FLAGS + cflags
append_std17_if_no_std_present(cflags)
original_compile(obj, src, ext, cc_args, cflags, pp_opts)
finally:
# Put the original compiler back in place.
self.compiler.set_executable('compiler_so', original_compiler)
def unix_wrap_ninja_compile(sources,
output_dir=None,
macros=None,
include_dirs=None,
debug=0,
extra_preargs=None,
extra_postargs=None,
depends=None):
r"""Compiles sources by outputting a ninja file and running it."""
# NB: I copied some lines from self.compiler (which is an instance
# of distutils.UnixCCompiler). See the following link.
# https://github.com/python/cpython/blob/f03a8f8d5001963ad5b5b28dbd95497e9cc15596/Lib/distutils/ccompiler.py#L564-L567
# This can be fragile, but a lot of other repos also do this
# (see https://github.com/search?q=_setup_compile&type=Code)
# so it is probably OK; we'll also get CI signal if/when
# we update our python version (which is when distutils can be
# upgraded)
# Use absolute path for output_dir so that the object file paths
# (`objects`) get generated with absolute paths.
output_dir = os.path.abspath(output_dir)
# See Note [Absolute include_dirs]
convert_to_absolute_paths_inplace(self.compiler.include_dirs)
_, objects, extra_postargs, pp_opts, _ = \
self.compiler._setup_compile(output_dir, macros,
include_dirs, sources,
depends, extra_postargs)
common_cflags = self.compiler._get_cc_args(pp_opts, debug, extra_preargs)
extra_cc_cflags = self.compiler.compiler_so[1:]
with_cuda = any(map(_is_cuda_file, sources))
with_sycl = any(map(_is_sycl_file, sources))
# extra_postargs can be either:
# - a dict mapping cxx/nvcc/sycl to extra flags
# - a list of extra flags.
if isinstance(extra_postargs, dict):
post_cflags = extra_postargs['cxx']
else:
post_cflags = list(extra_postargs)
if IS_HIP_EXTENSION:
post_cflags = COMMON_HIP_FLAGS + post_cflags
append_std17_if_no_std_present(post_cflags)
cuda_post_cflags = None
cuda_cflags = None
if with_cuda:
cuda_cflags = common_cflags
if isinstance(extra_postargs, dict):
cuda_post_cflags = extra_postargs['nvcc']
else:
cuda_post_cflags = list(extra_postargs)
if IS_HIP_EXTENSION:
cuda_post_cflags = cuda_post_cflags + _get_rocm_arch_flags(cuda_post_cflags)
cuda_post_cflags = COMMON_HIP_FLAGS + COMMON_HIPCC_FLAGS + cuda_post_cflags
else:
cuda_post_cflags = unix_cuda_flags(cuda_post_cflags)
append_std17_if_no_std_present(cuda_post_cflags)
cuda_cflags = [shlex.quote(f) for f in cuda_cflags]
cuda_post_cflags = [shlex.quote(f) for f in cuda_post_cflags]
if isinstance(extra_postargs, dict) and 'nvcc_dlink' in extra_postargs:
cuda_dlink_post_cflags = unix_cuda_flags(extra_postargs['nvcc_dlink'])
else:
cuda_dlink_post_cflags = None
sycl_post_cflags = None
sycl_cflags = None
sycl_dlink_post_cflags = None
if with_sycl:
sycl_cflags = extra_cc_cflags + common_cflags + _COMMON_SYCL_FLAGS
if isinstance(extra_postargs, dict):
sycl_post_cflags = extra_postargs['sycl']
else:
sycl_post_cflags = list(extra_postargs)
append_std17_if_no_std_present(sycl_cflags)
_append_sycl_std_if_no_std_present(sycl_cflags)
host_cflags = extra_cc_cflags + common_cflags + post_cflags
append_std17_if_no_std_present(host_cflags)
# escaping quoted arguments to pass them through the SYCL compiler
host_cflags = [item.replace('"', '\\\\"') for item in host_cflags]
host_cflags = ' '.join(host_cflags)
# Note the order: shlex.quote the sycl_cflags first, then append the
# _wrap_sycl_host_flags output. The reason is that the sycl host flags are
# already quoted, space-containing strings passed to the SYCL compiler.
sycl_cflags = [shlex.quote(f) for f in sycl_cflags]
sycl_cflags += _wrap_sycl_host_flags(host_cflags)
sycl_dlink_post_cflags = _SYCL_DLINK_FLAGS
sycl_post_cflags = [shlex.quote(f) for f in sycl_post_cflags]
_write_ninja_file_and_compile_objects(
sources=sources,
objects=objects,
cflags=[shlex.quote(f) for f in extra_cc_cflags + common_cflags],
post_cflags=[shlex.quote(f) for f in post_cflags],
cuda_cflags=cuda_cflags,
cuda_post_cflags=cuda_post_cflags,
cuda_dlink_post_cflags=cuda_dlink_post_cflags,
sycl_cflags=sycl_cflags,
sycl_post_cflags=sycl_post_cflags,
sycl_dlink_post_cflags=sycl_dlink_post_cflags,
build_directory=output_dir,
verbose=True,
with_cuda=with_cuda,
with_sycl=with_sycl)
# Return *all* object filenames, not just the ones we just built.
return objects
def win_cuda_flags(cflags):
return (COMMON_NVCC_FLAGS +
cflags + _get_cuda_arch_flags(cflags))
def win_wrap_single_compile(sources,
output_dir=None,
macros=None,
include_dirs=None,
debug=0,
extra_preargs=None,
extra_postargs=None,
depends=None):
self.cflags = copy.deepcopy(extra_postargs)
extra_postargs = None
def spawn(cmd):
# Using regex to match src, obj and include files
src_regex = re.compile('/T(p|c)(.*)')
src_list = [
m.group(2) for m in (src_regex.match(elem) for elem in cmd)
if m
]
obj_regex = re.compile('/Fo(.*)')
obj_list = [
m.group(1) for m in (obj_regex.match(elem) for elem in cmd)
if m
]
include_regex = re.compile(r'((\-|\/)I.*)')
include_list = [
m.group(1)
for m in (include_regex.match(elem) for elem in cmd) if m
]
if len(src_list) >= 1 and len(obj_list) >= 1:
src = src_list[0]
obj = obj_list[0]
if _is_cuda_file(src):
nvcc = _join_cuda_home('bin', 'nvcc')
if isinstance(self.cflags, dict):
cflags = self.cflags['nvcc']
elif isinstance(self.cflags, list):
cflags = self.cflags
else:
cflags = []
cflags = win_cuda_flags(cflags) + ['-std=c++17', '--use-local-env']
for flag in COMMON_MSVC_FLAGS:
cflags = ['-Xcompiler', flag] + cflags
for ignore_warning in MSVC_IGNORE_CUDAFE_WARNINGS:
cflags = ['-Xcudafe', '--diag_suppress=' + ignore_warning] + cflags
cmd = [nvcc, '-c', src, '-o', obj] + include_list + cflags
elif isinstance(self.cflags, dict):
cflags = COMMON_MSVC_FLAGS + self.cflags['cxx']
append_std17_if_no_std_present(cflags)
cmd += cflags
elif isinstance(self.cflags, list):
cflags = COMMON_MSVC_FLAGS + self.cflags
append_std17_if_no_std_present(cflags)
cmd += cflags
return original_spawn(cmd)
try:
self.compiler.spawn = spawn
return original_compile(sources, output_dir, macros,
include_dirs, debug, extra_preargs,
extra_postargs, depends)
finally:
self.compiler.spawn = original_spawn
def win_wrap_ninja_compile(sources,
output_dir=None,
macros=None,
include_dirs=None,
debug=0,
extra_preargs=None,
extra_postargs=None,
depends=None):
if not self.compiler.initialized:
self.compiler.initialize()
output_dir = os.path.abspath(output_dir)
# Note [Absolute include_dirs]
# Convert any relative paths in self.compiler.include_dirs to absolute paths.
# For ninja builds, the build location is not local; instead, the build happens
# in a script-created build folder, so relative paths lose their correctness.
# To be consistent with JIT extensions, we allow the user to enter relative
# include_dirs in setuptools.setup, and we convert them to absolute paths here.
convert_to_absolute_paths_inplace(self.compiler.include_dirs)
_, objects, extra_postargs, pp_opts, _ = \
self.compiler._setup_compile(output_dir, macros,
include_dirs, sources,
depends, extra_postargs)
common_cflags = extra_preargs or []
cflags = []
if debug:
cflags.extend(self.compiler.compile_options_debug)
else:
cflags.extend(self.compiler.compile_options)
common_cflags.extend(COMMON_MSVC_FLAGS)
cflags = cflags + common_cflags + pp_opts
with_cuda = any(map(_is_cuda_file, sources))
# extra_postargs can be either:
# - a dict mapping cxx/nvcc to extra flags
# - a list of extra flags.
if isinstance(extra_postargs, dict):
post_cflags = extra_postargs['cxx']
else:
post_cflags = list(extra_postargs)
append_std17_if_no_std_present(post_cflags)
cuda_post_cflags = None
cuda_cflags = None
if with_cuda:
cuda_cflags = ['-std=c++17', '--use-local-env']
for common_cflag in common_cflags:
cuda_cflags.append('-Xcompiler')
cuda_cflags.append(common_cflag)
for ignore_warning in MSVC_IGNORE_CUDAFE_WARNINGS:
cuda_cflags.append('-Xcudafe')
cuda_cflags.append('--diag_suppress=' + ignore_warning)
cuda_cflags.extend(pp_opts)
if isinstance(extra_postargs, dict):
cuda_post_cflags = extra_postargs['nvcc']
else:
cuda_post_cflags = list(extra_postargs)
cuda_post_cflags = win_cuda_flags(cuda_post_cflags)
cflags = _nt_quote_args(cflags)
post_cflags = _nt_quote_args(post_cflags)
if with_cuda:
cuda_cflags = _nt_quote_args(cuda_cflags)
cuda_post_cflags = _nt_quote_args(cuda_post_cflags)
if isinstance(extra_postargs, dict) and 'nvcc_dlink' in extra_postargs:
cuda_dlink_post_cflags = win_cuda_flags(extra_postargs['nvcc_dlink'])
else:
cuda_dlink_post_cflags = None
_write_ninja_file_and_compile_objects(
sources=sources,
objects=objects,
cflags=cflags,
post_cflags=post_cflags,
cuda_cflags=cuda_cflags,
cuda_post_cflags=cuda_post_cflags,
cuda_dlink_post_cflags=cuda_dlink_post_cflags,
sycl_cflags=None,
sycl_post_cflags=None,
sycl_dlink_post_cflags=None,
build_directory=output_dir,
verbose=True,
with_cuda=with_cuda,
with_sycl=False)
# Return *all* object filenames, not just the ones we just built.
return objects
# Monkey-patch the _compile or compile method.
# https://github.com/python/cpython/blob/dc0284ee8f7a270b6005467f26d8e5773d76e959/Lib/distutils/ccompiler.py#L511
if self.compiler.compiler_type == 'msvc':
if self.use_ninja:
self.compiler.compile = win_wrap_ninja_compile
else:
self.compiler.compile = win_wrap_single_compile
else:
if self.use_ninja:
self.compiler.compile = unix_wrap_ninja_compile
else:
self.compiler._compile = unix_wrap_single_compile
build_ext.build_extensions(self)
def get_ext_filename(self, ext_name):
# Get the original shared library name. For Python 3, this name will be
# suffixed with "<SOABI>.so", where <SOABI> will be something like
# cpython-37m-x86_64-linux-gnu.
ext_filename = super().get_ext_filename(ext_name)
# If `no_python_abi_suffix` is `True`, we omit the Python 3 ABI
# component. This makes building shared libraries with setuptools that
# aren't Python modules nicer.
if self.no_python_abi_suffix:
# The parts will be e.g. ["my_extension", "cpython-37m-x86_64-linux-gnu", "so"].
ext_filename_parts = ext_filename.split('.')
# Omit the second to last element.
without_abi = ext_filename_parts[:-2] + ext_filename_parts[-1:]
ext_filename = '.'.join(without_abi)
return ext_filename
def _check_abi(self) -> tuple[str, TorchVersion]:
# On some platforms, like Windows, compiler_cxx is not available.
if hasattr(self.compiler, 'compiler_cxx'):
compiler = self.compiler.compiler_cxx[0]
else:
compiler = get_cxx_compiler()
_, version = get_compiler_abi_compatibility_and_version(compiler)
# Warn user if VC env is activated but `DISTUTILS_USE_SDK` is not set.
if IS_WINDOWS and 'VSCMD_ARG_TGT_ARCH' in os.environ and 'DISTUTILS_USE_SDK' not in os.environ:
msg = ('It seems that the VC environment is activated but DISTUTILS_USE_SDK is not set.'
' This may lead to multiple activations of the VC env.'
' Please set `DISTUTILS_USE_SDK=1` and try again.')
raise UserWarning(msg)
return compiler, version
def _add_compile_flag(self, extension, flag):
extension.extra_compile_args = copy.deepcopy(extension.extra_compile_args)
if isinstance(extension.extra_compile_args, dict):
for args in extension.extra_compile_args.values():
args.append(flag)
else:
extension.extra_compile_args.append(flag)
# Simple hipify, replace the first occurrence of CUDA with HIP
# in flags starting with "-" and containing "CUDA", but exclude -I flags
def _hipify_compile_flags(self, extension):
if isinstance(extension.extra_compile_args, dict) and 'nvcc' in extension.extra_compile_args:
modified_flags = []
for flag in extension.extra_compile_args['nvcc']:
if flag.startswith("-") and "CUDA" in flag and not flag.startswith("-I"):
# check/split flag into flag and value
parts = flag.split("=", 1)
if len(parts) == 2:
flag_part, value_part = parts
# replace first instance of "CUDA" with "HIP" only in the flag and not in the flag value
modified_flag_part = flag_part.replace("CUDA", "HIP", 1)
modified_flag = f"{modified_flag_part}={value_part}"
else:
# replace first instance of "CUDA" with "HIP" in the flag
modified_flag = flag.replace("CUDA", "HIP", 1)
modified_flags.append(modified_flag)
print(f'Modified flag: {flag} -> {modified_flag}', file=sys.stderr)
else:
modified_flags.append(flag)
extension.extra_compile_args['nvcc'] = modified_flags
def _define_torch_extension_name(self, extension):
# pybind11 doesn't support dots in module names,
# so in order to support extensions inside packages
# like torch._C, we take the last part of the string
# as the library name
names = extension.name.split('.')
name = names[-1]
define = f'-DTORCH_EXTENSION_NAME={name}'
self._add_compile_flag(extension, define)
def _add_gnu_cpp_abi_flag(self, extension):
# use the same CXX ABI as what PyTorch was compiled with
self._add_compile_flag(extension, '-D_GLIBCXX_USE_CXX11_ABI=' + str(int(torch._C._GLIBCXX_USE_CXX11_ABI)))
def CppExtension(name, sources, *args, **kwargs):
"""
Create a :class:`setuptools.Extension` for C++.
Convenience method that creates a :class:`setuptools.Extension` with the
bare minimum (but often sufficient) arguments to build a C++ extension.
All arguments are forwarded to the :class:`setuptools.Extension`
constructor. The full list of arguments can be found at
https://setuptools.pypa.io/en/latest/userguide/ext_modules.html#extension-api-reference
.. warning::
The PyTorch python API (as provided in libtorch_python) cannot be built
with the flag ``py_limited_api=True``. When this flag is passed, it is
the user's responsibility in their library to not use APIs from
libtorch_python (in particular pytorch/python bindings) and to only use
APIs from libtorch (aten objects, operators and the dispatcher). For
example, to give access to custom ops from python, the library should
register the ops through the dispatcher.
Unlike CPython's setuptools, which does not define -DPy_LIMITED_API
as a compile flag when py_limited_api is specified as an option for
the "bdist_wheel" command in ``setup``, PyTorch does! We will specify
-DPy_LIMITED_API=min_supported_cpython to best enforce consistency,
safety, and sanity in order to encourage best practices. To target a
different version, set min_supported_cpython to the hex code of the
CPython version of choice.
Example:
>>> # xdoctest: +SKIP
>>> # xdoctest: +REQUIRES(env:TORCH_DOCTEST_CPP_EXT)
>>> from setuptools import setup
>>> from torch.utils.cpp_extension import BuildExtension, CppExtension
>>> setup(
... name='extension',
... ext_modules=[
... CppExtension(
... name='extension',
... sources=['extension.cpp'],
... extra_compile_args=['-g'],
... extra_link_args=['-Wl,--no-as-needed', '-lm'])
... ],
... cmdclass={
... 'build_ext': BuildExtension
... })
"""
include_dirs = kwargs.get('include_dirs', [])
include_dirs += include_paths()
kwargs['include_dirs'] = include_dirs
library_dirs = kwargs.get('library_dirs', [])
library_dirs += library_paths()
kwargs['library_dirs'] = library_dirs
libraries = kwargs.get('libraries', [])
libraries.append('c10')
libraries.append('torch')
libraries.append('torch_cpu')
if not kwargs.get('py_limited_api', False):
# torch_python uses more than the python limited api
libraries.append('torch_python')
if IS_WINDOWS:
libraries.append("sleef")
kwargs['libraries'] = libraries
kwargs['language'] = 'c++'
return setuptools.Extension(name, sources, *args, **kwargs)
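# A minimal, illustrative sketch (not executed at import time) of the
# py_limited_api path described in the warning above: the extension opts into
# the CPython stable ABI and the wheel is tagged accordingly. The project
# name, source file and the 'cp39' tag are hypothetical placeholders.
def _example_cpp_extension_py_limited_api_setup():
    from setuptools import setup
    from torch.utils.cpp_extension import BuildExtension, CppExtension
    setup(
        name='my_stable_op',
        ext_modules=[
            CppExtension(
                name='my_stable_op',
                sources=['my_stable_op.cpp'],
                # With py_limited_api=True torch_python is not linked, so the
                # sources must stick to libtorch (ATen/dispatcher) APIs.
                py_limited_api=True,
            )
        ],
        cmdclass={'build_ext': BuildExtension},
        # One possible way to tag the resulting wheel for the limited API.
        options={'bdist_wheel': {'py_limited_api': 'cp39'}},
    )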
def CUDAExtension(name, sources, *args, **kwargs):
"""
Create a :class:`setuptools.Extension` for CUDA/C++.
Convenience method that creates a :class:`setuptools.Extension` with the
bare minimum (but often sufficient) arguments to build a CUDA/C++
extension. This includes the CUDA include path, library path and runtime
library.
All arguments are forwarded to the :class:`setuptools.Extension`
constructor. The full list of arguments can be found at
https://setuptools.pypa.io/en/latest/userguide/ext_modules.html#extension-api-reference
.. warning::
The PyTorch python API (as provided in libtorch_python) cannot be built
with the flag ``py_limited_api=True``. When this flag is passed, it is
the user's responsibility in their library to not use APIs from
libtorch_python (in particular pytorch/python bindings) and to only use
APIs from libtorch (aten objects, operators and the dispatcher). For
example, to give access to custom ops from python, the library should
register the ops through the dispatcher.
Unlike CPython's setuptools, which does not define -DPy_LIMITED_API
as a compile flag when py_limited_api is specified as an option for
the "bdist_wheel" command in ``setup``, PyTorch does! We will specify
-DPy_LIMITED_API=min_supported_cpython to best enforce consistency,
safety, and sanity in order to encourage best practices. To target a
different version, set min_supported_cpython to the hex code of the
CPython version of choice.
Example:
>>> # xdoctest: +SKIP
>>> # xdoctest: +REQUIRES(env:TORCH_DOCTEST_CPP_EXT)
>>> from setuptools import setup
>>> from torch.utils.cpp_extension import BuildExtension, CUDAExtension
>>> setup(
... name='cuda_extension',
... ext_modules=[
... CUDAExtension(
... name='cuda_extension',
... sources=['extension.cpp', 'extension_kernel.cu'],
... extra_compile_args={'cxx': ['-g'],
... 'nvcc': ['-O2']},
... extra_link_args=['-Wl,--no-as-needed', '-lcuda'])
... ],
... cmdclass={
... 'build_ext': BuildExtension
... })
Compute capabilities:
By default the extension will be compiled to run on all archs of the cards visible during the
building process of the extension, plus PTX. If down the road a new card is installed the
extension may need to be recompiled. If a visible card has a compute capability (CC) that's
newer than the newest version for which your nvcc can build fully-compiled binaries, PyTorch
will make nvcc fall back to building kernels with the newest version of PTX your nvcc does
support (see below for details on PTX).
You can override the default behavior using `TORCH_CUDA_ARCH_LIST` to explicitly specify which
CCs you want the extension to support:
``TORCH_CUDA_ARCH_LIST="6.1 8.6" python build_my_extension.py``
``TORCH_CUDA_ARCH_LIST="5.2 6.0 6.1 7.0 7.5 8.0 8.6+PTX" python build_my_extension.py``
The +PTX option causes extension kernel binaries to include PTX instructions for the specified
CC. PTX is an intermediate representation that allows kernels to runtime-compile for any CC >=
the specified CC (for example, 8.6+PTX generates PTX that can runtime-compile for any GPU with
CC >= 8.6). This improves your binary's forward compatibility. However, relying on older PTX to
provide forward compat by runtime-compiling for newer CCs can modestly reduce performance on
those newer CCs. If you know exact CC(s) of the GPUs you want to target, you're always better
off specifying them individually. For example, if you want your extension to run on 8.0 and 8.6,
"8.0+PTX" would work functionally because it includes PTX that can runtime-compile for 8.6, but
"8.0 8.6" would be better.
Note that while it's possible to include all supported archs, the more archs get included the
slower the building process will be, as it will build a separate kernel image for each arch.
Note that CUDA-11.5 nvcc will hit an internal compiler error while parsing torch/extension.h on Windows.
To work around the issue, move the python binding logic to a pure C++ file.
Example use:
#include <ATen/ATen.h>
at::Tensor SigmoidAlphaBlendForwardCuda(....)
Instead of:
#include <torch/extension.h>
torch::Tensor SigmoidAlphaBlendForwardCuda(...)
Currently open issue for nvcc bug: https://github.com/pytorch/pytorch/issues/69460
Complete workaround code example: https://github.com/facebookresearch/pytorch3d/commit/cb170ac024a949f1f9614ffe6af1c38d972f7d48
Relocatable device code linking:
If you want to reference device symbols across compilation units (across object files),
the object files need to be built with `relocatable device code` (-rdc=true or -dc).
An exception to this rule is "dynamic parallelism" (nested kernel launches) which is not used a lot anymore.
`Relocatable device code` is less optimized so it needs to be used only on object files that need it.
Using `-dlto` (Device Link Time Optimization) at the device code compilation step and `dlink` step
helps reduce the potential perf degradation of `-rdc`.
Note that it needs to be used at both steps to be useful.
If you have `rdc` objects you need to have an extra `-dlink` (device linking) step before the CPU symbol linking step.
There is also a case where `-dlink` is used without `-rdc`:
when an extension is linked against a static lib containing rdc-compiled objects
like the [NVSHMEM library](https://developer.nvidia.com/nvshmem).
Note: Ninja is required to build a CUDA Extension with RDC linking.
Example:
>>> # xdoctest: +SKIP
>>> # xdoctest: +REQUIRES(env:TORCH_DOCTEST_CPP_EXT)
>>> CUDAExtension(
... name='cuda_extension',
... sources=['extension.cpp', 'extension_kernel.cu'],
... dlink=True,
... dlink_libraries=["dlink_lib"],
... extra_compile_args={'cxx': ['-g'],
... 'nvcc': ['-O2', '-rdc=true']})
"""
library_dirs = kwargs.get('library_dirs', [])
library_dirs += library_paths(device_type="cuda")
kwargs['library_dirs'] = library_dirs
libraries = kwargs.get('libraries', [])
libraries.append('c10')
libraries.append('torch')
libraries.append('torch_cpu')
if not kwargs.get('py_limited_api', False):
# torch_python uses more than the python limited api
libraries.append('torch_python')
if IS_HIP_EXTENSION:
libraries.append('amdhip64')
libraries.append('c10_hip')
libraries.append('torch_hip')
else:
libraries.append('cudart')
libraries.append('c10_cuda')
libraries.append('torch_cuda')
kwargs['libraries'] = libraries
include_dirs = kwargs.get('include_dirs', [])
if IS_HIP_EXTENSION:
build_dir = os.getcwd()
hipify_result = hipify_python.hipify(
project_directory=build_dir,
output_directory=build_dir,
header_include_dirs=include_dirs,
includes=[os.path.join(build_dir, '*')], # limit scope to build_dir only
extra_files=[os.path.abspath(s) for s in sources],
show_detailed=True,
is_pytorch_extension=True,
hipify_extra_files_only=True, # don't hipify everything in includes path
)
hipified_sources = set()
for source in sources:
s_abs = os.path.abspath(source)
hipified_s_abs = (hipify_result[s_abs].hipified_path if (s_abs in hipify_result and
hipify_result[s_abs].hipified_path is not None) else s_abs)
# setup() arguments must *always* be /-separated paths relative to the setup.py directory,
# *never* absolute paths
hipified_sources.add(os.path.relpath(hipified_s_abs, build_dir))
sources = list(hipified_sources)
include_dirs += include_paths(device_type="cuda")
kwargs['include_dirs'] = include_dirs
kwargs['language'] = 'c++'
dlink_libraries = kwargs.get('dlink_libraries', [])
dlink = kwargs.get('dlink', False) or dlink_libraries
if dlink:
extra_compile_args = kwargs.get('extra_compile_args', {})
extra_compile_args_dlink = extra_compile_args.get('nvcc_dlink', [])
extra_compile_args_dlink += ['-dlink']
extra_compile_args_dlink += [f'-L{x}' for x in library_dirs]
extra_compile_args_dlink += [f'-l{x}' for x in dlink_libraries]
if (torch.version.cuda is not None) and TorchVersion(torch.version.cuda) >= '11.2':
extra_compile_args_dlink += ['-dlto'] # Device Link Time Optimization started from cuda 11.2
extra_compile_args['nvcc_dlink'] = extra_compile_args_dlink
kwargs['extra_compile_args'] = extra_compile_args
return setuptools.Extension(name, sources, *args, **kwargs)
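# A minimal, illustrative sketch (not executed at import time) of pinning the
# target compute capabilities via TORCH_CUDA_ARCH_LIST, as described in the
# "Compute capabilities" section above. The extension name, sources and the
# chosen arch list are hypothetical placeholders.
def _example_cuda_extension_arch_list_setup():
    import os
    from setuptools import setup
    from torch.utils.cpp_extension import BuildExtension, CUDAExtension
    # Build fully-compiled kernels for 8.0 and 8.6, plus PTX for 8.6 so that
    # newer cards can still runtime-compile the kernels.
    os.environ.setdefault('TORCH_CUDA_ARCH_LIST', '8.0 8.6+PTX')
    setup(
        name='my_cuda_op',
        ext_modules=[
            CUDAExtension(
                name='my_cuda_op',
                sources=['my_cuda_op.cpp', 'my_cuda_op_kernel.cu'],
                extra_compile_args={'cxx': ['-O2'], 'nvcc': ['-O2']},
            )
        ],
        cmdclass={'build_ext': BuildExtension},
    )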
def SyclExtension(name, sources, *args, **kwargs):
r"""
Creates a :class:`setuptools.Extension` for SYCL/C++.
Convenience method that creates a :class:`setuptools.Extension` with the
bare minimum (but often sufficient) arguments to build a SYCL/C++
extension.
All arguments are forwarded to the :class:`setuptools.Extension`
constructor.
.. warning::
The PyTorch python API (as provided in libtorch_python) cannot be built
with the flag ``py_limited_api=True``. When this flag is passed, it is
the user's responsibility in their library to not use APIs from
libtorch_python (in particular pytorch/python bindings) and to only use
APIs from libtorch (aten objects, operators and the dispatcher). For
example, to give access to custom ops from python, the library should
register the ops through the dispatcher.
Unlike CPython's setuptools, which does not define -DPy_LIMITED_API
as a compile flag when py_limited_api is specified as an option for
the "bdist_wheel" command in ``setup``, PyTorch does! We will specify
-DPy_LIMITED_API=min_supported_cpython to best enforce consistency,
safety, and sanity in order to encourage best practices. To target a
different version, set min_supported_cpython to the hex code of the
CPython version of choice.
Example:
>>> # xdoctest: +SKIP
>>> # xdoctest: +REQUIRES(env:TORCH_DOCTEST_CPP_EXT)
>>> from torch.utils.cpp_extension import BuildExtension, SyclExtension
>>> setup(
... name='xpu_extension',
... ext_modules=[
... SyclExtension(
... name='xpu_extension',
... sources=['extension.cpp', 'extension_kernel.cpp'],
... extra_compile_args={'cxx': ['-g', '-std=c++20', '-fPIC']})
... ],
... cmdclass={
... 'build_ext': BuildExtension
... })
By default the extension will be compiled to run on all archs of the cards visible during the
building process of the extension. If down the road a new card is installed the
extension may need to be recompiled. You can override the default behavior using
`TORCH_XPU_ARCH_LIST` to explicitly specify which device architectures you want the extension
to support:
``TORCH_XPU_ARCH_LIST="pvc,xe-lpg" python build_my_extension.py``
Note that while it's possible to include all supported archs, the more archs get included the
slower the building process will be, as it will build a separate kernel image for each arch.
Note: Ninja is required to build SyclExtension.
"""
library_dirs = kwargs.get("library_dirs", [])
library_dirs += library_paths()
kwargs["library_dirs"] = library_dirs
libraries = kwargs.get("libraries", [])
libraries.append("c10")
libraries.append("c10_xpu")
libraries.append("torch")
libraries.append("torch_cpu")
if not kwargs.get('py_limited_api', False):
# torch_python uses more than the python limited api
libraries.append("torch_python")
libraries.append("torch_xpu")
kwargs["libraries"] = libraries
include_dirs = kwargs.get("include_dirs", [])
include_dirs += include_paths()
kwargs["include_dirs"] = include_dirs
kwargs["language"] = "c++"
return setuptools.Extension(name, sources, *args, **kwargs)
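# A minimal, illustrative sketch (not executed at import time) of restricting
# the device architectures for a SyclExtension build via TORCH_XPU_ARCH_LIST,
# as described above. The extension name, sources and the 'pvc' arch are
# hypothetical placeholders.
def _example_sycl_extension_arch_list_setup():
    import os
    from setuptools import setup
    from torch.utils.cpp_extension import BuildExtension, SyclExtension
    os.environ.setdefault('TORCH_XPU_ARCH_LIST', 'pvc')
    setup(
        name='my_xpu_op',
        ext_modules=[
            SyclExtension(
                name='my_xpu_op',
                # The .sycl suffix follows the torch-level convention above.
                sources=['my_xpu_op.cpp', 'my_xpu_op_kernel.sycl'],
            )
        ],
        cmdclass={'build_ext': BuildExtension},
    )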
def include_paths(device_type: str = "cpu") -> list[str]:
"""
Get the include paths required to build a C++, CUDA, or SYCL extension.
Args:
device_type: Defaults to "cpu".
Returns:
A list of include path strings.
"""
lib_include = os.path.join(_TORCH_PATH, 'include')
paths = [
lib_include,
# Remove this once torch/torch.h is officially no longer supported for C++ extensions.
os.path.join(lib_include, 'torch', 'csrc', 'api', 'include'),
]
if device_type == "cuda" and IS_HIP_EXTENSION:
paths.append(os.path.join(lib_include, 'THH'))
paths.append(_join_rocm_home('include'))
elif device_type == "cuda":
cuda_home_include = _join_cuda_home('include')
# if we have the Debian/Ubuntu packages for cuda, we get /usr as cuda home,
# but gcc doesn't like having /usr/include passed explicitly
if cuda_home_include != '/usr/include':
paths.append(cuda_home_include)
# Support CUDA_INC_PATH env variable supported by CMake files
if (cuda_inc_path := os.environ.get("CUDA_INC_PATH", None)) and \
cuda_inc_path != '/usr/include':
paths.append(cuda_inc_path)
if CUDNN_HOME is not None:
paths.append(os.path.join(CUDNN_HOME, 'include'))
elif device_type == "xpu":
paths.append(_join_sycl_home('include'))
paths.append(_join_sycl_home('include', 'sycl'))
return paths
def library_paths(device_type: str = "cpu") -> list[str]:
"""
Get the library paths required to build a C++, CUDA, or SYCL extension.
Args:
device_type: Defaults to "cpu".
Returns:
A list of library path strings.
"""
# We need to link against libtorch.so
paths = [TORCH_LIB_PATH]
if device_type == "cuda" and IS_HIP_EXTENSION:
lib_dir = 'lib'
paths.append(_join_rocm_home(lib_dir))
if HIP_HOME is not None:
paths.append(os.path.join(HIP_HOME, 'lib'))
elif device_type == "cuda":
if IS_WINDOWS:
lib_dir = os.path.join('lib', 'x64')
else:
lib_dir = 'lib64'
if (not os.path.exists(_join_cuda_home(lib_dir)) and
os.path.exists(_join_cuda_home('lib'))):
# 64-bit CUDA may be installed in 'lib' (see e.g. gh-16955)
# Note that it's also possible both don't exist (see
# _find_cuda_home) - in that case we stay with 'lib64'.
lib_dir = 'lib'
paths.append(_join_cuda_home(lib_dir))
if CUDNN_HOME is not None:
paths.append(os.path.join(CUDNN_HOME, lib_dir))
elif device_type == "xpu":
if IS_WINDOWS:
lib_dir = os.path.join('lib', 'x64')
else:
lib_dir = 'lib64'
if (not os.path.exists(_join_sycl_home(lib_dir)) and
os.path.exists(_join_sycl_home('lib'))):
lib_dir = 'lib'
paths.append(_join_sycl_home(lib_dir))
return paths
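# An illustrative sketch of how include_paths() and library_paths() can drive
# a hand-rolled compile command outside of setuptools/ninja. The source and
# output names are hypothetical, and the flags are a minimal Linux/g++ subset
# rather than everything the builders above would pass.
def _example_manual_compile_command():
    import shlex
    includes = [f'-I{p}' for p in include_paths()]
    lib_dirs = [f'-L{p}' for p in library_paths()]
    cmd = (['c++', '-shared', '-fPIC', '-std=c++17',
            'manual_ext.cpp', '-o', 'manual_ext.so']
           + includes + lib_dirs
           + ['-ltorch', '-ltorch_cpu', '-lc10'])
    # Returned as a string for inspection; running it is left to the caller.
    return ' '.join(shlex.quote(c) for c in cmd)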
def load(name,
sources: Union[str, list[str]],
extra_cflags=None,
extra_cuda_cflags=None,
extra_sycl_cflags=None,
extra_ldflags=None,
extra_include_paths=None,
build_directory=None,
verbose=False,
with_cuda: Optional[bool] = None,
with_sycl: Optional[bool] = None,
is_python_module=True,
is_standalone=False,
keep_intermediates=True):
"""
Load a PyTorch C++ extension just-in-time (JIT).
To load an extension, a Ninja build file is emitted, which is used to
compile the given sources into a dynamic library. This library is
subsequently loaded into the current Python process as a module and
returned from this function, ready for use.
By default, the directory to which the build file is emitted and the
resulting library compiled to is ``<tmp>/torch_extensions/<name>``, where
``<tmp>`` is the temporary folder on the current platform and ``<name>``
the name of the extension. This location can be overridden in two ways.
First, if the ``TORCH_EXTENSIONS_DIR`` environment variable is set, it
replaces ``<tmp>/torch_extensions`` and all extensions will be compiled
into subfolders of this directory. Second, if the ``build_directory``
argument to this function is supplied, it overrides the entire path, i.e.
the library will be compiled into that folder directly.
To compile the sources, the default system compiler (``c++``) is used,
which can be overridden by setting the ``CXX`` environment variable. To pass
additional arguments to the compilation process, ``extra_cflags`` or
``extra_ldflags`` can be provided. For example, to compile your extension
with optimizations, pass ``extra_cflags=['-O3']``. You can also use
``extra_cflags`` to pass further include directories.
CUDA support with mixed compilation is provided. Simply pass CUDA source
files (``.cu`` or ``.cuh``) along with other sources. Such files will be
detected and compiled with nvcc rather than the C++ compiler. This includes
passing the CUDA lib64 directory as a library directory, and linking
``cudart``. You can pass additional flags to nvcc via
``extra_cuda_cflags``, just like with ``extra_cflags`` for C++. Various
heuristics for finding the CUDA install directory are used, which usually
work fine. If not, setting the ``CUDA_HOME`` environment variable is the
safest option.
SYCL support with mixed compilation is provided. Simply pass SYCL source
files (``.sycl``) along with other sources. Such files will be detected
and compiled with SYCL compiler (such as Intel DPC++ Compiler) rather
than the C++ compiler. You can pass additional flags to SYCL compiler
via ``extra_sycl_cflags``, just like with ``extra_cflags`` for C++.
SYCL compiler is expected to be found via system PATH environment
variable.
Args:
name: The name of the extension to build. This MUST be the same as the
name of the pybind11 module!
sources: A list of relative or absolute paths to C++ source files.
extra_cflags: optional list of compiler flags to forward to the build.
extra_cuda_cflags: optional list of compiler flags to forward to nvcc
when building CUDA sources.
extra_sycl_cflags: optional list of compiler flags to forward to SYCL
compiler when building SYCL sources.
extra_ldflags: optional list of linker flags to forward to the build.
extra_include_paths: optional list of include directories to forward
to the build.
build_directory: optional path to use as build workspace.
verbose: If ``True``, turns on verbose logging of load steps.
with_cuda: Determines whether CUDA headers and libraries are added to
the build. If set to ``None`` (default), this value is
automatically determined based on the existence of ``.cu`` or
``.cuh`` in ``sources``. Set it to ``True`` to force CUDA headers
and libraries to be included.
with_sycl: Determines whether SYCL headers and libraries are added to
the build. If set to ``None`` (default), this value is
automatically determined based on the existence of ``.sycl`` in
``sources``. Set it to ``True`` to force SYCL headers and
libraries to be included.
is_python_module: If ``True`` (default), imports the produced shared
library as a Python module. If ``False``, behavior depends on
``is_standalone``.
is_standalone: If ``False`` (default) loads the constructed extension
into the process as a plain dynamic library. If ``True``, build a
standalone executable.
Returns:
If ``is_python_module`` is ``True``:
Returns the loaded PyTorch extension as a Python module.
If ``is_python_module`` is ``False`` and ``is_standalone`` is ``False``:
Returns nothing. (The shared library is loaded into the process as
a side effect.)
If ``is_standalone`` is ``True``:
Returns the path to the executable. (On Windows, TORCH_LIB_PATH is
added to the PATH environment variable as a side effect.)
Example:
>>> # xdoctest: +SKIP
>>> from torch.utils.cpp_extension import load
>>> module = load(
... name='extension',
... sources=['extension.cpp', 'extension_kernel.cu'],
... extra_cflags=['-O2'],
... verbose=True)
"""
return _jit_compile(
name,
[sources] if isinstance(sources, str) else sources,
extra_cflags,
extra_cuda_cflags,
extra_sycl_cflags,
extra_ldflags,
extra_include_paths,
build_directory or _get_build_directory(name, verbose),
verbose,
with_cuda,
with_sycl,
is_python_module,
is_standalone,
keep_intermediates=keep_intermediates)
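# An illustrative sketch of JIT-loading an extension while overriding the
# compiler and the build location through the environment variables mentioned
# in the docstring above. The compiler name, directory and source file are
# hypothetical placeholders.
def _example_jit_load_with_env_overrides():
    import os
    os.environ.setdefault('CXX', 'g++-12')  # hypothetical compiler override
    os.environ.setdefault('TORCH_EXTENSIONS_DIR', '/tmp/my_torch_exts')
    return load(
        name='my_jit_op',
        sources=['my_jit_op.cpp'],
        extra_cflags=['-O3'],
        verbose=True,
    )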
def _get_pybind11_abi_build_flags():
# Note [Pybind11 ABI constants]
#
# Pybind11 before 2.4 used to build an ABI string using the following pattern:
# f"__pybind11_internals_v{PYBIND11_INTERNALS_VERSION}{PYBIND11_INTERNALS_KIND}{PYBIND11_BUILD_TYPE}__"
# Since 2.4 the compiler type, stdlib and build abi parameters are also encoded like this:
# f"__pybind11_internals_v{PYBIND11_INTERNALS_VERSION}{PYBIND11_INTERNALS_KIND}{PYBIND11_COMPILER_TYPE}{PYBIND11_STDLIB}{PYBIND11_BUILD_ABI}{PYBIND11_BUILD_TYPE}__"
#
# This was done in order to further narrow down the chances of compiler ABI incompatibility
# that can cause hard-to-debug segfaults.
# For PyTorch extensions we want to relax those restrictions and pass compiler, stdlib and abi properties
# captured during PyTorch native library compilation in torch/csrc/Module.cpp
abi_cflags = []
for pname in ["COMPILER_TYPE", "STDLIB", "BUILD_ABI"]:
pval = getattr(torch._C, f"_PYBIND11_{pname}")
if pval is not None and not IS_WINDOWS:
abi_cflags.append(f'-DPYBIND11_{pname}=\\"{pval}\\"')
return abi_cflags
def _get_glibcxx_abi_build_flags():
glibcxx_abi_cflags = ['-D_GLIBCXX_USE_CXX11_ABI=' + str(int(torch._C._GLIBCXX_USE_CXX11_ABI))]
return glibcxx_abi_cflags
def check_compiler_is_gcc(compiler):
if not IS_LINUX:
return False
env = os.environ.copy()
env['LC_ALL'] = 'C' # Don't localize output
try:
version_string = subprocess.check_output([compiler, '-v'], stderr=subprocess.STDOUT, env=env).decode(*SUBPROCESS_DECODE_ARGS)
except Exception:
try:
version_string = subprocess.check_output([compiler, '--version'], stderr=subprocess.STDOUT, env=env).decode(*SUBPROCESS_DECODE_ARGS)
except Exception:
return False
# Check for 'gcc' or 'g++' for sccache wrapper
pattern = re.compile("^COLLECT_GCC=(.*)$", re.MULTILINE)
results = re.findall(pattern, version_string)
if len(results) != 1:
return False
compiler_path = os.path.realpath(results[0].strip())
# On RHEL/CentOS c++ is a gcc compiler wrapper
if os.path.basename(compiler_path) == 'c++' and 'gcc version' in version_string:
return True
return False
def _check_and_build_extension_h_precompiler_headers(
extra_cflags,
extra_include_paths,
is_standalone=False):
r'''
Precompiled Headers (PCH) can pre-build the same headers and reduce build time for PyTorch load_inline modules.
GCC official manual: https://gcc.gnu.org/onlinedocs/gcc-4.0.4/gcc/Precompiled-Headers.html
PCH only works when the built PCH file (header.h.gch) and the build target share the same build parameters. So we need to
add a signature file to record the PCH file's parameters. If the build parameters (signature) change, the PCH file
should be rebuilt.
Note:
1. Windows and macOS have different PCH mechanisms. We only support Linux currently.
2. It only works on GCC/G++.
'''
if not IS_LINUX:
return
compiler = get_cxx_compiler()
b_is_gcc = check_compiler_is_gcc(compiler)
if b_is_gcc is False:
return
head_file = os.path.join(_TORCH_PATH, 'include', 'torch', 'extension.h')
head_file_pch = os.path.join(_TORCH_PATH, 'include', 'torch', 'extension.h.gch')
head_file_signature = os.path.join(_TORCH_PATH, 'include', 'torch', 'extension.h.sign')
def listToString(s):
# initialize an empty string
string = ""
if s is None:
return string
# traverse the list
for element in s:
string += (element + ' ')
# return string
return string
def format_precompiler_header_cmd(compiler, head_file, head_file_pch, common_cflags, torch_include_dirs, extra_cflags, extra_include_paths):
return re.sub(
r"[ \n]+",
" ",
f"""
{compiler} -x c++-header {head_file} -o {head_file_pch} {torch_include_dirs} {extra_include_paths} {extra_cflags} {common_cflags}
""",
).strip()
def command_to_signature(cmd):
signature = cmd.replace(' ', '_')
return signature
def check_pch_signature_in_file(file_path, signature):
b_exist = os.path.isfile(file_path)
if b_exist is False:
return False
with open(file_path) as file:
# read all content of a file
content = file.read()
# check if string present in a file
return signature == content
def _create_if_not_exist(path_dir):
if not os.path.exists(path_dir):
try:
Path(path_dir).mkdir(parents=True, exist_ok=True)
except OSError as exc: # Guard against race condition
if exc.errno != errno.EEXIST:
raise RuntimeError(f"Fail to create path {path_dir}") from exc
def write_pch_signature_to_file(file_path, pch_sign):
_create_if_not_exist(os.path.dirname(file_path))
with open(file_path, "w") as f:
f.write(pch_sign)
f.close()
def build_precompile_header(pch_cmd):
try:
subprocess.check_output(pch_cmd, shell=True, stderr=subprocess.STDOUT)
except subprocess.CalledProcessError as e:
raise RuntimeError(f"Compile PreCompile Header fail, command: {pch_cmd}") from e
extra_cflags_str = listToString(extra_cflags)
extra_include_paths_str = " ".join(
[f"-I{include}" for include in extra_include_paths] if extra_include_paths else []
)
lib_include = os.path.join(_TORCH_PATH, 'include')
torch_include_dirs = [
f"-I {lib_include}",
# Python.h
"-I {}".format(sysconfig.get_path("include")),
# torch/all.h
"-I {}".format(os.path.join(lib_include, 'torch', 'csrc', 'api', 'include')),
]
torch_include_dirs_str = listToString(torch_include_dirs)
common_cflags = []
if not is_standalone:
common_cflags += ['-DTORCH_API_INCLUDE_EXTENSION_H']
common_cflags += ['-std=c++17', '-fPIC']
common_cflags += [f"{x}" for x in _get_pybind11_abi_build_flags()]
common_cflags += [f"{x}" for x in _get_glibcxx_abi_build_flags()]
common_cflags_str = listToString(common_cflags)
pch_cmd = format_precompiler_header_cmd(compiler, head_file, head_file_pch, common_cflags_str, torch_include_dirs_str, extra_cflags_str, extra_include_paths_str)
pch_sign = command_to_signature(pch_cmd)
if os.path.isfile(head_file_pch) is not True:
build_precompile_header(pch_cmd)
write_pch_signature_to_file(head_file_signature, pch_sign)
else:
b_same_sign = check_pch_signature_in_file(head_file_signature, pch_sign)
if b_same_sign is False:
build_precompile_header(pch_cmd)
write_pch_signature_to_file(head_file_signature, pch_sign)
def remove_extension_h_precompiler_headers():
def _remove_if_file_exists(path_file):
if os.path.exists(path_file):
os.remove(path_file)
head_file_pch = os.path.join(_TORCH_PATH, 'include', 'torch', 'extension.h.gch')
head_file_signature = os.path.join(_TORCH_PATH, 'include', 'torch', 'extension.h.sign')
_remove_if_file_exists(head_file_pch)
_remove_if_file_exists(head_file_signature)
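# An illustrative sketch of opting into the GCC precompiled-header path
# described above through load_inline(use_pch=True). Whether a PCH is actually
# built depends on the Linux/GCC checks in
# _check_and_build_extension_h_precompiler_headers; the inline source here is
# a hypothetical example.
def _example_load_inline_with_pch():
    source = """
    at::Tensor add_one(at::Tensor x) {
      return x + 1;
    }
    """
    return load_inline(
        name='pch_demo',
        cpp_sources=[source],
        functions=['add_one'],
        use_pch=True,  # reuse torch/extension.h.gch across rebuilds when possible
        verbose=True,
    )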
def load_inline(name,
cpp_sources,
cuda_sources=None,
sycl_sources=None,
functions=None,
extra_cflags=None,
extra_cuda_cflags=None,
extra_sycl_cflags=None,
extra_ldflags=None,
extra_include_paths=None,
build_directory=None,
verbose=False,
with_cuda=None,
with_sycl=None,
is_python_module=True,
with_pytorch_error_handling=True,
keep_intermediates=True,
use_pch=False):
r'''
Load a PyTorch C++ extension just-in-time (JIT) from string sources.
This function behaves exactly like :func:`load`, but takes its sources as
strings rather than filenames. These strings are stored to files in the
build directory, after which the behavior of :func:`load_inline` is
identical to :func:`load`.
See `the
tests <https://github.com/pytorch/pytorch/blob/master/test/test_cpp_extensions_jit.py>`_
for good examples of using this function.
Sources may omit two required parts of a typical non-inline C++ extension:
the necessary header includes, as well as the (pybind11) binding code. More
precisely, strings passed to ``cpp_sources`` are first concatenated into a
single ``.cpp`` file. This file is then prepended with ``#include
<torch/extension.h>``.
Furthermore, if the ``functions`` argument is supplied, bindings will be
automatically generated for each function specified. ``functions`` can
either be a list of function names, or a dictionary mapping from function
names to docstrings. If a list is given, the name of each function is used
as its docstring.
The sources in ``cuda_sources`` are concatenated into a separate ``.cu``
file and prepended with ``torch/types.h``, ``cuda.h`` and
``cuda_runtime.h`` includes. The ``.cpp`` and ``.cu`` files are compiled
separately, but ultimately linked into a single library. Note that no
bindings are generated for functions in ``cuda_sources`` per se. To bind
to a CUDA kernel, you must create a C++ function that calls it, and either
declare or define this C++ function in one of the ``cpp_sources`` (and
include its name in ``functions``).
The sources in ``sycl_sources`` are concatenated into a separate ``.sycl``
file and prepended with ``torch/types.h``, ``sycl/sycl.hpp`` includes.
The ``.cpp`` and ``.sycl`` files are compiled separately, but ultimately
linked into a single library. Note that no bindings are generated for
functions in ``sycl_sources`` per se. To bind to a SYCL kernel, you must
create a C++ function that calls it, and either declare or define this
C++ function in one of the ``cpp_sources`` (and include its name
in ``functions``).
See :func:`load` for a description of arguments omitted below.
Args:
cpp_sources: A string, or list of strings, containing C++ source code.
cuda_sources: A string, or list of strings, containing CUDA source code.
sycl_sources: A string, or list of strings, containing SYCL source code.
functions: A list of function names for which to generate function
bindings. If a dictionary is given, it should map function names to
docstrings (which are otherwise just the function names).
with_cuda: Determines whether CUDA headers and libraries are added to
the build. If set to ``None`` (default), this value is
automatically determined based on whether ``cuda_sources`` is
provided. Set it to ``True`` to force CUDA headers
and libraries to be included.
with_sycl: Determines whether SYCL headers and libraries are added to
the build. If set to ``None`` (default), this value is
automatically determined based on whether ``sycl_sources`` is
provided. Set it to ``True`` to force SYCL headers
and libraries to be included.
with_pytorch_error_handling: Determines whether pytorch error and
warning macros are handled by pytorch instead of pybind. To do
this, each function ``foo`` is called via an intermediary ``_safe_foo``
function. This redirection might cause issues in obscure corners
of C++. This flag should be set to ``False`` when this redirect
causes issues.
Example:
>>> # xdoctest: +REQUIRES(env:TORCH_DOCTEST_CPP_EXT)
>>> from torch.utils.cpp_extension import load_inline
>>> source = """
at::Tensor sin_add(at::Tensor x, at::Tensor y) {
return x.sin() + y.sin();
}
"""
>>> module = load_inline(name='inline_extension',
... cpp_sources=[source],
... functions=['sin_add'])
.. note::
Since load_inline will just-in-time compile the source code, please ensure
that you have the right toolchains installed at runtime. For example,
when loading C++, make sure a C++ compiler is available. If you're loading
a CUDA extension, you will need to additionally install the corresponding CUDA
toolkit (nvcc and any other dependencies your code has). Compiler toolchains
are not included when you install torch and must be installed separately.
During compilation, by default, the Ninja backend uses #CPUS + 2 workers to build
the extension. This may use up too many resources on some systems. One
can control the number of workers by setting the `MAX_JOBS` environment
variable to a non-negative number.
'''
build_directory = build_directory or _get_build_directory(name, verbose)
if isinstance(cpp_sources, str):
cpp_sources = [cpp_sources]
cuda_sources = cuda_sources or []
if isinstance(cuda_sources, str):
cuda_sources = [cuda_sources]
sycl_sources = sycl_sources or []
if isinstance(sycl_sources, str):
sycl_sources = [sycl_sources]
cpp_sources.insert(0, '#include <torch/extension.h>')
if use_pch is True:
# Using PreCompile Header('torch/extension.h') to reduce compile time.
_check_and_build_extension_h_precompiler_headers(extra_cflags, extra_include_paths)
else:
remove_extension_h_precompiler_headers()
# If `functions` is supplied, we create the pybind11 bindings for the user.
# Here, `functions` is (or becomes, after some processing) a map from
# function names to function docstrings.
if functions is not None:
module_def = []
module_def.append('PYBIND11_MODULE(TORCH_EXTENSION_NAME, m) {')
if isinstance(functions, str):
functions = [functions]
if isinstance(functions, list):
# Make the function docstring the same as the function name.
functions = {f: f for f in functions}
elif not isinstance(functions, dict):
raise ValueError(f"Expected 'functions' to be a list or dict, but was {type(functions)}")
for function_name, docstring in functions.items():
if with_pytorch_error_handling:
module_def.append(f'm.def("{function_name}", torch::wrap_pybind_function({function_name}), "{docstring}");')
else:
module_def.append(f'm.def("{function_name}", {function_name}, "{docstring}");')
module_def.append('}')
cpp_sources += module_def
cpp_source_path = os.path.join(build_directory, 'main.cpp')
_maybe_write(cpp_source_path, "\n".join(cpp_sources))
sources = [cpp_source_path]
if cuda_sources:
cuda_sources.insert(0, '#include <torch/types.h>')
cuda_sources.insert(1, '#include <cuda.h>')
cuda_sources.insert(2, '#include <cuda_runtime.h>')
cuda_source_path = os.path.join(build_directory, 'cuda.cu')
_maybe_write(cuda_source_path, "\n".join(cuda_sources))
sources.append(cuda_source_path)
if sycl_sources:
sycl_sources.insert(0, '#include <torch/types.h>')
sycl_sources.insert(1, '#include <sycl/sycl.hpp>')
sycl_source_path = os.path.join(build_directory, 'sycl.sycl')
_maybe_write(sycl_source_path, "\n".join(sycl_sources))
sources.append(sycl_source_path)
return _jit_compile(
name,
sources,
extra_cflags,
extra_cuda_cflags,
extra_sycl_cflags,
extra_ldflags,
extra_include_paths,
build_directory,
verbose,
with_cuda,
with_sycl,
is_python_module,
is_standalone=False,
keep_intermediates=keep_intermediates)
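# An illustrative sketch of the cuda_sources / cpp_sources split described in
# the load_inline() docstring: the CUDA string defines a kernel plus a C++
# launcher, while the C++ string only declares that launcher and lists it in
# ``functions`` so a pybind11 binding is generated. All names are hypothetical
# and the launcher assumes a non-empty float CUDA tensor.
def _example_load_inline_cuda_binding():
    cuda_src = """
    __global__ void fill_kernel(float* data, float value, int64_t n) {
      int64_t i = blockIdx.x * blockDim.x + threadIdx.x;
      if (i < n) data[i] = value;
    }
    at::Tensor fill_with(at::Tensor x, double value) {
      auto out = x.contiguous();
      int64_t n = out.numel();
      int threads = 256;
      int blocks = static_cast<int>((n + threads - 1) / threads);
      fill_kernel<<<blocks, threads>>>(out.data_ptr<float>(),
                                       static_cast<float>(value), n);
      return out;
    }
    """
    cpp_src = "at::Tensor fill_with(at::Tensor x, double value);"
    module = load_inline(
        name='fill_with_ext',
        cpp_sources=[cpp_src],
        cuda_sources=[cuda_src],
        functions=['fill_with'],
        verbose=True,
    )
    return module  # e.g. module.fill_with(torch.zeros(8, device='cuda'), 3.0)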
def _jit_compile(name,
sources,
extra_cflags,
extra_cuda_cflags,
extra_sycl_cflags,
extra_ldflags,
extra_include_paths,
build_directory: str,
verbose: bool,
with_cuda: Optional[bool],
with_sycl: Optional[bool],
is_python_module,
is_standalone,
keep_intermediates=True):
if is_python_module and is_standalone:
raise ValueError("`is_python_module` and `is_standalone` are mutually exclusive.")
if with_cuda is None:
with_cuda = any(map(_is_cuda_file, sources))
with_cudnn = any('cudnn' in f for f in extra_ldflags or [])
if with_sycl is None:
with_sycl = any(map(_is_sycl_file, sources))
old_version = JIT_EXTENSION_VERSIONER.get_version(name)
version = JIT_EXTENSION_VERSIONER.bump_version_if_changed(
name,
sources,
build_arguments=[extra_cflags, extra_cuda_cflags, extra_ldflags, extra_include_paths],
build_directory=build_directory,
with_cuda=with_cuda,
with_sycl=with_sycl,
is_python_module=is_python_module,
is_standalone=is_standalone,
)
if version > 0:
if version != old_version and verbose:
print(f'The input conditions for extension module {name} have changed. ' +
f'Bumping to version {version} and re-building as {name}_v{version}...',
file=sys.stderr)
name = f'{name}_v{version}'
baton = FileBaton(os.path.join(build_directory, 'lock'))
if baton.try_acquire():
try:
if version != old_version:
with GeneratedFileCleaner(keep_intermediates=keep_intermediates) as clean_ctx:
if IS_HIP_EXTENSION and (with_cuda or with_cudnn):
hipify_result = hipify_python.hipify(
project_directory=build_directory,
output_directory=build_directory,
header_include_dirs=(extra_include_paths if extra_include_paths is not None else []),
extra_files=[os.path.abspath(s) for s in sources],
ignores=[_join_rocm_home('*'), os.path.join(_TORCH_PATH, '*')], # no need to hipify ROCm or PyTorch headers
show_detailed=verbose,
show_progress=verbose,
is_pytorch_extension=True,
clean_ctx=clean_ctx
)
hipified_sources = set()
for source in sources:
s_abs = os.path.abspath(source)
hipified_sources.add(hipify_result[s_abs].hipified_path if s_abs in hipify_result else s_abs)
sources = list(hipified_sources)
_write_ninja_file_and_build_library(
name=name,
sources=sources,
extra_cflags=extra_cflags or [],
extra_cuda_cflags=extra_cuda_cflags or [],
extra_sycl_cflags=extra_sycl_cflags or [],
extra_ldflags=extra_ldflags or [],
extra_include_paths=extra_include_paths or [],
build_directory=build_directory,
verbose=verbose,
with_cuda=with_cuda,
with_sycl=with_sycl,
is_standalone=is_standalone)
elif verbose:
print('No modifications detected for re-loaded extension '
f'module {name}, skipping build step...', file=sys.stderr)
finally:
baton.release()
else:
baton.wait()
if verbose:
print(f'Loading extension module {name}...', file=sys.stderr)
if is_standalone:
return _get_exec_path(name, build_directory)
return _import_module_from_library(name, build_directory, is_python_module)
def _write_ninja_file_and_compile_objects(
sources: list[str],
objects,
cflags,
post_cflags,
cuda_cflags,
cuda_post_cflags,
cuda_dlink_post_cflags,
sycl_cflags,
sycl_post_cflags,
sycl_dlink_post_cflags,
build_directory: str,
verbose: bool,
with_cuda: Optional[bool],
with_sycl: Optional[bool]) -> None:
verify_ninja_availability()
compiler = get_cxx_compiler()
get_compiler_abi_compatibility_and_version(compiler)
if with_cuda is None:
with_cuda = any(map(_is_cuda_file, sources))
if with_sycl is None:
with_sycl = any(map(_is_sycl_file, sources))
build_file_path = os.path.join(build_directory, 'build.ninja')
if verbose:
print(f'Emitting ninja build file {build_file_path}...', file=sys.stderr)
# Create build_directory if it does not exist
if not os.path.exists(build_directory):
if verbose:
print(f'Creating directory {build_directory}...', file=sys.stderr)
# This is like mkdir -p, i.e. will also create parent directories.
os.makedirs(build_directory, exist_ok=True)
_write_ninja_file(
path=build_file_path,
cflags=cflags,
post_cflags=post_cflags,
cuda_cflags=cuda_cflags,
cuda_post_cflags=cuda_post_cflags,
cuda_dlink_post_cflags=cuda_dlink_post_cflags,
sycl_cflags=sycl_cflags,
sycl_post_cflags=sycl_post_cflags,
sycl_dlink_post_cflags=sycl_dlink_post_cflags,
sources=sources,
objects=objects,
ldflags=None,
library_target=None,
with_cuda=with_cuda,
with_sycl=with_sycl)
if verbose:
print('Compiling objects...', file=sys.stderr)
_run_ninja_build(
build_directory,
verbose,
# It would be better if we could tell users the name of the extension
# that failed to build but there isn't a good way to get it here.
error_prefix='Error compiling objects for extension')
def _write_ninja_file_and_build_library(
name,
sources: list[str],
extra_cflags,
extra_cuda_cflags,
extra_sycl_cflags,
extra_ldflags,
extra_include_paths,
build_directory: str,
verbose: bool,
with_cuda: Optional[bool],
with_sycl: Optional[bool],
is_standalone: bool = False) -> None:
verify_ninja_availability()
compiler = get_cxx_compiler()
get_compiler_abi_compatibility_and_version(compiler)
if with_cuda is None:
with_cuda = any(map(_is_cuda_file, sources))
if with_sycl is None:
with_sycl = any(map(_is_sycl_file, sources))
extra_ldflags = _prepare_ldflags(
extra_ldflags or [],
with_cuda,
verbose,
is_standalone)
build_file_path = os.path.join(build_directory, 'build.ninja')
if verbose:
print(f'Emitting ninja build file {build_file_path}...', file=sys.stderr)
# Create build_directory if it does not exist
if not os.path.exists(build_directory):
if verbose:
print(f'Creating directory {build_directory}...', file=sys.stderr)
# This is like mkdir -p, i.e. will also create parent directories.
os.makedirs(build_directory, exist_ok=True)
# NOTE: Emitting a new ninja build file does not cause re-compilation if
# the sources did not change, so it's ok to re-emit (and it's fast).
_write_ninja_file_to_build_library(
path=build_file_path,
name=name,
sources=sources,
extra_cflags=extra_cflags or [],
extra_cuda_cflags=extra_cuda_cflags or [],
extra_sycl_cflags=extra_sycl_cflags or [],
extra_ldflags=extra_ldflags or [],
extra_include_paths=extra_include_paths or [],
with_cuda=with_cuda,
with_sycl=with_sycl,
is_standalone=is_standalone)
if verbose:
print(f'Building extension module {name}...', file=sys.stderr)
_run_ninja_build(
build_directory,
verbose,
error_prefix=f"Error building extension '{name}'")
def is_ninja_available():
"""Return ``True`` if the `ninja <https://ninja-build.org/>`_ build system is available on the system, ``False`` otherwise."""
try:
subprocess.check_output('ninja --version'.split())
except Exception:
return False
else:
return True
def verify_ninja_availability():
"""Raise ``RuntimeError`` if `ninja <https://ninja-build.org/>`_ build system is not available on the system, does nothing otherwise."""
if not is_ninja_available():
raise RuntimeError("Ninja is required to load C++ extensions")
def _prepare_ldflags(extra_ldflags, with_cuda, verbose, is_standalone):
if IS_WINDOWS:
python_lib_path = os.path.join(sys.base_exec_prefix, 'libs')
extra_ldflags.append('c10.lib')
if with_cuda:
extra_ldflags.append('c10_cuda.lib')
extra_ldflags.append('torch_cpu.lib')
if with_cuda:
extra_ldflags.append('torch_cuda.lib')
# /INCLUDE is used to ensure torch_cuda is linked against in a project that relies on it.
# Related issue: https://github.com/pytorch/pytorch/issues/31611
extra_ldflags.append('-INCLUDE:?warp_size@cuda@at@@YAHXZ')
extra_ldflags.append('torch.lib')
extra_ldflags.append(f'/LIBPATH:{TORCH_LIB_PATH}')
if not is_standalone:
extra_ldflags.append('torch_python.lib')
extra_ldflags.append(f'/LIBPATH:{python_lib_path}')
else:
extra_ldflags.append(f'-L{TORCH_LIB_PATH}')
extra_ldflags.append('-lc10')
if with_cuda:
extra_ldflags.append('-lc10_hip' if IS_HIP_EXTENSION else '-lc10_cuda')
extra_ldflags.append('-ltorch_cpu')
if with_cuda:
extra_ldflags.append('-ltorch_hip' if IS_HIP_EXTENSION else '-ltorch_cuda')
extra_ldflags.append('-ltorch')
if not is_standalone:
extra_ldflags.append('-ltorch_python')
if is_standalone:
extra_ldflags.append(f"-Wl,-rpath,{TORCH_LIB_PATH}")
if with_cuda:
if verbose:
print('Detected CUDA files, patching ldflags', file=sys.stderr)
if IS_WINDOWS:
extra_ldflags.append(f'/LIBPATH:{_join_cuda_home("lib", "x64")}')
extra_ldflags.append('cudart.lib')
if CUDNN_HOME is not None:
extra_ldflags.append(f'/LIBPATH:{os.path.join(CUDNN_HOME, "lib", "x64")}')
elif not IS_HIP_EXTENSION:
extra_lib_dir = "lib64"
if (not os.path.exists(_join_cuda_home(extra_lib_dir)) and
os.path.exists(_join_cuda_home("lib"))):
# 64-bit CUDA may be installed in "lib"
# Note that it's also possible both don't exist (see _find_cuda_home) - in that case we stay with "lib64"
extra_lib_dir = "lib"
extra_ldflags.append(f'-L{_join_cuda_home(extra_lib_dir)}')
extra_ldflags.append('-lcudart')
if CUDNN_HOME is not None:
extra_ldflags.append(f'-L{os.path.join(CUDNN_HOME, "lib64")}')
elif IS_HIP_EXTENSION:
extra_ldflags.append(f'-L{_join_rocm_home("lib")}')
extra_ldflags.append('-lamdhip64')
return extra_ldflags
def _get_cuda_arch_flags(cflags: Optional[list[str]] = None) -> list[str]:
"""
Determine CUDA arch flags to use.
For an arch, say "6.1", the added compile flag will be
``-gencode=arch=compute_61,code=sm_61``.
For an added "+PTX", an additional
``-gencode=arch=compute_xx,code=compute_xx`` is added.
See select_compute_arch.cmake for corresponding named and supported arches
when building with CMake.
"""
# If cflags is given, there may already be user-provided arch flags in it
# (from `extra_compile_args`)
if cflags is not None:
for flag in cflags:
if 'TORCH_EXTENSION_NAME' in flag:
continue
if 'arch' in flag:
return []
# Note: keep combined names ("arch1+arch2") above single names, otherwise
# string replacement may not do the right thing
named_arches = collections.OrderedDict([
('Kepler+Tesla', '3.7'),
('Kepler', '3.5+PTX'),
('Maxwell+Tegra', '5.3'),
('Maxwell', '5.0;5.2+PTX'),
('Pascal', '6.0;6.1+PTX'),
('Volta+Tegra', '7.2'),
('Volta', '7.0+PTX'),
('Turing', '7.5+PTX'),
('Ampere+Tegra', '8.7'),
('Ampere', '8.0;8.6+PTX'),
('Ada', '8.9+PTX'),
('Hopper', '9.0+PTX'),
('Blackwell+Tegra', '10.1'),
('Blackwell', '10.0;12.0+PTX'),
])
supported_arches = ['3.5', '3.7', '5.0', '5.2', '5.3', '6.0', '6.1', '6.2',
'7.0', '7.2', '7.5', '8.0', '8.6', '8.7', '8.9', '9.0', '9.0a',
'10.0', '10.0a', '10.1', '10.1a', '12.0', '12.0a']
valid_arch_strings = supported_arches + [s + "+PTX" for s in supported_arches]
# The default is sm_30 for CUDA 9.x and 10.x
# First check for an env var (same as used by the main setup.py)
# Can be one or more architectures, e.g. "6.1" or "3.5;5.2;6.0;6.1;7.0+PTX"
# See cmake/Modules_CUDA_fix/upstream/FindCUDA/select_compute_arch.cmake
_arch_list = os.environ.get('TORCH_CUDA_ARCH_LIST', None)
# If not given, determine what's best for the GPU / CUDA version that can be found
if not _arch_list:
warnings.warn(
"TORCH_CUDA_ARCH_LIST is not set, all archs for visible cards are included for compilation. \n"
"If this is not desired, please set os.environ['TORCH_CUDA_ARCH_LIST'].")
arch_list = []
# the assumption is that the extension should run on any of the currently visible cards,
# which could be of different types - therefore all archs for visible cards should be included
for i in range(torch.cuda.device_count()):
capability = torch.cuda.get_device_capability(i)
supported_sm = [int("".join(re.findall(r"\d+", arch.split('_')[1])))
for arch in torch.cuda.get_arch_list() if 'sm_' in arch]
max_supported_sm = max((sm // 10, sm % 10) for sm in supported_sm)
# Capability of the device may be higher than what's supported by the user's
# NVCC, causing compilation error. User's NVCC is expected to match the one
# used to build pytorch, so we use the maximum supported capability of pytorch
# to clamp the capability.
capability = min(max_supported_sm, capability)
arch = f'{capability[0]}.{capability[1]}'
if arch not in arch_list:
arch_list.append(arch)
arch_list = sorted(arch_list)
arch_list[-1] += '+PTX'
else:
# Deal with lists that are ' ' separated (only deal with ';' after)
_arch_list = _arch_list.replace(' ', ';')
# Expand named arches
for named_arch, archval in named_arches.items():
_arch_list = _arch_list.replace(named_arch, archval)
arch_list = _arch_list.split(';')
flags = []
for arch in arch_list:
if arch not in valid_arch_strings:
raise ValueError(f"Unknown CUDA arch ({arch}) or GPU not supported")
else:
# Handle both single and double-digit architecture versions
version = arch.split('+')[0] # Remove "+PTX" if present
major, minor = version.split('.')
num = f"{major}{minor}"
flags.append(f'-gencode=arch=compute_{num},code=sm_{num}')
if arch.endswith('+PTX'):
flags.append(f'-gencode=arch=compute_{num},code=compute_{num}')
return sorted(set(flags))
def _get_rocm_arch_flags(cflags: Optional[list[str]] = None) -> list[str]:
# If cflags is given, there may already be user-provided arch flags in it
# (from `extra_compile_args`)
if cflags is not None:
for flag in cflags:
if 'amdgpu-target' in flag or 'offload-arch' in flag:
return ['-fno-gpu-rdc']
# Use same defaults as used for building PyTorch
# Allow env var to override, just like during initial cmake build.
_archs = os.environ.get('PYTORCH_ROCM_ARCH', None)
if not _archs:
archFlags = torch._C._cuda_getArchFlags()
if archFlags:
archs = archFlags.split()
else:
archs = []
else:
archs = _archs.replace(' ', ';').split(';')
flags = [f'--offload-arch={arch}' for arch in archs]
flags += ['-fno-gpu-rdc']
return flags
def _get_build_directory(name: str, verbose: bool) -> str:
root_extensions_directory = os.environ.get('TORCH_EXTENSIONS_DIR')
if root_extensions_directory is None:
root_extensions_directory = get_default_build_root()
cu_str = ('cpu' if torch.version.cuda is None else
f'cu{torch.version.cuda.replace(".", "")}')
python_version = f'py{sys.version_info.major}{sys.version_info.minor}{getattr(sys, "abiflags", "")}'
build_folder = f'{python_version}_{cu_str}'
root_extensions_directory = os.path.join(
root_extensions_directory, build_folder)
if verbose:
print(f'Using {root_extensions_directory} as PyTorch extensions root...', file=sys.stderr)
build_directory = os.path.join(root_extensions_directory, name)
if not os.path.exists(build_directory):
if verbose:
print(f'Creating extension directory {build_directory}...', file=sys.stderr)
# This is like mkdir -p, i.e. will also create parent directories.
os.makedirs(build_directory, exist_ok=True)
return build_directory
def _get_num_workers(verbose: bool) -> Optional[int]:
max_jobs = os.environ.get('MAX_JOBS')
if max_jobs is not None and max_jobs.isdigit():
if verbose:
print(f'Using envvar MAX_JOBS ({max_jobs}) as the number of workers...',
file=sys.stderr)
return int(max_jobs)
if verbose:
print('Allowing ninja to set a default number of workers... '
'(overridable by setting the environment variable MAX_JOBS=N)',
file=sys.stderr)
return None
def _get_vc_env(vc_arch: str) -> dict[str, str]:
try:
from setuptools import distutils
return distutils._msvccompiler._get_vc_env(vc_arch)
except AttributeError:
from setuptools._distutils import _msvccompiler
return _msvccompiler._get_vc_env(vc_arch)
def _run_ninja_build(build_directory: str, verbose: bool, error_prefix: str) -> None:
command = ['ninja', '-v']
num_workers = _get_num_workers(verbose)
if num_workers is not None:
command.extend(['-j', str(num_workers)])
env = os.environ.copy()
# Try to activate the vc env for the users
if IS_WINDOWS and 'VSCMD_ARG_TGT_ARCH' not in env:
from setuptools import distutils
plat_name = distutils.util.get_platform()
plat_spec = PLAT_TO_VCVARS[plat_name]
vc_env = {k.upper(): v for k, v in _get_vc_env(plat_spec).items()}
for k, v in env.items():
uk = k.upper()
if uk not in vc_env:
vc_env[uk] = v
env = vc_env
try:
sys.stdout.flush()
sys.stderr.flush()
# Warning: don't pass stdout=None to subprocess.run to get output.
# subprocess.run assumes that sys.__stdout__ has not been modified and
# attempts to write to it by default. However, when we call _run_ninja_build
# from ahead-of-time cpp extensions, the following happens:
        # 1) If the stdout encoding is not utf-8, setuptools detaches __stdout__.
# https://github.com/pypa/setuptools/blob/7e97def47723303fafabe48b22168bbc11bb4821/setuptools/dist.py#L1110
# (it probably shouldn't do this)
# 2) subprocess.run (on POSIX, with no stdout override) relies on
# __stdout__ not being detached:
# https://github.com/python/cpython/blob/c352e6c7446c894b13643f538db312092b351789/Lib/subprocess.py#L1214
# To work around this, we pass in the fileno directly and hope that
# it is valid.
stdout_fileno = 1
subprocess.run(
command,
stdout=stdout_fileno if verbose else subprocess.PIPE,
stderr=subprocess.STDOUT,
cwd=build_directory,
check=True,
env=env)
except subprocess.CalledProcessError as e:
# Python 2 and 3 compatible way of getting the error object.
_, error, _ = sys.exc_info()
# error.output contains the stdout and stderr of the build attempt.
message = error_prefix
        # `error` is a CalledProcessError (which has an `output` attribute), but
# mypy thinks it's Optional[BaseException] and doesn't narrow
if hasattr(error, 'output') and error.output: # type: ignore[union-attr]
message += f": {error.output.decode(*SUBPROCESS_DECODE_ARGS)}" # type: ignore[union-attr]
raise RuntimeError(message) from e
def _get_exec_path(module_name, path):
if IS_WINDOWS and TORCH_LIB_PATH not in os.getenv('PATH', '').split(';'):
torch_lib_in_path = any(
os.path.exists(p) and os.path.samefile(p, TORCH_LIB_PATH)
for p in os.getenv('PATH', '').split(';')
)
if not torch_lib_in_path:
os.environ['PATH'] = f"{TORCH_LIB_PATH};{os.getenv('PATH', '')}"
return os.path.join(path, f'{module_name}{EXEC_EXT}')
def _import_module_from_library(module_name, path, is_python_module):
filepath = os.path.join(path, f"{module_name}{LIB_EXT}")
if is_python_module:
# https://stackoverflow.com/questions/67631/how-to-import-a-module-given-the-full-path
spec = importlib.util.spec_from_file_location(module_name, filepath)
assert spec is not None
module = importlib.util.module_from_spec(spec)
assert isinstance(spec.loader, importlib.abc.Loader)
spec.loader.exec_module(module)
return module
else:
torch.ops.load_library(filepath)
return filepath
def _write_ninja_file_to_build_library(path,
name,
sources,
extra_cflags,
extra_cuda_cflags,
extra_sycl_cflags,
extra_ldflags,
extra_include_paths,
with_cuda,
with_sycl,
is_standalone) -> None:
extra_cflags = [flag.strip() for flag in extra_cflags]
extra_cuda_cflags = [flag.strip() for flag in extra_cuda_cflags]
extra_sycl_cflags = [flag.strip() for flag in extra_sycl_cflags]
extra_ldflags = [flag.strip() for flag in extra_ldflags]
extra_include_paths = [flag.strip() for flag in extra_include_paths]
# Turn into absolute paths so we can emit them into the ninja build
# file wherever it is.
user_includes = [os.path.abspath(file) for file in extra_include_paths]
# include_paths() gives us the location of torch/extension.h
# TODO generalize with_cuda as specific device type.
if with_cuda:
system_includes = include_paths("cuda")
else:
system_includes = include_paths("cpu")
# sysconfig.get_path('include') gives us the location of Python.h
# Explicitly specify 'posix_prefix' scheme on non-Windows platforms to workaround error on some MacOS
# installations where default `get_path` points to non-existing `/Library/Python/M.m/include` folder
python_include_path = sysconfig.get_path('include', scheme='nt' if IS_WINDOWS else 'posix_prefix')
if python_include_path is not None:
system_includes.append(python_include_path)
common_cflags = []
if not is_standalone:
common_cflags.append(f'-DTORCH_EXTENSION_NAME={name}')
common_cflags.append('-DTORCH_API_INCLUDE_EXTENSION_H')
common_cflags += [f"{x}" for x in _get_pybind11_abi_build_flags()]
# Windows does not understand `-isystem` and quotes flags later.
if IS_WINDOWS:
common_cflags += [f'-I{include}' for include in user_includes + system_includes]
else:
common_cflags += [f'-I{shlex.quote(include)}' for include in user_includes]
common_cflags += [f'-isystem {shlex.quote(include)}' for include in system_includes]
common_cflags += [f"{x}" for x in _get_glibcxx_abi_build_flags()]
if IS_WINDOWS:
cflags = common_cflags + COMMON_MSVC_FLAGS + ['/std:c++17'] + extra_cflags
cflags = _nt_quote_args(cflags)
else:
cflags = common_cflags + ['-fPIC', '-std=c++17'] + extra_cflags
if with_cuda and IS_HIP_EXTENSION:
cuda_flags = ['-DWITH_HIP'] + cflags + COMMON_HIP_FLAGS + COMMON_HIPCC_FLAGS
cuda_flags += extra_cuda_cflags
cuda_flags += _get_rocm_arch_flags(cuda_flags)
elif with_cuda:
cuda_flags = common_cflags + COMMON_NVCC_FLAGS + _get_cuda_arch_flags()
if IS_WINDOWS:
for flag in COMMON_MSVC_FLAGS:
cuda_flags = ['-Xcompiler', flag] + cuda_flags
for ignore_warning in MSVC_IGNORE_CUDAFE_WARNINGS:
cuda_flags = ['-Xcudafe', '--diag_suppress=' + ignore_warning] + cuda_flags
cuda_flags = cuda_flags + ['-std=c++17']
cuda_flags = _nt_quote_args(cuda_flags)
cuda_flags += _nt_quote_args(extra_cuda_cflags)
else:
cuda_flags += ['--compiler-options', "'-fPIC'"]
cuda_flags += extra_cuda_cflags
if not any(flag.startswith('-std=') for flag in cuda_flags):
cuda_flags.append('-std=c++17')
cc_env = os.getenv("CC")
if cc_env is not None:
cuda_flags = ['-ccbin', cc_env] + cuda_flags
else:
cuda_flags = None
if with_sycl:
sycl_cflags = cflags + _COMMON_SYCL_FLAGS
sycl_cflags += extra_sycl_cflags
_append_sycl_std_if_no_std_present(sycl_cflags)
host_cflags = cflags
# escaping quoted arguments to pass them thru SYCL compiler
host_cflags = [item.replace('\\"', '\\\\"') for item in host_cflags]
host_cflags = ' '.join(host_cflags)
sycl_cflags += _wrap_sycl_host_flags(host_cflags)
sycl_dlink_post_cflags = _SYCL_DLINK_FLAGS
else:
sycl_cflags = None
sycl_dlink_post_cflags = None
def object_file_path(source_file: str) -> str:
# '/path/to/file.cpp' -> 'file'
file_name = os.path.splitext(os.path.basename(source_file))[0]
if _is_cuda_file(source_file) and with_cuda:
# Use a different object filename in case a C++ and CUDA file have
# the same filename but different extension (.cpp vs. .cu).
target = f'{file_name}.cuda.o'
elif _is_sycl_file(source_file) and with_sycl:
target = f'{file_name}.sycl.o'
else:
target = f'{file_name}.o'
return target
objects = [object_file_path(src) for src in sources]
ldflags = ([] if is_standalone else [SHARED_FLAG]) + extra_ldflags
# The darwin linker needs explicit consent to ignore unresolved symbols.
if IS_MACOS:
ldflags.append('-undefined dynamic_lookup')
elif IS_WINDOWS:
ldflags = _nt_quote_args(ldflags)
ext = EXEC_EXT if is_standalone else LIB_EXT
library_target = f'{name}{ext}'
_write_ninja_file(
path=path,
cflags=cflags,
post_cflags=None,
cuda_cflags=cuda_flags,
cuda_post_cflags=None,
cuda_dlink_post_cflags=None,
sycl_cflags=sycl_cflags,
sycl_post_cflags=[],
sycl_dlink_post_cflags=sycl_dlink_post_cflags,
sources=sources,
objects=objects,
ldflags=ldflags,
library_target=library_target,
with_cuda=with_cuda,
with_sycl=with_sycl)
def _write_ninja_file(path,
cflags,
post_cflags,
cuda_cflags,
cuda_post_cflags,
cuda_dlink_post_cflags,
sycl_cflags,
sycl_post_cflags,
sycl_dlink_post_cflags,
sources,
objects,
ldflags,
library_target,
with_cuda,
with_sycl) -> None:
r"""Write a ninja file that does the desired compiling and linking.
`path`: Where to write this file
`cflags`: list of flags to pass to $cxx. Can be None.
`post_cflags`: list of flags to append to the $cxx invocation. Can be None.
`cuda_cflags`: list of flags to pass to $nvcc. Can be None.
`cuda_post_cflags`: list of flags to append to the $nvcc invocation. Can be None.
`cuda_dlink_post_cflags`: list of flags to append to the $nvcc device code link invocation. Can be None.
`sycl_cflags`: list of flags to pass to SYCL compiler. Can be None.
`sycl_post_cflags`: list of flags to append to the SYCL compiler invocation. Can be None.
`sycl_dlink_post_cflags`: list of flags to append to the SYCL compiler device code link invocation. Can be None.
`sources`: list of paths to source files
`objects`: list of desired paths to objects, one per source.
`ldflags`: list of flags to pass to linker. Can be None.
`library_target`: Name of the output library. Can be None; in that case,
we do no linking.
    `with_cuda`: If we should be compiling with CUDA.
    `with_sycl`: If we should be compiling with SYCL.
"""
def sanitize_flags(flags):
if flags is None:
return []
else:
return [flag.strip() for flag in flags]
cflags = sanitize_flags(cflags)
post_cflags = sanitize_flags(post_cflags)
cuda_cflags = sanitize_flags(cuda_cflags)
cuda_post_cflags = sanitize_flags(cuda_post_cflags)
cuda_dlink_post_cflags = sanitize_flags(cuda_dlink_post_cflags)
sycl_cflags = sanitize_flags(sycl_cflags)
sycl_post_cflags = sanitize_flags(sycl_post_cflags)
sycl_dlink_post_cflags = sanitize_flags(sycl_dlink_post_cflags)
ldflags = sanitize_flags(ldflags)
# Sanity checks...
assert len(sources) == len(objects)
assert len(sources) > 0
compiler = get_cxx_compiler()
# Version 1.3 is required for the `deps` directive.
config = ['ninja_required_version = 1.3']
config.append(f'cxx = {compiler}')
if with_cuda or cuda_dlink_post_cflags:
if "PYTORCH_NVCC" in os.environ:
nvcc = os.getenv("PYTORCH_NVCC") # user can set nvcc compiler with ccache using the environment variable here
else:
if IS_HIP_EXTENSION:
nvcc = _join_rocm_home('bin', 'hipcc')
else:
nvcc = _join_cuda_home('bin', 'nvcc')
config.append(f'nvcc = {nvcc}')
if with_sycl or sycl_dlink_post_cflags:
sycl = 'icx' if IS_WINDOWS else 'icpx'
config.append(f'sycl = {sycl}')
if IS_HIP_EXTENSION:
post_cflags = COMMON_HIP_FLAGS + post_cflags
flags = [f'cflags = {" ".join(cflags)}']
flags.append(f'post_cflags = {" ".join(post_cflags)}')
if with_cuda:
flags.append(f'cuda_cflags = {" ".join(cuda_cflags)}')
flags.append(f'cuda_post_cflags = {" ".join(cuda_post_cflags)}')
flags.append(f'cuda_dlink_post_cflags = {" ".join(cuda_dlink_post_cflags)}')
if with_sycl:
flags.append(f'sycl_cflags = {" ".join(sycl_cflags)}')
flags.append(f'sycl_post_cflags = {" ".join(sycl_post_cflags)}')
flags.append(f'sycl_dlink_post_cflags = {" ".join(sycl_dlink_post_cflags)}')
flags.append(f'ldflags = {" ".join(ldflags)}')
# Turn into absolute paths so we can emit them into the ninja build
# file wherever it is.
sources = [os.path.abspath(file) for file in sources]
# See https://ninja-build.org/build.ninja.html for reference.
compile_rule = ['rule compile']
if IS_WINDOWS:
compile_rule.append(
' command = cl /showIncludes $cflags -c $in /Fo$out $post_cflags')
compile_rule.append(' deps = msvc')
else:
compile_rule.append(
' command = $cxx -MMD -MF $out.d $cflags -c $in -o $out $post_cflags')
compile_rule.append(' depfile = $out.d')
compile_rule.append(' deps = gcc')
if with_cuda:
cuda_compile_rule = ['rule cuda_compile']
nvcc_gendeps = ''
# --generate-dependencies-with-compile is not supported by ROCm
# Nvcc flag `--generate-dependencies-with-compile` is not supported by sccache, which may increase build time.
if torch.version.cuda is not None and os.getenv('TORCH_EXTENSION_SKIP_NVCC_GEN_DEPENDENCIES', '0') != '1':
cuda_compile_rule.append(' depfile = $out.d')
cuda_compile_rule.append(' deps = gcc')
# Note: non-system deps with nvcc are only supported
# on Linux so use --generate-dependencies-with-compile
# to make this work on Windows too.
nvcc_gendeps = '--generate-dependencies-with-compile --dependency-output $out.d'
cuda_compile_rule.append(
f' command = $nvcc {nvcc_gendeps} $cuda_cflags -c $in -o $out $cuda_post_cflags')
if with_sycl:
sycl_compile_rule = ['rule sycl_compile']
# SYCL compiler does not recognize .sycl extension automatically,
# so we pass '-x c++' explicitly notifying compiler of file format
sycl_compile_rule.append(
' command = $sycl $sycl_cflags -c -x c++ $in -o $out $sycl_post_cflags')
# Emit one build rule per source to enable incremental build.
build = []
for source_file, object_file in zip(sources, objects):
is_cuda_source = _is_cuda_file(source_file) and with_cuda
is_sycl_source = _is_sycl_file(source_file) and with_sycl
if is_cuda_source:
rule = 'cuda_compile'
elif is_sycl_source:
rule = 'sycl_compile'
else:
rule = 'compile'
if IS_WINDOWS:
source_file = source_file.replace(':', '$:')
object_file = object_file.replace(':', '$:')
source_file = source_file.replace(" ", "$ ")
object_file = object_file.replace(" ", "$ ")
build.append(f'build {object_file}: {rule} {source_file}')
if cuda_dlink_post_cflags:
cuda_devlink_out = os.path.join(os.path.dirname(objects[0]), 'dlink.o')
cuda_devlink_rule = ['rule cuda_devlink']
cuda_devlink_rule.append(' command = $nvcc $in -o $out $cuda_dlink_post_cflags')
cuda_devlink = [f'build {cuda_devlink_out}: cuda_devlink {" ".join(objects)}']
objects += [cuda_devlink_out]
else:
cuda_devlink_rule, cuda_devlink = [], []
if sycl_dlink_post_cflags:
sycl_devlink_out = os.path.join(os.path.dirname(objects[0]), 'sycl_dlink.o')
sycl_devlink_rule = ['rule sycl_devlink']
sycl_devlink_rule.append(' command = $sycl $in -o $out $sycl_dlink_post_cflags')
sycl_devlink = [f'build {sycl_devlink_out}: sycl_devlink {" ".join(objects)}']
objects += [sycl_devlink_out]
else:
sycl_devlink_rule, sycl_devlink = [], []
if library_target is not None:
link_rule = ['rule link']
if IS_WINDOWS:
cl_paths = subprocess.check_output(['where',
'cl']).decode(*SUBPROCESS_DECODE_ARGS).split('\r\n')
if len(cl_paths) >= 1:
cl_path = os.path.dirname(cl_paths[0]).replace(':', '$:')
else:
raise RuntimeError("MSVC is required to load C++ extensions")
link_rule.append(f' command = "{cl_path}/link.exe" $in /nologo $ldflags /out:$out')
else:
link_rule.append(' command = $cxx $in $ldflags -o $out')
link = [f'build {library_target}: link {" ".join(objects)}']
default = [f'default {library_target}']
else:
link_rule, link, default = [], [], []
# 'Blocks' should be separated by newlines, for visual benefit.
blocks = [config, flags, compile_rule]
if with_cuda:
blocks.append(cuda_compile_rule) # type: ignore[possibly-undefined]
if with_sycl:
blocks.append(sycl_compile_rule) # type: ignore[possibly-undefined]
blocks += [cuda_devlink_rule, sycl_devlink_rule, link_rule, build, cuda_devlink, sycl_devlink, link, default]
content = "\n\n".join("\n".join(b) for b in blocks)
    # Ninja requires a newline at the end of the .ninja file
content += "\n"
_maybe_write(path, content)
def _join_cuda_home(*paths) -> str:
"""
    Join paths with CUDA_HOME, or raise an error if CUDA_HOME is not set.
This is basically a lazy way of raising an error for missing $CUDA_HOME
only once we need to get any CUDA-specific path.
"""
if CUDA_HOME is None:
raise OSError('CUDA_HOME environment variable is not set. '
'Please set it to your CUDA install root.')
return os.path.join(CUDA_HOME, *paths)
def _is_cuda_file(path: str) -> bool:
valid_ext = ['.cu', '.cuh']
if IS_HIP_EXTENSION:
valid_ext.append('.hip')
return os.path.splitext(path)[1] in valid_ext
def _is_sycl_file(path: str) -> bool:
valid_ext = ['.sycl']
return os.path.splitext(path)[1] in valid_ext
```
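The helpers above back the JIT entry points in `torch.utils.cpp_extension`. Below is a minimal sketch of how they are typically exercised through the public `load` API; the source files `my_op.cpp` / `my_op_kernel.cu` are hypothetical placeholders, while `TORCH_CUDA_ARCH_LIST` and `MAX_JOBS` are the environment variables consulted by `_get_cuda_arch_flags` and `_get_num_workers` above.

```py
import os
import torch
from torch.utils.cpp_extension import is_ninja_available, load

# Hypothetical sources; any C++ file exposing a PYBIND11_MODULE would do.
sources = ["my_op.cpp"]
if torch.cuda.is_available():
    sources.append("my_op_kernel.cu")  # a .cu file flips with_cuda via _is_cuda_file

# Knobs read by the helpers above (assumed values, adjust to your GPUs).
os.environ.setdefault("TORCH_CUDA_ARCH_LIST", "8.0;8.6+PTX")  # -> -gencode flags
os.environ.setdefault("MAX_JOBS", "4")                        # -> ninja -j 4

if is_ninja_available():
    my_op = load(
        name="my_op",
        sources=sources,
        extra_cflags=["-O3"],
        verbose=True,  # prints the build.ninja location and the ninja output
    )
```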
|
====================================================================================================================
SOURCE CODE FILE: __init__.py
LINES: 1
SIZE: 1.69 KB
PATH: scripts\freecad_env\Lib\site-packages\torch\utils\data\__init__.py
ENCODING: utf-8
```py
from torch.utils.data.dataloader import (
_DatasetKind,
DataLoader,
default_collate,
default_convert,
get_worker_info,
)
from torch.utils.data.datapipes._decorator import (
argument_validation,
functional_datapipe,
guaranteed_datapipes_determinism,
non_deterministic,
runtime_validation,
runtime_validation_disabled,
)
from torch.utils.data.datapipes.datapipe import (
DataChunk,
DFIterDataPipe,
IterDataPipe,
MapDataPipe,
)
from torch.utils.data.dataset import (
ChainDataset,
ConcatDataset,
Dataset,
IterableDataset,
random_split,
StackDataset,
Subset,
TensorDataset,
)
from torch.utils.data.distributed import DistributedSampler
from torch.utils.data.sampler import (
BatchSampler,
RandomSampler,
Sampler,
SequentialSampler,
SubsetRandomSampler,
WeightedRandomSampler,
)
__all__ = [
"BatchSampler",
"ChainDataset",
"ConcatDataset",
"DFIterDataPipe",
"DataChunk",
"DataLoader",
"Dataset",
"DistributedSampler",
"IterDataPipe",
"IterableDataset",
"MapDataPipe",
"RandomSampler",
"Sampler",
"SequentialSampler",
"StackDataset",
"Subset",
"SubsetRandomSampler",
"TensorDataset",
"WeightedRandomSampler",
"_DatasetKind",
"argument_validation",
"default_collate",
"default_convert",
"functional_datapipe",
"get_worker_info",
"guaranteed_datapipes_determinism",
"non_deterministic",
"random_split",
"runtime_validation",
"runtime_validation_disabled",
]
# Please keep this list sorted
assert __all__ == sorted(__all__)
```
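For orientation, a small usage sketch of the re-exported classes; the tensors are placeholders.

```py
import torch
from torch.utils.data import DataLoader, RandomSampler, TensorDataset

# Toy dataset: 100 samples with 10 features each plus an integer label.
features = torch.randn(100, 10)
labels = torch.randint(0, 2, (100,))
dataset = TensorDataset(features, labels)

loader = DataLoader(dataset, batch_size=16, sampler=RandomSampler(dataset))
for batch_features, batch_labels in loader:
    # batch_features: (B, 10), batch_labels: (B,) with B <= 16
    pass
```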
|
===========================================================================================================================
SOURCE CODE FILE: __init__.py
LINES: 1
SIZE: 1.64 KB
PATH: scripts\freecad_env\Lib\site-packages\torch\utils\data\_utils\__init__.py
ENCODING: utf-8
```py
# mypy: allow-untyped-defs
r"""Utility classes & functions for data loading. Code in this folder is mostly used by ../dataloder.py.
A lot of multiprocessing is used in data loading, which only supports running
functions defined in global environment (py2 can't serialize static methods).
Therefore, for code tidiness we put these functions into different files in this
folder.
"""
import atexit
import sys
# old private location of the ExceptionWrapper that some users rely on:
from torch._utils import ExceptionWrapper
IS_WINDOWS = sys.platform == "win32"
MP_STATUS_CHECK_INTERVAL = 5.0
r"""Interval (in seconds) to check status of processes to avoid hanging in
multiprocessing data loading. This is mainly used in getting data from
another process, in which case we need to periodically check whether the
sender is alive to prevent hanging."""
python_exit_status = False
r"""Whether Python is shutting down. This flag is guaranteed to be set before
the Python core library resources are freed, but Python may already be exiting
for some time when this is set.
Hook to set this flag is `_set_python_exit_flag`, and is inspired by a similar
hook in Python 3.7 multiprocessing library:
https://github.com/python/cpython/blob/d4d60134b29290049e28df54f23493de4f1824b6/Lib/multiprocessing/util.py#L277-L327
"""
try:
import numpy
HAS_NUMPY = True
except ModuleNotFoundError:
HAS_NUMPY = False
def _set_python_exit_flag():
global python_exit_status
python_exit_status = True
atexit.register(_set_python_exit_flag)
from . import collate, fetch, pin_memory, signal_handling, worker
```
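The exit-flag mechanism described above is a small, reusable pattern; here is a minimal sketch of the same idea outside of DataLoader.

```py
import atexit

_shutting_down = False

def _mark_shutdown():
    # Registered with atexit, so it runs while the interpreter is still usable.
    global _shutting_down
    _shutting_down = True

atexit.register(_mark_shutdown)

def poll_once():
    # Long-running loops consult the flag and bail out instead of touching
    # modules that may already be torn down during interpreter shutdown.
    if _shutting_down:
        return False
    return True
```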
|
==========================================================================================================================
SOURCE CODE FILE: collate.py
LINES: 1
SIZE: 15.98 KB
PATH: scripts\freecad_env\Lib\site-packages\torch\utils\data\_utils\collate.py
ENCODING: utf-8
```py
# mypy: allow-untyped-defs
r"""Contains definitions of the methods used by the _BaseDataLoaderIter workers.
These methods are used to collate samples fetched from dataset into Tensor(s).
These **need** to be in global scope since Py2 doesn't support serializing
static methods.
`default_collate` and `default_convert` are exposed to users via 'dataloader.py'.
"""
import collections
import contextlib
import copy
import re
from typing import Callable, Optional, Union
import torch
np_str_obj_array_pattern = re.compile(r"[SaUO]")
def default_convert(data):
r"""
Convert each NumPy array element into a :class:`torch.Tensor`.
If the input is a `Sequence`, `Collection`, or `Mapping`, it tries to convert each element inside to a :class:`torch.Tensor`.
    If the input is not a NumPy array, it is left unchanged.
This is used as the default function for collation when both `batch_sampler` and `batch_size`
are NOT defined in :class:`~torch.utils.data.DataLoader`.
The general input type to output type mapping is similar to that
of :func:`~torch.utils.data.default_collate`. See the description there for more details.
Args:
data: a single data point to be converted
Examples:
>>> # xdoctest: +SKIP
>>> # Example with `int`
>>> default_convert(0)
0
>>> # Example with NumPy array
>>> default_convert(np.array([0, 1]))
tensor([0, 1])
>>> # Example with NamedTuple
>>> Point = namedtuple('Point', ['x', 'y'])
>>> default_convert(Point(0, 0))
Point(x=0, y=0)
>>> default_convert(Point(np.array(0), np.array(0)))
Point(x=tensor(0), y=tensor(0))
>>> # Example with List
>>> default_convert([np.array([0, 1]), np.array([2, 3])])
[tensor([0, 1]), tensor([2, 3])]
"""
elem_type = type(data)
if isinstance(data, torch.Tensor):
return data
elif (
elem_type.__module__ == "numpy"
and elem_type.__name__ != "str_"
and elem_type.__name__ != "string_"
):
# array of string classes and object
if (
elem_type.__name__ == "ndarray"
and np_str_obj_array_pattern.search(data.dtype.str) is not None
):
return data
return torch.as_tensor(data)
elif isinstance(data, collections.abc.Mapping):
try:
if isinstance(data, collections.abc.MutableMapping):
# The mapping type may have extra properties, so we can't just
# use `type(data)(...)` to create the new mapping.
# Create a clone and update it if the mapping type is mutable.
clone = copy.copy(data)
clone.update({key: default_convert(data[key]) for key in data})
return clone
else:
return elem_type({key: default_convert(data[key]) for key in data})
except TypeError:
# The mapping type may not support `copy()` / `update(mapping)`
# or `__init__(iterable)`.
return {key: default_convert(data[key]) for key in data}
elif isinstance(data, tuple) and hasattr(data, "_fields"): # namedtuple
return elem_type(*(default_convert(d) for d in data))
elif isinstance(data, tuple):
return [default_convert(d) for d in data] # Backwards compatibility.
elif isinstance(data, collections.abc.Sequence) and not isinstance(
data, (str, bytes)
):
try:
if isinstance(data, collections.abc.MutableSequence):
# The sequence type may have extra properties, so we can't just
# use `type(data)(...)` to create the new sequence.
# Create a clone and update it if the sequence type is mutable.
clone = copy.copy(data) # type: ignore[arg-type]
for i, d in enumerate(data):
clone[i] = default_convert(d)
return clone
else:
return elem_type([default_convert(d) for d in data])
except TypeError:
# The sequence type may not support `copy()` / `__setitem__(index, item)`
# or `__init__(iterable)` (e.g., `range`).
return [default_convert(d) for d in data]
else:
return data
default_collate_err_msg_format = (
"default_collate: batch must contain tensors, numpy arrays, numbers, "
"dicts or lists; found {}"
)
def collate(
batch,
*,
collate_fn_map: Optional[dict[Union[type, tuple[type, ...]], Callable]] = None,
):
r"""
    General collate function that handles batches whose elements are collection types.
    The function also supports a registry of type-specific collate functions; `default_collate_fn_map`
provides default collate functions for tensors, numpy arrays, numbers and strings.
Args:
batch: a single batch to be collated
collate_fn_map: Optional dictionary mapping from element type to the corresponding collate function.
If the element type isn't present in this dictionary,
this function will go through each key of the dictionary in the insertion order to
invoke the corresponding collate function if the element type is a subclass of the key.
Examples:
>>> def collate_tensor_fn(batch, *, collate_fn_map):
... # Extend this function to handle batch of tensors
... return torch.stack(batch, 0)
>>> def custom_collate(batch):
... collate_map = {torch.Tensor: collate_tensor_fn}
... return collate(batch, collate_fn_map=collate_map)
>>> # Extend `default_collate` by in-place modifying `default_collate_fn_map`
>>> default_collate_fn_map.update({torch.Tensor: collate_tensor_fn})
Note:
Each collate function requires a positional argument for batch and a keyword argument
for the dictionary of collate functions as `collate_fn_map`.
"""
elem = batch[0]
elem_type = type(elem)
if collate_fn_map is not None:
if elem_type in collate_fn_map:
return collate_fn_map[elem_type](batch, collate_fn_map=collate_fn_map)
for collate_type in collate_fn_map:
if isinstance(elem, collate_type):
return collate_fn_map[collate_type](
batch, collate_fn_map=collate_fn_map
)
if isinstance(elem, collections.abc.Mapping):
try:
if isinstance(elem, collections.abc.MutableMapping):
# The mapping type may have extra properties, so we can't just
# use `type(data)(...)` to create the new mapping.
# Create a clone and update it if the mapping type is mutable.
clone = copy.copy(elem)
clone.update(
{
key: collate(
[d[key] for d in batch], collate_fn_map=collate_fn_map
)
for key in elem
}
)
return clone
else:
return elem_type(
{
key: collate(
[d[key] for d in batch], collate_fn_map=collate_fn_map
)
for key in elem
}
)
except TypeError:
# The mapping type may not support `copy()` / `update(mapping)`
# or `__init__(iterable)`.
return {
key: collate([d[key] for d in batch], collate_fn_map=collate_fn_map)
for key in elem
}
elif isinstance(elem, tuple) and hasattr(elem, "_fields"): # namedtuple
return elem_type(
*(
collate(samples, collate_fn_map=collate_fn_map)
for samples in zip(*batch)
)
)
elif isinstance(elem, collections.abc.Sequence):
# check to make sure that the elements in batch have consistent size
it = iter(batch)
elem_size = len(next(it))
if not all(len(elem) == elem_size for elem in it):
raise RuntimeError("each element in list of batch should be of equal size")
transposed = list(zip(*batch)) # It may be accessed twice, so we use a list.
if isinstance(elem, tuple):
return [
collate(samples, collate_fn_map=collate_fn_map)
for samples in transposed
] # Backwards compatibility.
else:
try:
if isinstance(elem, collections.abc.MutableSequence):
# The sequence type may have extra properties, so we can't just
# use `type(data)(...)` to create the new sequence.
# Create a clone and update it if the sequence type is mutable.
clone = copy.copy(elem) # type: ignore[arg-type]
for i, samples in enumerate(transposed):
clone[i] = collate(samples, collate_fn_map=collate_fn_map)
return clone
else:
return elem_type(
[
collate(samples, collate_fn_map=collate_fn_map)
for samples in transposed
]
)
except TypeError:
# The sequence type may not support `copy()` / `__setitem__(index, item)`
# or `__init__(iterable)` (e.g., `range`).
return [
collate(samples, collate_fn_map=collate_fn_map)
for samples in transposed
]
raise TypeError(default_collate_err_msg_format.format(elem_type))
def collate_tensor_fn(
batch,
*,
collate_fn_map: Optional[dict[Union[type, tuple[type, ...]], Callable]] = None,
):
elem = batch[0]
out = None
if elem.is_nested:
raise RuntimeError(
"Batches of nested tensors are not currently supported by the default collate_fn; "
"please provide a custom collate_fn to handle them appropriately."
)
if elem.layout in {
torch.sparse_coo,
torch.sparse_csr,
torch.sparse_bsr,
torch.sparse_csc,
torch.sparse_bsc,
}:
raise RuntimeError(
"Batches of sparse tensors are not currently supported by the default collate_fn; "
"please provide a custom collate_fn to handle them appropriately."
)
if torch.utils.data.get_worker_info() is not None:
# If we're in a background process, concatenate directly into a
# shared memory tensor to avoid an extra copy
numel = sum(x.numel() for x in batch)
storage = elem._typed_storage()._new_shared(numel, device=elem.device)
out = elem.new(storage).resize_(len(batch), *list(elem.size()))
return torch.stack(batch, 0, out=out)
def collate_numpy_array_fn(
batch,
*,
collate_fn_map: Optional[dict[Union[type, tuple[type, ...]], Callable]] = None,
):
elem = batch[0]
# array of string classes and object
if np_str_obj_array_pattern.search(elem.dtype.str) is not None:
raise TypeError(default_collate_err_msg_format.format(elem.dtype))
return collate([torch.as_tensor(b) for b in batch], collate_fn_map=collate_fn_map)
def collate_numpy_scalar_fn(
batch,
*,
collate_fn_map: Optional[dict[Union[type, tuple[type, ...]], Callable]] = None,
):
return torch.as_tensor(batch)
def collate_float_fn(
batch,
*,
collate_fn_map: Optional[dict[Union[type, tuple[type, ...]], Callable]] = None,
):
return torch.tensor(batch, dtype=torch.float64)
def collate_int_fn(
batch,
*,
collate_fn_map: Optional[dict[Union[type, tuple[type, ...]], Callable]] = None,
):
return torch.tensor(batch)
def collate_str_fn(
batch,
*,
collate_fn_map: Optional[dict[Union[type, tuple[type, ...]], Callable]] = None,
):
return batch
default_collate_fn_map: dict[Union[type, tuple[type, ...]], Callable] = {
torch.Tensor: collate_tensor_fn
}
with contextlib.suppress(ImportError):
import numpy as np
# For both ndarray and memmap (subclass of ndarray)
default_collate_fn_map[np.ndarray] = collate_numpy_array_fn
# See scalars hierarchy: https://numpy.org/doc/stable/reference/arrays.scalars.html
# Skip string scalars
default_collate_fn_map[(np.bool_, np.number, np.object_)] = collate_numpy_scalar_fn
default_collate_fn_map[float] = collate_float_fn
default_collate_fn_map[int] = collate_int_fn
default_collate_fn_map[str] = collate_str_fn
default_collate_fn_map[bytes] = collate_str_fn
def default_collate(batch):
r"""
Take in a batch of data and put the elements within the batch into a tensor with an additional outer dimension - batch size.
The exact output type can be a :class:`torch.Tensor`, a `Sequence` of :class:`torch.Tensor`, a
Collection of :class:`torch.Tensor`, or left unchanged, depending on the input type.
This is used as the default function for collation when
`batch_size` or `batch_sampler` is defined in :class:`~torch.utils.data.DataLoader`.
Here is the general input type (based on the type of the element within the batch) to output type mapping:
* :class:`torch.Tensor` -> :class:`torch.Tensor` (with an added outer dimension batch size)
* NumPy Arrays -> :class:`torch.Tensor`
* `float` -> :class:`torch.Tensor`
* `int` -> :class:`torch.Tensor`
* `str` -> `str` (unchanged)
* `bytes` -> `bytes` (unchanged)
* `Mapping[K, V_i]` -> `Mapping[K, default_collate([V_1, V_2, ...])]`
* `NamedTuple[V1_i, V2_i, ...]` -> `NamedTuple[default_collate([V1_1, V1_2, ...]),
default_collate([V2_1, V2_2, ...]), ...]`
* `Sequence[V1_i, V2_i, ...]` -> `Sequence[default_collate([V1_1, V1_2, ...]),
default_collate([V2_1, V2_2, ...]), ...]`
Args:
batch: a single batch to be collated
Examples:
>>> # xdoctest: +SKIP
>>> # Example with a batch of `int`s:
>>> default_collate([0, 1, 2, 3])
tensor([0, 1, 2, 3])
>>> # Example with a batch of `str`s:
>>> default_collate(['a', 'b', 'c'])
['a', 'b', 'c']
>>> # Example with `Map` inside the batch:
>>> default_collate([{'A': 0, 'B': 1}, {'A': 100, 'B': 100}])
{'A': tensor([ 0, 100]), 'B': tensor([ 1, 100])}
>>> # Example with `NamedTuple` inside the batch:
>>> Point = namedtuple('Point', ['x', 'y'])
>>> default_collate([Point(0, 0), Point(1, 1)])
Point(x=tensor([0, 1]), y=tensor([0, 1]))
>>> # Example with `Tuple` inside the batch:
>>> default_collate([(0, 1), (2, 3)])
[tensor([0, 2]), tensor([1, 3])]
>>> # Example with `List` inside the batch:
>>> default_collate([[0, 1], [2, 3]])
[tensor([0, 2]), tensor([1, 3])]
>>> # Two options to extend `default_collate` to handle specific type
>>> # Option 1: Write custom collate function and invoke `default_collate`
>>> def custom_collate(batch):
... elem = batch[0]
... if isinstance(elem, CustomType): # Some custom condition
... return ...
... else: # Fall back to `default_collate`
... return default_collate(batch)
>>> # Option 2: In-place modify `default_collate_fn_map`
>>> def collate_customtype_fn(batch, *, collate_fn_map=None):
... return ...
        >>> default_collate_fn_map.update({CustomType: collate_customtype_fn})
>>> default_collate(batch) # Handle `CustomType` automatically
"""
return collate(batch, collate_fn_map=default_collate_fn_map)
```
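A short sketch of the extension mechanism described in the docstrings above; `CustomType` and its collate function are hypothetical, and `default_collate_fn_map` comes from the private `_utils.collate` module shown here.

```py
from dataclasses import dataclass

import torch
from torch.utils.data import default_collate
from torch.utils.data._utils.collate import default_collate_fn_map

@dataclass
class CustomType:  # hypothetical element type appearing inside batches
    value: torch.Tensor

def collate_customtype_fn(batch, *, collate_fn_map=None):
    # Stack the wrapped tensors and return a single CustomType for the batch.
    return CustomType(torch.stack([item.value for item in batch], 0))

# "Option 2" from the default_collate docstring: register the type in the map.
default_collate_fn_map[CustomType] = collate_customtype_fn

batch = [CustomType(torch.zeros(3)) for _ in range(4)]
out = default_collate(batch)  # CustomType(value=<tensor of shape (4, 3)>)

# Built-in behaviour is unchanged: dicts of tensors are collated per key.
print(default_collate([{"x": torch.tensor(1)}, {"x": torch.tensor(2)}]))
```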
|
========================================================================================================================
SOURCE CODE FILE: fetch.py
LINES: 1
SIZE: 1.96 KB
PATH: scripts\freecad_env\Lib\site-packages\torch\utils\data\_utils\fetch.py
ENCODING: utf-8
```py
# mypy: allow-untyped-defs
r"""Contains definitions of the methods used by the _BaseDataLoaderIter to fetch data from an iterable-style or map-style dataset.
This logic is shared in both single- and multi-processing data loading.
"""
class _BaseDatasetFetcher:
def __init__(self, dataset, auto_collation, collate_fn, drop_last):
self.dataset = dataset
self.auto_collation = auto_collation
self.collate_fn = collate_fn
self.drop_last = drop_last
def fetch(self, possibly_batched_index):
raise NotImplementedError
class _IterableDatasetFetcher(_BaseDatasetFetcher):
def __init__(self, dataset, auto_collation, collate_fn, drop_last):
super().__init__(dataset, auto_collation, collate_fn, drop_last)
self.dataset_iter = iter(dataset)
self.ended = False
def fetch(self, possibly_batched_index):
if self.ended:
raise StopIteration
if self.auto_collation:
data = []
for _ in possibly_batched_index:
try:
data.append(next(self.dataset_iter))
except StopIteration:
self.ended = True
break
if len(data) == 0 or (
self.drop_last and len(data) < len(possibly_batched_index)
):
raise StopIteration
else:
data = next(self.dataset_iter)
return self.collate_fn(data)
class _MapDatasetFetcher(_BaseDatasetFetcher):
def fetch(self, possibly_batched_index):
if self.auto_collation:
if hasattr(self.dataset, "__getitems__") and self.dataset.__getitems__:
data = self.dataset.__getitems__(possibly_batched_index)
else:
data = [self.dataset[idx] for idx in possibly_batched_index]
else:
data = self.dataset[possibly_batched_index]
return self.collate_fn(data)
```
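`_MapDatasetFetcher` prefers a batched `__getitems__` when the dataset provides one; a minimal sketch of a map-style dataset exploiting that hook (the vectorised lookup is illustrative).

```py
import torch
from torch.utils.data import DataLoader, Dataset

class VectorisedDataset(Dataset):
    def __init__(self, data: torch.Tensor):
        self.data = data

    def __len__(self):
        return len(self.data)

    def __getitem__(self, idx):
        return self.data[idx]

    def __getitems__(self, indices):
        # Called once per batch by _MapDatasetFetcher instead of one
        # __getitem__ call per index; must return a list of samples.
        return list(self.data[torch.as_tensor(indices)])

loader = DataLoader(VectorisedDataset(torch.arange(10.0)), batch_size=4)
print([batch.tolist() for batch in loader])  # [[0.0, 1.0, 2.0, 3.0], ..., [8.0, 9.0]]
```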
|
=============================================================================================================================
SOURCE CODE FILE: pin_memory.py
LINES: 1
SIZE: 4.45 KB
PATH: scripts\freecad_env\Lib\site-packages\torch\utils\data\_utils\pin_memory.py
ENCODING: utf-8
```py
# mypy: allow-untyped-defs
r"""Contains definitions of the methods used by the _BaseDataLoaderIter to put fetched tensors into pinned memory.
These **need** to be in global scope since Py2 doesn't support serializing
static methods.
"""
import collections
import copy
import queue
import torch
from torch._utils import ExceptionWrapper
from . import MP_STATUS_CHECK_INTERVAL
def _pin_memory_loop(in_queue, out_queue, device_id, done_event, device):
# This setting is thread local, and prevents the copy in pin_memory from
# consuming all CPU cores.
torch.set_num_threads(1)
torch.multiprocessing._set_thread_name("pt_data_pin")
if device == "cuda":
torch.cuda.set_device(device_id)
elif device == "xpu":
torch.xpu.set_device(device_id) # type: ignore[attr-defined]
elif device == torch._C._get_privateuse1_backend_name():
custom_device_mod = getattr(torch, torch._C._get_privateuse1_backend_name())
custom_device_mod.set_device(device_id)
elif device is None:
torch.accelerator.set_device_index(device_id)
def do_one_step():
try:
r = in_queue.get(timeout=MP_STATUS_CHECK_INTERVAL)
except queue.Empty:
return
idx, data = r
if not done_event.is_set() and not isinstance(data, ExceptionWrapper):
try:
data = pin_memory(data, device)
except Exception:
data = ExceptionWrapper(
where=f"in pin memory thread for device {device_id}"
)
r = (idx, data)
while not done_event.is_set():
try:
out_queue.put(r, timeout=MP_STATUS_CHECK_INTERVAL)
break
except queue.Full:
continue
# See NOTE [ Data Loader Multiprocessing Shutdown Logic ] for details on the
# logic of this function.
while not done_event.is_set():
# Make sure that we don't preserve any object from one iteration
# to the next
do_one_step()
def pin_memory(data, device=None):
if isinstance(data, torch.Tensor):
return data.pin_memory(device)
elif isinstance(data, (str, bytes)):
return data
elif isinstance(data, collections.abc.Mapping):
try:
if isinstance(data, collections.abc.MutableMapping):
            # The mapping type may have extra properties, so we can't just
            # use `type(data)(...)` to create the new mapping.
            # Create a clone and update it if the mapping type is mutable.
clone = copy.copy(data)
clone.update(
{k: pin_memory(sample, device) for k, sample in data.items()}
)
return clone
else:
return type(data)({k: pin_memory(sample, device) for k, sample in data.items()}) # type: ignore[call-arg]
except TypeError:
# The mapping type may not support `copy()` / `update(mapping)`
# or `__init__(iterable)`.
return {k: pin_memory(sample, device) for k, sample in data.items()}
elif isinstance(data, tuple) and hasattr(data, "_fields"): # namedtuple
return type(data)(*(pin_memory(sample, device) for sample in data))
elif isinstance(data, tuple):
return [
pin_memory(sample, device) for sample in data
] # Backwards compatibility.
elif isinstance(data, collections.abc.Sequence):
try:
if isinstance(data, collections.abc.MutableSequence):
# The sequence type may have extra properties, so we can't just
# use `type(data)(...)` to create the new sequence.
# Create a clone and update it if the sequence type is mutable.
clone = copy.copy(data) # type: ignore[arg-type]
for i, item in enumerate(data):
clone[i] = pin_memory(item, device)
return clone
return type(data)([pin_memory(sample, device) for sample in data]) # type: ignore[call-arg]
except TypeError:
# The sequence type may not support `copy()` / `__setitem__(index, item)`
# or `__init__(iterable)` (e.g., `range`).
return [pin_memory(sample, device) for sample in data]
elif hasattr(data, "pin_memory"):
return data.pin_memory()
else:
return data
```
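In user code this machinery is reached through `DataLoader(pin_memory=True)`; a small sketch, guarded because page-locked memory requires a CUDA-capable runtime.

```py
import torch
from torch.utils.data import DataLoader, TensorDataset

dataset = TensorDataset(torch.randn(32, 4), torch.arange(32))

if torch.cuda.is_available():
    # The pin-memory thread recursively pins every tensor in each batch,
    # including tensors nested inside dicts/tuples, via pin_memory() above.
    loader = DataLoader(dataset, batch_size=8, pin_memory=True)
    features, labels = next(iter(loader))
    assert features.is_pinned() and labels.is_pinned()
    # Pinned host memory enables asynchronous host-to-device copies.
    features_gpu = features.to("cuda", non_blocking=True)
```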
|
==================================================================================================================================
SOURCE CODE FILE: signal_handling.py
LINES: 1
SIZE: 3.17 KB
PATH: scripts\freecad_env\Lib\site-packages\torch\utils\data\_utils\signal_handling.py
ENCODING: utf-8
```py
# mypy: allow-untyped-defs
r"""Signal handling for multiprocessing data loading.
NOTE [ Signal handling in multiprocessing data loading ]
In cases like DataLoader, if a worker process dies due to bus error/segfault
or just hang, the main process will hang waiting for data. This is difficult
to avoid on PyTorch side as it can be caused by limited shm, or other
libraries users call in the workers. In this file and `DataLoader.cpp`, we make
our best effort to provide some error message to users when such unfortunate
events happen.
When a _BaseDataLoaderIter starts worker processes, their pids are registered in a
map defined in `DataLoader.cpp`: id(_BaseDataLoaderIter) => Collection[ Worker pids ]
via `_set_worker_pids`.
When an error happens in a worker process, the main process receives a SIGCHLD,
and Python will eventually call the handler registered below
(in `_set_SIGCHLD_handler`). In the handler, the `_error_if_any_worker_fails`
call checks all registered worker pids and raises a proper error message to
prevent the main process from hanging waiting for data from a worker.
Additionally, at the beginning of each worker's `_utils.worker._worker_loop`,
`_set_worker_signal_handlers` is called to register critical signal handlers
(e.g., for SIGSEGV, SIGBUS, SIGFPE, SIGTERM) in C, which just prints an error
message to stderr before triggering the default handler. So a message will also
be printed from the worker process when it is killed by such signals.
See NOTE [ Data Loader Multiprocessing Shutdown Logic ] for the reasoning of
this signal handling design and other mechanism we implement to make our
multiprocessing data loading robust to errors.
"""
import signal
import threading
# Some of the following imported functions are not used in this file, but are to
# be used as `_utils.signal_handling.XXXXX`.
from torch._C import ( # noqa: F401
_error_if_any_worker_fails,
_remove_worker_pids,
_set_worker_pids,
_set_worker_signal_handlers,
)
from . import IS_WINDOWS
_SIGCHLD_handler_set = False
r"""Whether SIGCHLD handler is set for DataLoader worker failures. Only one
handler needs to be set for all DataLoaders in a process."""
def _set_SIGCHLD_handler():
# Windows doesn't support SIGCHLD handler
if IS_WINDOWS:
return
# can't set signal in child threads
if not isinstance(threading.current_thread(), threading._MainThread): # type: ignore[attr-defined]
return
global _SIGCHLD_handler_set
if _SIGCHLD_handler_set:
return
previous_handler = signal.getsignal(signal.SIGCHLD)
if not callable(previous_handler):
# This doesn't catch default handler, but SIGCHLD default handler is a
# no-op.
previous_handler = None
def handler(signum, frame):
# This following call uses `waitid` with WNOHANG from C side. Therefore,
# Python can still get and update the process status successfully.
_error_if_any_worker_fails()
if previous_handler is not None:
assert callable(previous_handler)
previous_handler(signum, frame)
signal.signal(signal.SIGCHLD, handler)
_SIGCHLD_handler_set = True
```
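The handler-chaining pattern used above (remember the previous SIGCHLD handler, run our check, then defer to it) is general; a minimal sketch outside of DataLoader, assuming a POSIX platform.

```py
import signal

def install_chained_sigchld(on_child_exit):
    previous = signal.getsignal(signal.SIGCHLD)
    if not callable(previous):
        previous = None  # SIG_DFL / SIG_IGN: nothing to chain to

    def handler(signum, frame):
        on_child_exit()              # run our own check first
        if previous is not None:
            previous(signum, frame)  # then defer to the pre-existing handler

    signal.signal(signal.SIGCHLD, handler)
```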
|
=========================================================================================================================
SOURCE CODE FILE: worker.py
LINES: 1
SIZE: 13.84 KB
PATH: scripts\freecad_env\Lib\site-packages\torch\utils\data\_utils\worker.py
ENCODING: utf-8
```py
# mypy: allow-untyped-defs
r""""Contains definitions of the methods used by the _BaseDataLoaderIter workers.
These **needs** to be in global scope since Py2 doesn't support serializing
static methods.
"""
import os
import queue
import random
from dataclasses import dataclass
from typing import Optional, TYPE_CHECKING, Union
import torch
from torch._utils import ExceptionWrapper
from . import HAS_NUMPY, IS_WINDOWS, MP_STATUS_CHECK_INTERVAL, signal_handling
if TYPE_CHECKING:
from torch.utils.data import Dataset
if IS_WINDOWS:
import ctypes
from ctypes.wintypes import BOOL, DWORD, HANDLE
# On Windows, the parent ID of the worker process remains unchanged when the manager process
# is gone, and the only way to check it through OS is to let the worker have a process handle
# of the manager and ask if the process status has changed.
class ManagerWatchdog:
def __init__(self) -> None:
self.manager_pid = os.getppid()
# mypy cannot detect this code is windows only
self.kernel32 = ctypes.WinDLL("kernel32", use_last_error=True) # type: ignore[attr-defined]
self.kernel32.OpenProcess.argtypes = (DWORD, BOOL, DWORD)
self.kernel32.OpenProcess.restype = HANDLE
self.kernel32.WaitForSingleObject.argtypes = (HANDLE, DWORD)
self.kernel32.WaitForSingleObject.restype = DWORD
# Value obtained from https://msdn.microsoft.com/en-us/library/ms684880.aspx
SYNCHRONIZE = 0x00100000
self.manager_handle = self.kernel32.OpenProcess(
SYNCHRONIZE, 0, self.manager_pid
)
if not self.manager_handle:
raise ctypes.WinError(ctypes.get_last_error()) # type: ignore[attr-defined]
self.manager_dead = False
def is_alive(self):
if not self.manager_dead:
# Value obtained from https://msdn.microsoft.com/en-us/library/windows/desktop/ms687032.aspx
self.manager_dead = (
self.kernel32.WaitForSingleObject(self.manager_handle, 0) == 0
)
return not self.manager_dead
else:
class ManagerWatchdog: # type: ignore[no-redef]
def __init__(self) -> None:
self.manager_pid = os.getppid()
self.manager_dead = False
def is_alive(self):
if not self.manager_dead:
self.manager_dead = os.getppid() != self.manager_pid
return not self.manager_dead
_worker_info: Optional["WorkerInfo"] = None
class WorkerInfo:
id: int
num_workers: int
seed: int
dataset: "Dataset"
__initialized = False
def __init__(self, **kwargs):
for k, v in kwargs.items():
setattr(self, k, v)
self.__keys = tuple(kwargs.keys())
self.__initialized = True
def __setattr__(self, key, val):
if self.__initialized:
raise RuntimeError(
f"Cannot assign attributes to {self.__class__.__name__} objects"
)
return super().__setattr__(key, val)
def __repr__(self):
items = [f"{k}={getattr(self, k)}" for k in self.__keys]
return f"{self.__class__.__name__}({', '.join(items)})"
def get_worker_info() -> Optional[WorkerInfo]:
r"""Returns the information about the current
:class:`~torch.utils.data.DataLoader` iterator worker process.
When called in a worker, this returns an object guaranteed to have the
following attributes:
* :attr:`id`: the current worker id.
* :attr:`num_workers`: the total number of workers.
* :attr:`seed`: the random seed set for the current worker. This value is
determined by main process RNG and the worker id. See
:class:`~torch.utils.data.DataLoader`'s documentation for more details.
* :attr:`dataset`: the copy of the dataset object in **this** process. Note
that this will be a different object in a different process than the one
in the main process.
When called in the main process, this returns ``None``.
.. note::
When used in a :attr:`worker_init_fn` passed over to
:class:`~torch.utils.data.DataLoader`, this method can be useful to
set up each worker process differently, for instance, using ``worker_id``
to configure the ``dataset`` object to only read a specific fraction of a
sharded dataset, or use ``seed`` to seed other libraries used in dataset
code.
"""
return _worker_info
r"""Dummy class used to signal the end of an IterableDataset"""
@dataclass(frozen=True)
class _IterableDatasetStopIteration:
worker_id: int
r"""Dummy class used to resume the fetching when worker reuse is enabled"""
@dataclass(frozen=True)
class _ResumeIteration:
seed: Optional[int] = None
# The function `_generate_state` is adapted from `numpy.random.SeedSequence`
# from https://github.com/numpy/numpy/blob/main/numpy/random/bit_generator.pyx
# It's MIT licensed, here is the copyright:
# Copyright (c) 2015 Melissa E. O'Neill
# Copyright (c) 2019 NumPy Developers
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
# This function generates an array of int32 as the seed for
# `numpy.random`, in order to prevent state collision due to same
# seed and algorithm for `numpy.random` and `random` modules.
# TODO: Implement `SeedSequence` like object for `torch.random`
def _generate_state(base_seed, worker_id):
INIT_A = 0x43B0D7E5
MULT_A = 0x931E8875
INIT_B = 0x8B51F9DD
MULT_B = 0x58F38DED
MIX_MULT_L = 0xCA01F9DD
MIX_MULT_R = 0x4973F715
XSHIFT = 4 * 8 // 2
MASK32 = 0xFFFFFFFF
entropy = [worker_id, base_seed & MASK32, base_seed >> 32, 0]
pool = [0] * 4
hash_const_A = INIT_A
def hash(value):
nonlocal hash_const_A
value = (value ^ hash_const_A) & MASK32
hash_const_A = (hash_const_A * MULT_A) & MASK32
value = (value * hash_const_A) & MASK32
value = (value ^ (value >> XSHIFT)) & MASK32
return value
def mix(x, y):
result_x = (MIX_MULT_L * x) & MASK32
result_y = (MIX_MULT_R * y) & MASK32
result = (result_x - result_y) & MASK32
result = (result ^ (result >> XSHIFT)) & MASK32
return result
# Add in the entropy to the pool.
for i in range(len(pool)):
pool[i] = hash(entropy[i])
# Mix all bits together so late bits can affect earlier bits.
for i_src in range(len(pool)):
for i_dst in range(len(pool)):
if i_src != i_dst:
pool[i_dst] = mix(pool[i_dst], hash(pool[i_src]))
hash_const_B = INIT_B
state = []
for i_dst in range(4):
data_val = pool[i_dst]
data_val = (data_val ^ hash_const_B) & MASK32
hash_const_B = (hash_const_B * MULT_B) & MASK32
data_val = (data_val * hash_const_B) & MASK32
data_val = (data_val ^ (data_val >> XSHIFT)) & MASK32
state.append(data_val)
return state
def _worker_loop(
dataset_kind,
dataset,
index_queue,
data_queue,
done_event,
auto_collation,
collate_fn,
drop_last,
base_seed,
init_fn,
worker_id,
num_workers,
persistent_workers,
shared_seed,
):
# See NOTE [ Data Loader Multiprocessing Shutdown Logic ] for details on the
# logic of this function.
try:
# Initialize C side signal handlers for SIGBUS and SIGSEGV. Python signal
# module's handlers are executed after Python returns from C low-level
# handlers, likely when the same fatal signal had already happened
# again.
# https://docs.python.org/3/library/signal.html#execution-of-python-signal-handlers
signal_handling._set_worker_signal_handlers()
torch.multiprocessing._set_thread_name("pt_data_worker")
torch.set_num_threads(1)
seed = base_seed + worker_id
random.seed(seed)
torch.manual_seed(seed)
if HAS_NUMPY:
np_seed = _generate_state(base_seed, worker_id)
import numpy as np
np.random.seed(np_seed)
from torch.utils.data import IterDataPipe
from torch.utils.data.graph_settings import apply_random_seed
shared_rng = torch.Generator()
if isinstance(dataset, IterDataPipe):
assert shared_seed is not None
shared_rng.manual_seed(shared_seed)
dataset = apply_random_seed(dataset, shared_rng)
global _worker_info
_worker_info = WorkerInfo(
id=worker_id, num_workers=num_workers, seed=seed, dataset=dataset
)
from torch.utils.data import _DatasetKind
init_exception = None
try:
if init_fn is not None:
init_fn(worker_id)
fetcher = _DatasetKind.create_fetcher(
dataset_kind, dataset, auto_collation, collate_fn, drop_last
)
except Exception:
init_exception = ExceptionWrapper(
where=f"in DataLoader worker process {worker_id}"
)
        # When using Iterable mode, some workers can exit earlier than others due
        # to the IterableDataset behaving differently for different workers.
# When such things happen, an `_IterableDatasetStopIteration` object is
# sent over to the main process with the ID of this worker, so that the
# main process won't send more tasks to this worker, and will send
# `None` to this worker to properly exit it.
#
# Note that we cannot set `done_event` from a worker as it is shared
# among all processes. Instead, we set the `iteration_end` flag to
# signify that the iterator is exhausted. When either `done_event` or
        # `iteration_end` is set, we skip all processing steps and just wait for
# `None`.
iteration_end = False
watchdog = ManagerWatchdog()
while watchdog.is_alive():
try:
r = index_queue.get(timeout=MP_STATUS_CHECK_INTERVAL)
except queue.Empty:
continue
if isinstance(r, _ResumeIteration):
# Acknowledge the main process
data_queue.put((r, None))
iteration_end = False
if isinstance(dataset, IterDataPipe):
assert r.seed is not None
shared_rng.manual_seed(r.seed)
dataset = apply_random_seed(dataset, shared_rng)
# Recreate the fetcher for worker-reuse policy
fetcher = _DatasetKind.create_fetcher(
dataset_kind, dataset, auto_collation, collate_fn, drop_last
)
continue
elif r is None:
# Received the final signal
assert done_event.is_set() or iteration_end
break
elif done_event.is_set() or iteration_end:
# `done_event` is set. But I haven't received the final signal
# (None) yet. I will keep continuing until get it, and skip the
# processing steps.
continue
idx, index = r
data: Union[_IterableDatasetStopIteration, ExceptionWrapper]
if init_exception is not None:
data = init_exception
init_exception = None
else:
try:
data = fetcher.fetch(index) # type: ignore[possibly-undefined]
except Exception as e:
if (
isinstance(e, StopIteration)
and dataset_kind == _DatasetKind.Iterable
):
data = _IterableDatasetStopIteration(worker_id)
# Set `iteration_end`
# (1) to save future `next(...)` calls, and
# (2) to avoid sending multiple `_IterableDatasetStopIteration`s.
iteration_end = True
else:
# It is important that we don't store exc_info in a variable.
# `ExceptionWrapper` does the correct thing.
# See NOTE [ Python Traceback Reference Cycle Problem ]
data = ExceptionWrapper(
where=f"in DataLoader worker process {worker_id}"
)
data_queue.put((idx, data))
del data, idx, index, r # save memory
except KeyboardInterrupt:
# Main process will raise KeyboardInterrupt anyways.
pass
if done_event.is_set():
data_queue.cancel_join_thread()
data_queue.close()
```
|
==================================================================================================================================
SOURCE CODE FILE: backward_compatibility.py
LINES: 1
SIZE: 0.31 KB
PATH: scripts\freecad_env\Lib\site-packages\torch\utils\data\backward_compatibility.py
ENCODING: utf-8
```py
# mypy: allow-untyped-defs
from typing_extensions import deprecated as _deprecated
@_deprecated(
"Usage of `backward_compatibility.worker_init_fn` is deprecated "
"as `DataLoader` automatically applies sharding in every worker",
category=FutureWarning,
)
def worker_init_fn(worker_id):
pass
```
|
======================================================================================================================
SOURCE CODE FILE: dataloader.py
LINES: 1
SIZE: 79.41 KB
PATH: scripts\freecad_env\Lib\site-packages\torch\utils\data\dataloader.py
ENCODING: utf-8
```py
# mypy: allow-untyped-defs
r"""Definition of the DataLoader and associated iterators that subclass _BaseDataLoaderIter.
To support these two classes, in `./_utils` we define many utility methods and
functions to be run in multiprocessing. E.g., the data loading worker loop is
in `./_utils/worker.py`.
"""
import functools
import itertools
import logging
import multiprocessing as python_multiprocessing
import os
import queue
import threading
import warnings
from collections.abc import Iterable
from typing import Any, Callable, Generic, Optional, TypeVar, Union
import torch
import torch.distributed as dist
import torch.utils.data.graph_settings
from torch._utils import ExceptionWrapper
from torch.utils.data import _utils
from torch.utils.data.datapipes.datapipe import (
_IterDataPipeSerializationWrapper,
_MapDataPipeSerializationWrapper,
IterDataPipe,
MapDataPipe,
)
from torch.utils.data.dataset import Dataset, IterableDataset
from torch.utils.data.sampler import (
BatchSampler,
RandomSampler,
Sampler,
SequentialSampler,
)
__all__ = [
"DataLoader",
"get_worker_info",
"default_collate",
"default_convert",
]
_T = TypeVar("_T")
_T_co = TypeVar("_T_co", covariant=True)
_worker_init_fn_t = Callable[[int], None]
# Ideally we would parameterize `DataLoader` by the return type of `collate_fn`, but there is currently no way to have that
# type parameter set to a default value if the user doesn't pass in a custom 'collate_fn'.
# See https://github.com/python/mypy/issues/3737.
_collate_fn_t = Callable[[list[_T]], Any]
# These functions used to be defined in this file. However, it was moved to
# _utils/collate.py. Although it is rather hard to access this from user land
# (one has to explicitly directly `import torch.utils.data.dataloader`), there
# probably is user code out there using it. This aliasing maintains BC in this
# aspect.
default_collate: _collate_fn_t = _utils.collate.default_collate
default_convert = _utils.collate.default_convert
get_worker_info = _utils.worker.get_worker_info
logger = logging.getLogger(__name__)
class _DatasetKind:
Map = 0
Iterable = 1
@staticmethod
def create_fetcher(kind, dataset, auto_collation, collate_fn, drop_last):
if kind == _DatasetKind.Map:
return _utils.fetch._MapDatasetFetcher(
dataset, auto_collation, collate_fn, drop_last
)
else:
return _utils.fetch._IterableDatasetFetcher(
dataset, auto_collation, collate_fn, drop_last
)
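# Illustrative dispatch sketch (hypothetical `my_map_dataset`): for a map-style
# dataset the fetcher indexes the dataset for every index in the batch and then
# collates, roughly
#
#     fetcher = _DatasetKind.create_fetcher(
#         _DatasetKind.Map, my_map_dataset, True, default_collate, False
#     )
#     batch = fetcher.fetch([0, 1, 2])  # ~ default_collate([ds[0], ds[1], ds[2]])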
class _InfiniteConstantSampler(Sampler):
r"""Analogous to ``itertools.repeat(None, None)``.
Used as sampler for :class:`~torch.utils.data.IterableDataset`.
"""
def __iter__(self):
while True:
yield None
def _get_distributed_settings():
if dist.is_available() and dist.is_initialized():
return dist.get_world_size(), dist.get_rank()
else:
return 1, 0
def _sharding_worker_init_fn(worker_init_fn, world_size, rank_id, worker_id):
global_worker_id = worker_id
info = torch.utils.data.get_worker_info()
assert info is not None
total_workers = info.num_workers
datapipe = info.dataset
assert isinstance(datapipe, (IterDataPipe, MapDataPipe))
# To distribute elements across distributed process evenly, we should shard data on distributed
# processes first then shard on worker processes
total_workers *= world_size
global_worker_id = global_worker_id * world_size + rank_id
# For BC, use default SHARDING_PRIORITIES
torch.utils.data.graph_settings.apply_sharding(
datapipe, total_workers, global_worker_id
)
if worker_init_fn is not None:
worker_init_fn(worker_id)
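# Worked example (illustrative): with world_size=2 distributed ranks and
# num_workers=3 per rank, there are 2 * 3 = 6 shards in total. Worker 2 on
# rank 1 gets global_worker_id = 2 * 2 + 1 = 5, so its shard is (roughly)
# every 6th element of the datapipe starting at offset 5.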
def _share_dist_seed(generator, pg):
_shared_seed = torch.empty((), dtype=torch.int64).random_(generator=generator)
if isinstance(pg, dist.ProcessGroup):
dist.broadcast(_shared_seed, src=0, group=pg)
return _shared_seed.item()
class DataLoader(Generic[_T_co]):
r"""
Data loader combines a dataset and a sampler, and provides an iterable over the given dataset.
The :class:`~torch.utils.data.DataLoader` supports both map-style and
iterable-style datasets with single- or multi-process loading, customizing
loading order and optional automatic batching (collation) and memory pinning.
See :py:mod:`torch.utils.data` documentation page for more details.
Args:
dataset (Dataset): dataset from which to load the data.
batch_size (int, optional): how many samples per batch to load
(default: ``1``).
shuffle (bool, optional): set to ``True`` to have the data reshuffled
at every epoch (default: ``False``).
sampler (Sampler or Iterable, optional): defines the strategy to draw
samples from the dataset. Can be any ``Iterable`` with ``__len__``
implemented. If specified, :attr:`shuffle` must not be specified.
batch_sampler (Sampler or Iterable, optional): like :attr:`sampler`, but
returns a batch of indices at a time. Mutually exclusive with
:attr:`batch_size`, :attr:`shuffle`, :attr:`sampler`,
and :attr:`drop_last`.
num_workers (int, optional): how many subprocesses to use for data
loading. ``0`` means that the data will be loaded in the main process.
(default: ``0``)
collate_fn (Callable, optional): merges a list of samples to form a
mini-batch of Tensor(s). Used when using batched loading from a
map-style dataset.
pin_memory (bool, optional): If ``True``, the data loader will copy Tensors
into device/CUDA pinned memory before returning them. If your data elements
are a custom type, or your :attr:`collate_fn` returns a batch that is a custom type,
see the example below.
drop_last (bool, optional): set to ``True`` to drop the last incomplete batch,
if the dataset size is not divisible by the batch size. If ``False`` and
the size of dataset is not divisible by the batch size, then the last batch
will be smaller. (default: ``False``)
timeout (numeric, optional): if positive, the timeout value for collecting a batch
from workers. Should always be non-negative. (default: ``0``)
worker_init_fn (Callable, optional): If not ``None``, this will be called on each
worker subprocess with the worker id (an int in ``[0, num_workers - 1]``) as
input, after seeding and before data loading. (default: ``None``)
multiprocessing_context (str or multiprocessing.context.BaseContext, optional): If
``None``, the default `multiprocessing context`_ of your operating system will
be used. (default: ``None``)
generator (torch.Generator, optional): If not ``None``, this RNG will be used
by RandomSampler to generate random indexes and multiprocessing to generate
``base_seed`` for workers. (default: ``None``)
prefetch_factor (int, optional, keyword-only arg): Number of batches loaded
in advance by each worker. ``2`` means there will be a total of
            2 * num_workers batches prefetched across all workers. (The default depends
            on the value set for num_workers: if num_workers=0 the default is ``None``;
            otherwise, if ``num_workers > 0``, the default is ``2``.)
persistent_workers (bool, optional): If ``True``, the data loader will not shut down
            the worker processes after a dataset has been consumed once. This keeps
            the worker `Dataset` instances alive. (default: ``False``)
pin_memory_device (str, optional): the device to :attr:`pin_memory` on if ``pin_memory`` is
``True``. If not given, the current :ref:`accelerator<accelerators>` will be the
            default. This argument is discouraged and subject to deprecation.
in_order (bool, optional): If ``False``, the data loader will not enforce that batches
are returned in a first-in, first-out order. Only applies when ``num_workers > 0``. (default: ``True``)
.. warning:: If the ``spawn`` start method is used, :attr:`worker_init_fn`
cannot be an unpicklable object, e.g., a lambda function. See
:ref:`multiprocessing-best-practices` on more details related
to multiprocessing in PyTorch.
.. warning:: ``len(dataloader)`` heuristic is based on the length of the sampler used.
When :attr:`dataset` is an :class:`~torch.utils.data.IterableDataset`,
it instead returns an estimate based on ``len(dataset) / batch_size``, with proper
rounding depending on :attr:`drop_last`, regardless of multi-process loading
configurations. This represents the best guess PyTorch can make because PyTorch
trusts user :attr:`dataset` code in correctly handling multi-process
loading to avoid duplicate data.
However, if sharding results in multiple workers having incomplete last batches,
this estimate can still be inaccurate, because (1) an otherwise complete batch can
be broken into multiple ones and (2) more than one batch worth of samples can be
dropped when :attr:`drop_last` is set. Unfortunately, PyTorch can not detect such
cases in general.
See `Dataset Types`_ for more details on these two types of datasets and how
:class:`~torch.utils.data.IterableDataset` interacts with
`Multi-process data loading`_.
.. warning:: See :ref:`reproducibility`, and :ref:`dataloader-workers-random-seed`, and
:ref:`data-loading-randomness` notes for random seed related questions.
.. warning:: Setting `in_order` to `False` can harm reproducibility and may lead to a skewed data
distribution being fed to the trainer in cases with imbalanced data.
.. _multiprocessing context:
https://docs.python.org/3/library/multiprocessing.html#contexts-and-start-methods
"""
dataset: Dataset[_T_co]
batch_size: Optional[int]
num_workers: int
pin_memory: bool
drop_last: bool
timeout: float
sampler: Union[Sampler, Iterable]
pin_memory_device: str
prefetch_factor: Optional[int]
_iterator: Optional["_BaseDataLoaderIter"]
__initialized = False
def __init__(
self,
dataset: Dataset[_T_co],
batch_size: Optional[int] = 1,
shuffle: Optional[bool] = None,
sampler: Union[Sampler, Iterable, None] = None,
batch_sampler: Union[Sampler[list], Iterable[list], None] = None,
num_workers: int = 0,
collate_fn: Optional[_collate_fn_t] = None,
pin_memory: bool = False,
drop_last: bool = False,
timeout: float = 0,
worker_init_fn: Optional[_worker_init_fn_t] = None,
multiprocessing_context=None,
generator=None,
*,
prefetch_factor: Optional[int] = None,
persistent_workers: bool = False,
pin_memory_device: str = "",
in_order: bool = True,
):
torch._C._log_api_usage_once("python.data_loader")
if num_workers < 0:
raise ValueError(
"num_workers option should be non-negative; "
"use num_workers=0 to disable multiprocessing."
)
if timeout < 0:
raise ValueError("timeout option should be non-negative")
if num_workers == 0 and prefetch_factor is not None:
raise ValueError(
"prefetch_factor option could only be specified in multiprocessing."
"let num_workers > 0 to enable multiprocessing, otherwise set prefetch_factor to None."
)
elif num_workers > 0 and prefetch_factor is None:
prefetch_factor = 2
elif prefetch_factor is not None and prefetch_factor < 0:
raise ValueError("prefetch_factor option should be non-negative")
if persistent_workers and num_workers == 0:
raise ValueError("persistent_workers option needs num_workers > 0")
self.dataset = dataset
self.num_workers = num_workers
self.prefetch_factor = prefetch_factor
self.pin_memory = pin_memory
self.pin_memory_device = pin_memory_device
self.timeout = timeout
self.worker_init_fn = worker_init_fn
self.multiprocessing_context = multiprocessing_context
self.in_order = in_order
# Adds forward compatibilities so classic DataLoader can work with DataPipes:
# _DataPipeSerializationWrapper container makes it easier to serialize without redefining pickler
if isinstance(self.dataset, IterDataPipe):
self.dataset = _IterDataPipeSerializationWrapper(self.dataset)
elif isinstance(self.dataset, MapDataPipe):
self.dataset = _MapDataPipeSerializationWrapper(self.dataset)
# Arg-check dataset related before checking samplers because we want to
# tell users that iterable-style datasets are incompatible with custom
# samplers first, so that they don't learn that this combo doesn't work
# after spending time fixing the custom sampler errors.
if isinstance(dataset, IterableDataset):
self._dataset_kind = _DatasetKind.Iterable
# NOTE [ Custom Samplers and IterableDataset ]
#
# `IterableDataset` does not support custom `batch_sampler` or
# `sampler` since the key is irrelevant (unless we support
# generator-style dataset one day...).
#
# For `sampler`, we always create a dummy sampler. This is an
# infinite sampler even when the dataset may have an implemented
# finite `__len__` because in multi-process data loading, naive
# settings will return duplicated data (which may be desired), and
# thus using a sampler with length matching that of dataset will
            # cause data loss (you may have duplicates of the first couple
# batches, but never see anything afterwards). Therefore,
            # `IterableDataset` always uses an infinite sampler, an instance of
# `_InfiniteConstantSampler` defined above.
#
# A custom `batch_sampler` essentially only controls the batch size.
# However, it is unclear how useful it would be since an iterable-style
# dataset can handle that within itself. Moreover, it is pointless
# in multi-process data loading as the assignment order of batches
# to workers is an implementation detail so users can not control
# how to batchify each worker's iterable. Thus, we disable this
# option. If this turns out to be useful in future, we can re-enable
# this, and support custom samplers that specify the assignments to
# specific workers.
if isinstance(dataset, IterDataPipe):
if shuffle is not None:
dataset = torch.utils.data.graph_settings.apply_shuffle_settings(
dataset, shuffle=shuffle
)
# We cannot check `shuffle is not None` here, since previously `shuffle=False` was the default.
elif shuffle not in {False, None}:
raise ValueError(
f"DataLoader with IterableDataset: expected unspecified shuffle option, but got shuffle={shuffle}"
)
if sampler is not None:
# See NOTE [ Custom Samplers and IterableDataset ]
raise ValueError(
f"DataLoader with IterableDataset: expected unspecified sampler option, but got sampler={sampler}"
)
elif batch_sampler is not None:
# See NOTE [ Custom Samplers and IterableDataset ]
raise ValueError(
"DataLoader with IterableDataset: expected unspecified "
f"batch_sampler option, but got batch_sampler={batch_sampler}"
)
else:
shuffle = bool(shuffle)
self._dataset_kind = _DatasetKind.Map
if sampler is not None and shuffle:
raise ValueError("sampler option is mutually exclusive with shuffle")
if batch_sampler is not None:
# auto_collation with custom batch_sampler
if batch_size != 1 or shuffle or sampler is not None or drop_last:
raise ValueError(
"batch_sampler option is mutually exclusive "
"with batch_size, shuffle, sampler, and "
"drop_last"
)
batch_size = None
drop_last = False
elif batch_size is None:
# no auto_collation
if drop_last:
raise ValueError(
"batch_size=None option disables auto-batching "
"and is mutually exclusive with drop_last"
)
if sampler is None: # give default samplers
if self._dataset_kind == _DatasetKind.Iterable:
# See NOTE [ Custom Samplers and IterableDataset ]
sampler = _InfiniteConstantSampler()
else: # map-style
if shuffle:
sampler = RandomSampler(dataset, generator=generator) # type: ignore[arg-type]
else:
sampler = SequentialSampler(dataset) # type: ignore[arg-type]
if batch_size is not None and batch_sampler is None:
# auto_collation without custom batch_sampler
batch_sampler = BatchSampler(sampler, batch_size, drop_last)
self.batch_size = batch_size
self.drop_last = drop_last
self.sampler = sampler
self.batch_sampler = batch_sampler
self.generator = generator
if collate_fn is None:
if self._auto_collation:
collate_fn = _utils.collate.default_collate
else:
collate_fn = _utils.collate.default_convert
self.collate_fn = collate_fn
self.persistent_workers = persistent_workers
self.__initialized = True
self._IterableDataset_len_called = (
None # See NOTE [ IterableDataset and __len__ ]
)
self._iterator = None
self.check_worker_number_rationality()
torch.set_vital("Dataloader", "enabled", "True") # type: ignore[attr-defined]
def _get_iterator(self) -> "_BaseDataLoaderIter":
if self.num_workers == 0:
return _SingleProcessDataLoaderIter(self)
else:
self.check_worker_number_rationality()
return _MultiProcessingDataLoaderIter(self)
@property
def multiprocessing_context(self):
return self.__multiprocessing_context
@multiprocessing_context.setter
def multiprocessing_context(self, multiprocessing_context):
if multiprocessing_context is not None:
if self.num_workers > 0:
if isinstance(multiprocessing_context, str):
valid_start_methods = torch.multiprocessing.get_all_start_methods()
if multiprocessing_context not in valid_start_methods:
raise ValueError(
"multiprocessing_context option "
f"should specify a valid start method in {valid_start_methods!r}, but got "
f"multiprocessing_context={multiprocessing_context!r}"
)
multiprocessing_context = torch.multiprocessing.get_context(
multiprocessing_context
)
if not isinstance(
multiprocessing_context, python_multiprocessing.context.BaseContext
):
raise TypeError(
"multiprocessing_context option should be a valid context "
"object or a string specifying the start method, but got "
f"multiprocessing_context={multiprocessing_context}"
)
else:
raise ValueError(
"multiprocessing_context can only be used with "
"multi-process loading (num_workers > 0), but got "
f"num_workers={self.num_workers}"
)
self.__multiprocessing_context = multiprocessing_context
def __setattr__(self, attr, val):
if self.__initialized and attr in (
"batch_size",
"batch_sampler",
"sampler",
"drop_last",
"dataset",
"persistent_workers",
):
raise ValueError(
f"{attr} attribute should not be set after {self.__class__.__name__} is initialized"
)
super().__setattr__(attr, val)
# We quote '_BaseDataLoaderIter' since it isn't defined yet and the definition can't be moved up
# since '_BaseDataLoaderIter' references 'DataLoader'.
def __iter__(self) -> "_BaseDataLoaderIter":
        # When using a single worker the returned iterator is
        # created every time, so its state never needs to be reset.
        # However, in the case of a multi-worker iterator
        # the iterator is only created once in the lifetime of the
        # DataLoader object so that workers can be reused.
if self.persistent_workers and self.num_workers > 0:
if self._iterator is None:
self._iterator = self._get_iterator()
else:
self._iterator._reset(self)
return self._iterator
else:
return self._get_iterator()
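    # Illustrative consequence of the logic above (a sketch; `ds` is hypothetical):
    #
    #     loader = DataLoader(ds, num_workers=2, persistent_workers=True)
    #     it1 = iter(loader)
    #     it2 = iter(loader)
    #     assert it1 is it2  # the same multi-process iterator, `_reset` in between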
@property
def _auto_collation(self):
return self.batch_sampler is not None
@property
def _index_sampler(self):
# The actual sampler used for generating indices for `_DatasetFetcher`
# (see _utils/fetch.py) to read data at each time. This would be
# `.batch_sampler` if in auto-collation mode, and `.sampler` otherwise.
# We can't change `.sampler` and `.batch_sampler` attributes for BC
# reasons.
if self._auto_collation:
return self.batch_sampler
else:
return self.sampler
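    # Illustrative distinction (a sketch): in auto-collation mode the index
    # sampler yields lists of indices, otherwise single indices, e.g.
    #
    #     next(iter(BatchSampler(SequentialSampler(range(10)), 4, False)))  # -> [0, 1, 2, 3]
    #     next(iter(SequentialSampler(range(10))))                          # -> 0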
def __len__(self) -> int:
if self._dataset_kind == _DatasetKind.Iterable:
# NOTE [ IterableDataset and __len__ ]
#
# For `IterableDataset`, `__len__` could be inaccurate when one naively
# does multi-processing data loading, since the samples will be duplicated.
# However, no real use case should be actually using that behavior, so
# it should count as a user error. We should generally trust user
# code to do the proper thing (e.g., configure each replica differently
# in `__iter__`), and give us the correct `__len__` if they choose to
# implement it (this will still throw if the dataset does not implement
# a `__len__`).
#
# To provide a further warning, we track if `__len__` was called on the
# `DataLoader`, save the returned value in `self._len_called`, and warn
# if the iterator ends up yielding more than this number of samples.
# Cannot statically verify that dataset is Sized
length = self._IterableDataset_len_called = len(self.dataset) # type: ignore[assignment, arg-type]
if (
self.batch_size is not None
): # IterableDataset doesn't allow custom sampler or batch_sampler
from math import ceil
if self.drop_last:
length = length // self.batch_size
else:
length = ceil(length / self.batch_size)
return length
else:
return len(self._index_sampler)
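    # Worked example for the rounding above (illustrative): if the IterableDataset
    # reports len(dataset) == 10 and batch_size == 3, then len(loader) is
    # 10 // 3 == 3 with drop_last=True, and ceil(10 / 3) == 4 otherwise.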
def check_worker_number_rationality(self):
        # This function checks whether the dataloader's worker number is reasonable based on
        # the current system's resources. The current rule is that if the number of workers this
        # DataLoader will create is bigger than the number of logical cpus it is allowed to
        # use, then we pop up a warning to bring it to the user's attention.
        #
        # eg. If the current system has 2 physical CPUs with 16 cores each, and each core supports 2
        #     threads, then the total number of logical cpus is 2 * 16 * 2 = 64. Let's say the current
        #     DataLoader process can use half of them, i.e. 32; then the reasonable max number of
        #     workers initiated from this process is 32.
        #     Now, let's say the created DataLoader has num_workers = 40, which is bigger than 32.
        #     So the warning message is triggered to notify the user to lower the worker number if
        #     necessary.
#
#
        # [Note] Please note that this function respects `cpuset` only when os.sched_getaffinity is
        #        available (which it is on most Linux systems, but not macOS or Windows).
        #        When os.sched_getaffinity is not available, os.cpu_count() is called instead, but
        #        it doesn't respect cpuset.
# We don't take threading into account since each worker process is single threaded
# at this time.
#
        #        We don't set any threading flags (eg. OMP_NUM_THREADS, MKL_NUM_THREADS, etc)
        #        other than `torch.set_num_threads` to 1 in the worker process. If the passed-in
        #        functions use 3rd party modules that rely on those threading flags to determine
        #        how many threads to create (eg. numpy, etc), then it is the caller's responsibility to
        #        set those flags correctly.
def _create_warning_msg(num_worker_suggest, num_worker_created, cpuset_checked):
suggested_max_worker_msg = (
(
(
"Our suggested max number of worker in current system is {}{}, which is smaller "
"than what this DataLoader is going to create."
).format(
num_worker_suggest,
(
""
if cpuset_checked
else " (`cpuset` is not taken into account)"
),
)
)
if num_worker_suggest is not None
else (
"DataLoader is not able to compute a suggested max number of worker in current system."
)
)
warn_msg = (
f"This DataLoader will create {num_worker_created} worker processes in total. {suggested_max_worker_msg} "
"Please be aware that excessive worker creation might get DataLoader running slow or even freeze, "
"lower the worker number to avoid potential slowness/freeze if necessary."
)
return warn_msg
if not self.num_workers or self.num_workers == 0:
return
# try to compute a suggested max number of worker based on system's resource
max_num_worker_suggest = None
cpuset_checked = False
if hasattr(os, "sched_getaffinity"):
try:
max_num_worker_suggest = len(os.sched_getaffinity(0))
cpuset_checked = True
except Exception:
pass
if max_num_worker_suggest is None:
# os.cpu_count() could return Optional[int]
# get cpu count first and check None in order to satisfy mypy check
cpu_count = os.cpu_count()
if cpu_count is not None:
max_num_worker_suggest = cpu_count
if max_num_worker_suggest is None:
warnings.warn(
_create_warning_msg(
max_num_worker_suggest, self.num_workers, cpuset_checked
)
)
return
if self.num_workers > max_num_worker_suggest:
warnings.warn(
_create_warning_msg(
max_num_worker_suggest, self.num_workers, cpuset_checked
)
)
class _BaseDataLoaderIter:
def __init__(self, loader: DataLoader) -> None:
self._dataset = loader.dataset
self._shared_seed = None
self._pg = None
if isinstance(self._dataset, IterDataPipe):
if dist.is_available() and dist.is_initialized():
self._pg = dist.new_group(backend="gloo")
self._shared_seed = _share_dist_seed(loader.generator, self._pg)
shared_rng = torch.Generator()
shared_rng.manual_seed(self._shared_seed)
self._dataset = torch.utils.data.graph_settings.apply_random_seed(
self._dataset, shared_rng
)
self._dataset_kind = loader._dataset_kind
self._IterableDataset_len_called = loader._IterableDataset_len_called
self._auto_collation = loader._auto_collation
self._drop_last = loader.drop_last
self._index_sampler = loader._index_sampler
self._num_workers = loader.num_workers
ws, rank = _get_distributed_settings()
self._world_size = ws
self._rank = rank
# If pin_memory_device not set, default behaviour is current accelerator.
        # If pin_memory_device is set but pin_memory is not set, the default
        # behaviour is to not use pinned memory.
if len(loader.pin_memory_device) == 0:
if loader.pin_memory and not torch.accelerator.is_available():
warn_msg = (
"'pin_memory' argument is set as true but no accelerator is found, "
"then device pinned memory won't be used."
)
warnings.warn(warn_msg)
self._pin_memory = loader.pin_memory and torch.accelerator.is_available()
self._pin_memory_device = None
# Currently, pin_memory would raise error on the MPS backend (see
# https://github.com/pytorch/pytorch/issues/86060), so forcibly
# disable pin_memory on MPS. Remove this restriction once pinned
# memory allocation for MPS is fixed.
if (
self._pin_memory
and (acc := torch.accelerator.current_accelerator()) is not None
and acc.type == "mps"
):
self._pin_memory = False
warn_msg = (
"'pin_memory' argument is set as true but not supported on MPS now, "
"then device pinned memory won't be used."
)
warnings.warn(warn_msg)
else:
if not loader.pin_memory:
warn_msg = (
"'pin_memory_device' is set but 'pin_memory' argument is not set, "
"then device pinned memory won't be used."
"please set 'pin_memory' to true, if you need to use the device pin memory"
)
warnings.warn(warn_msg)
self._pin_memory = loader.pin_memory
self._pin_memory_device = loader.pin_memory_device
self._timeout = loader.timeout
self._collate_fn = loader.collate_fn
self._sampler_iter = iter(self._index_sampler)
self._base_seed = (
torch.empty((), dtype=torch.int64)
.random_(generator=loader.generator)
.item()
)
self._persistent_workers = loader.persistent_workers
self._num_yielded = 0
self._profile_name = f"enumerate(DataLoader)#{self.__class__.__name__}.__next__"
def __iter__(self) -> "_BaseDataLoaderIter":
return self
def _reset(self, loader, first_iter=False):
self._sampler_iter = iter(self._index_sampler)
self._num_yielded = 0
self._IterableDataset_len_called = loader._IterableDataset_len_called
if isinstance(self._dataset, IterDataPipe):
self._shared_seed = _share_dist_seed(loader.generator, self._pg)
shared_rng = torch.Generator()
shared_rng.manual_seed(self._shared_seed)
self._dataset = torch.utils.data.graph_settings.apply_random_seed(
self._dataset, shared_rng
)
def _next_index(self):
return next(self._sampler_iter) # may raise StopIteration
def _next_data(self):
raise NotImplementedError
def __next__(self) -> Any:
with torch.autograd.profiler.record_function(self._profile_name):
if self._sampler_iter is None:
# TODO(https://github.com/pytorch/pytorch/issues/76750)
self._reset() # type: ignore[call-arg]
data = self._next_data()
self._num_yielded += 1
if (
self._dataset_kind == _DatasetKind.Iterable
and self._IterableDataset_len_called is not None
and self._num_yielded > self._IterableDataset_len_called
):
warn_msg = (
f"Length of IterableDataset {self._dataset} was reported to be {self._IterableDataset_len_called}"
f"(when accessing len(dataloader)), but {self._num_yielded} samples have been fetched. "
)
if self._num_workers > 0:
warn_msg += (
"For multiprocessing data-loading, this could be caused by not properly configuring the "
"IterableDataset replica at each worker. Please see "
"https://pytorch.org/docs/stable/data.html#torch.utils.data.IterableDataset for examples."
)
warnings.warn(warn_msg)
return data
def __len__(self) -> int:
return len(self._index_sampler)
def __getstate__(self):
# TODO: add limited pickling support for sharing an iterator
# across multiple threads for HOGWILD.
# Probably the best way to do this is by moving the sample pushing
# to a separate thread and then just sharing the data queue
# but signalling the end is tricky without a non-blocking API
        raise NotImplementedError(f"{self.__class__.__name__} cannot be pickled")
class _SingleProcessDataLoaderIter(_BaseDataLoaderIter):
def __init__(self, loader):
super().__init__(loader)
assert self._timeout == 0
assert self._num_workers == 0
# Adds forward compatibilities so classic DataLoader can work with DataPipes:
# Taking care of distributed sharding
if isinstance(self._dataset, (IterDataPipe, MapDataPipe)):
# For BC, use default SHARDING_PRIORITIES
torch.utils.data.graph_settings.apply_sharding(
self._dataset, self._world_size, self._rank
)
self._dataset_fetcher = _DatasetKind.create_fetcher(
self._dataset_kind,
self._dataset,
self._auto_collation,
self._collate_fn,
self._drop_last,
)
def _next_data(self):
index = self._next_index() # may raise StopIteration
data = self._dataset_fetcher.fetch(index) # may raise StopIteration
if self._pin_memory:
data = _utils.pin_memory.pin_memory(data, self._pin_memory_device)
return data
class _MultiProcessingDataLoaderIter(_BaseDataLoaderIter):
r"""Iterates once over the DataLoader's dataset, as specified by the sampler."""
# NOTE [ Data Loader Multiprocessing Shutdown Logic ]
#
# Preliminary:
#
# Our data model looks like this (queues are indicated with curly brackets):
#
# main process ||
# | ||
# {index_queue} ||
# | ||
# worker processes || DATA
# | ||
# {worker_result_queue} || FLOW
# | ||
# pin_memory_thread of main process || DIRECTION
# | ||
# {data_queue} ||
# | ||
# data output \/
#
# P.S. `worker_result_queue` and `pin_memory_thread` part may be omitted if
# `pin_memory=False`.
#
#
# Terminating multiprocessing logic requires very careful design. In
# particular, we need to make sure that
#
# 1. The iterator gracefully exits the workers when its last reference is
# gone or it is depleted.
#
# In this case, the workers should be gracefully exited because the
# main process may still need to continue to run, and we want cleaning
# up code in the workers to be executed (e.g., releasing GPU memory).
# Naturally, we implement the shutdown logic in `__del__` of
# DataLoaderIterator.
#
# We delay the discussion on the logic in this case until later.
#
# 2. The iterator exits the workers when the loader process and/or worker
# processes exits normally or with error.
#
# We set all workers and `pin_memory_thread` to have `daemon=True`.
#
# You may ask, why can't we make the workers non-daemonic, and
# gracefully exit using the same logic as we have in `__del__` when the
# iterator gets deleted (see 1 above)?
#
# First of all, `__del__` is **not** guaranteed to be called when
# interpreter exits. Even if it is called, by the time it executes,
# many Python core library resources may already be freed, and even
# simple things like acquiring an internal lock of a queue may hang.
# Therefore, in this case, we actually need to prevent `__del__` from
# being executed, and rely on the automatic termination of daemonic
# children.
#
# Thus, we register an `atexit` hook that sets a global flag
# `_utils.python_exit_status`. Since `atexit` hooks are executed in the
# reverse order of registration, we are guaranteed that this flag is
# set before library resources we use are freed (which, at least in
# CPython, is done via an `atexit` handler defined in
# `multiprocessing/util.py`
# https://github.com/python/cpython/blob/c606624af8d4cb3b4a052fb263bb983b3f87585b/Lib/multiprocessing/util.py#L320-L362
# registered when an object requiring this mechanism is first
# created, e.g., `mp.Queue`
# https://github.com/python/cpython/blob/c606624af8d4cb3b4a052fb263bb983b3f87585b/Lib/multiprocessing/context.py#L100-L103
# https://github.com/python/cpython/blob/c606624af8d4cb3b4a052fb263bb983b3f87585b/Lib/multiprocessing/queues.py#L29
# )
#
# So in `__del__`, we check if `_utils.python_exit_status` is set or
# `None` (freed), and perform no-op if so.
#
# However, simply letting library clean-up codes run can also be bad,
# because such codes (i.e., `multiprocessing.util._exit_function()`)
# include join putting threads for `mp.Queue`, which can be blocking.
# Hence, the main process putting threads are called with
# `cancel_join_thread` at creation. See later section
# [ 3b. A process won't hang when putting into a queue; ]
# for more details.
#
# Here are two example cases where library clean-up codes can run
# before `__del__` is called:
#
# 1. If we hold onto a reference to the iterator, it more often
# than not tries to do `multiprocessing` library cleaning before
# clearing the alive referenced objects (https://github.com/pytorch/pytorch/issues/48666)
    #           and thus prevents our cleaning-up code from running first.
#
    #        2. A similar issue arises when a `DataLoader` is used in a subprocess.
    #           When a process ends, it shuts all of its daemonic children
    #           down with a SIGTERM (instead of joining them without a timeout).
    #           Similarly for threads, but by a different mechanism. This fact,
# together with a few implementation details of multiprocessing, forces
# us to make workers daemonic. All of our problems arise when a
# DataLoader is used in a subprocess, and are caused by multiprocessing
# code which looks more or less like this:
#
# try:
# your_function_using_a_dataloader()
# finally:
# multiprocessing.util._exit_function()
#
# The joining/termination mentioned above happens inside
# `_exit_function()`. Now, if `your_function_using_a_dataloader()`
# throws, the stack trace stored in the exception will prevent the
# frame which uses `DataLoaderIter` to be freed. If the frame has any
# reference to the `DataLoaderIter` (e.g., in a method of the iter),
# its `__del__`, which starts the shutdown procedure, will not be
# called. That, in turn, means that workers aren't notified. Attempting
# to join in `_exit_function` will then result in a hang.
#
# For context, `_exit_function` is also registered as an `atexit` call.
# So it is unclear to me (@ssnl) why this is needed in a finally block.
# The code dates back to 2008 and there is no comment on the original
# PEP 371 or patch https://bugs.python.org/issue3050 (containing both
# the finally block and the `atexit` registration) that explains this.
#
#
# Finally, another choice is to just shutdown workers with logic in 1
# above whenever we see an error in `next`. This isn't ideal because
# a. It prevents users from using try-catch to resume data loading.
# b. It doesn't prevent hanging if users have references to the
# iterator.
#
# 3. All processes exit if any of them die unexpectedly by fatal signals.
#
# As shown above, the workers are set as daemonic children of the main
# process. However, automatic cleaning-up of such child processes only
# happens if the parent process exits gracefully (e.g., not via fatal
    #      signals like SIGKILL). So we must ensure that each process will exit
    #      even if the process that should send/receive data to/from it was
    #      killed, i.e.,
#
# a. A process won't hang when getting from a queue.
#
# Even with carefully designed data dependencies (i.e., a `put()`
# always corresponding to a `get()`), hanging on `get()` can still
# happen when data in queue is corrupted (e.g., due to
# `cancel_join_thread` or unexpected exit).
#
# For child exit, we set a timeout whenever we try to get data
# from `data_queue`, and check the workers' status on each timeout
# and error.
# See `_DataLoaderiter._get_batch()` and
# `_DataLoaderiter._try_get_data()` for details.
#
# Additionally, for child exit on non-Windows platforms, we also
# register a SIGCHLD handler (which is supported on Windows) on
# the main process, which checks if any of the workers fail in the
# (Python) handler. This is more efficient and faster in detecting
# worker failures, compared to only using the above mechanism.
# See `DataLoader.cpp` and `_utils/signal_handling.py` for details.
#
# For `.get()` calls where the sender(s) is not the workers, we
# guard them with timeouts, and check the status of the sender
# when timeout happens:
# + in the workers, the `_utils.worker.ManagerWatchdog` class
# checks the status of the main process.
# + if `pin_memory=True`, when getting from `pin_memory_thread`,
# check `pin_memory_thread` status periodically until `.get()`
# returns or see that `pin_memory_thread` died.
#
# b. A process won't hang when putting into a queue;
#
# We use `mp.Queue` which has a separate background thread to put
# objects from an unbounded buffer array. The background thread is
# daemonic and usually automatically joined when the process
# *exits*.
#
# In case that the receiver has ended abruptly while
# reading from the pipe, the join will hang forever. The usual
# solution for this in Python is calling `q.cancel_join_thread`,
# which prevents automatically joining it when finalizing
# (exiting).
#
# Nonetheless, `cancel_join_thread` must only be called when the
# queue is **not** going to be read from or write into by another
# process, because it may hold onto a lock or leave corrupted data
# in the queue, leading other readers/writers to hang.
#
# Hence,
# + For worker processes, we only do so (for their output
# queues, i.e., `worker_result_queue`) before exiting.
# + For `pin_memory_thread`, its output queue `data_queue` is a
# `queue.Queue` that does blocking `put` if the queue is full.
# So there is no above problem, but as a result, in
# `_pin_memory_loop`, we do need to wrap the `put` in a loop
# that breaks not only upon success, but also when the main
# process stops reading, i.e., is shutting down.
# + For loader process, we `cancel_join_thread()` for all
# `_index_queues` because the whole purpose of workers and
# `pin_memory_thread` is to serve the loader process. If
# loader process is already exiting, we don't really care if
# the queues are corrupted.
#
#
# Now let's get back to 1:
# how we gracefully exit the workers when the last reference to the
# iterator is gone.
#
# To achieve this, we implement the following logic along with the design
# choices mentioned above:
#
# `workers_done_event`:
# A `multiprocessing.Event` shared among the main process and all worker
# processes. This is used to signal the workers that the iterator is
# shutting down. After it is set, they will not send processed data to
# queues anymore, and only wait for the final `None` before exiting.
# `done_event` isn't strictly needed. I.e., we can just check for `None`
# from the input queue, but it allows us to skip wasting resources
# processing data if we are already shutting down.
#
# `pin_memory_thread_done_event`:
# A `threading.Event` for a similar purpose to that of
# `workers_done_event`, but is for the `pin_memory_thread`. The reason
# that separate events are needed is that `pin_memory_thread` reads from
# the output queue of the workers. But the workers, upon seeing that
    #   `workers_done_event` is set, only want to see the final `None`, and are
# not required to flush all data in the output queue (e.g., it may call
# `cancel_join_thread` on that queue if its `IterableDataset` iterator
# happens to exhaust coincidentally, which is out of the control of the
# main process). Thus, since we will exit `pin_memory_thread` before the
    #   workers (see below), two separate events are used.
#
# NOTE: In short, the protocol is that the main process will set these
# `done_event`s and then the corresponding processes/threads a `None`,
# and that they may exit at any time after receiving the `None`.
#
# NOTE: Using `None` as the final signal is valid, since normal data will
# always be a 2-tuple with the 1st element being the index of the data
# transferred (different from dataset index/key), and the 2nd being
# either the dataset key or the data sample (depending on which part
# of the data model the queue is at).
#
# [ worker processes ]
# While loader process is alive:
# Get from `index_queue`.
    #       If got anything other than `None`,
# Check `workers_done_event`.
# If set, continue to next iteration
    #                    i.e., keep getting until we see the `None`, then exit.
# Otherwise, process data:
# If is fetching from an `IterableDataset` and the iterator
# is exhausted, send an `_IterableDatasetStopIteration`
# object to signal iteration end. The main process, upon
# receiving such an object, will send `None` to this
# worker and not use the corresponding `index_queue`
# anymore.
# If timed out,
    #          Whether or not `workers_done_event` is set (we still need to see `None`),
    #          we must continue to the next iteration.
# (outside loop)
# If `workers_done_event` is set, (this can be False with `IterableDataset`)
# `data_queue.cancel_join_thread()`. (Everything is ending here:
# main process won't read from it;
# other workers will also call
# `cancel_join_thread`.)
#
# [ pin_memory_thread ]
# # No need to check main thread. If this thread is alive, the main loader
# # thread must be alive, because this thread is set as daemonic.
# While `pin_memory_thread_done_event` is not set:
# Get from `worker_result_queue`.
# If timed out, continue to get in the next iteration.
# Otherwise, process data.
# While `pin_memory_thread_done_event` is not set:
# Put processed data to `data_queue` (a `queue.Queue` with blocking put)
# If timed out, continue to put in the next iteration.
    #         Otherwise, break, i.e., continue to the outer loop.
#
# NOTE: we don't check the status of the main thread because
# 1. if the process is killed by fatal signal, `pin_memory_thread`
# ends.
# 2. in other cases, either the cleaning-up in __del__ or the
# automatic exit of daemonic thread will take care of it.
# This won't busy-wait either because `.get(timeout)` does not
# busy-wait.
#
# [ main process ]
# In the DataLoader Iter's `__del__`
# b. Exit `pin_memory_thread`
# i. Set `pin_memory_thread_done_event`.
# ii Put `None` in `worker_result_queue`.
# iii. Join the `pin_memory_thread`.
# iv. `worker_result_queue.cancel_join_thread()`.
#
# c. Exit the workers.
# i. Set `workers_done_event`.
# ii. Put `None` in each worker's `index_queue`.
# iii. Join the workers.
# iv. Call `.cancel_join_thread()` on each worker's `index_queue`.
#
# NOTE: (c) is better placed after (b) because it may leave corrupted
# data in `worker_result_queue`, which `pin_memory_thread`
    #              reads from, in which case shutting down the `pin_memory_thread`
    #              can only happen upon timing out, which is slow. Nonetheless, the same thing
# happens if a worker is killed by signal at unfortunate times,
# but in other cases, we are better off having a non-corrupted
# `worker_result_queue` for `pin_memory_thread`.
#
# NOTE: If `pin_memory=False`, there is no `pin_memory_thread` and (b)
# can be omitted
#
# NB: `done_event`s isn't strictly needed. E.g., we can just check for
# `None` from `index_queue`, but it allows us to skip wasting resources
# processing indices already in `index_queue` if we are already shutting
# down.
def __init__(self, loader):
super().__init__(loader)
self._prefetch_factor = loader.prefetch_factor
self._in_order = loader.in_order
assert self._num_workers > 0
assert self._prefetch_factor > 0
if loader.multiprocessing_context is None:
multiprocessing_context = torch.multiprocessing
else:
multiprocessing_context = loader.multiprocessing_context
self._worker_init_fn = loader.worker_init_fn
# Adds forward compatibilities so classic DataLoader can work with DataPipes:
# Additional worker init function will take care of sharding in MP and Distributed
if isinstance(self._dataset, (IterDataPipe, MapDataPipe)):
self._worker_init_fn = functools.partial(
_sharding_worker_init_fn,
self._worker_init_fn,
self._world_size,
self._rank,
)
# No certainty which module multiprocessing_context is
self._worker_result_queue = multiprocessing_context.Queue() # type: ignore[var-annotated]
self._worker_pids_set = False
self._shutdown = False
self._workers_done_event = multiprocessing_context.Event()
self._index_queues = []
self._workers = []
for i in range(self._num_workers):
# No certainty which module multiprocessing_context is
index_queue = multiprocessing_context.Queue() # type: ignore[var-annotated]
# Need to `cancel_join_thread` here!
# See sections (2) and (3b) above.
index_queue.cancel_join_thread()
w = multiprocessing_context.Process(
target=_utils.worker._worker_loop,
args=(
self._dataset_kind,
self._dataset,
index_queue,
self._worker_result_queue,
self._workers_done_event,
self._auto_collation,
self._collate_fn,
self._drop_last,
self._base_seed,
self._worker_init_fn,
i,
self._num_workers,
self._persistent_workers,
self._shared_seed,
),
)
w.daemon = True
            # NB: Process.start() actually takes some time as it needs to
            #     start a process and pass the arguments over via a pipe.
            #     Therefore, we only add a worker to the self._workers list after
            #     it has started, so that we do not call .join() if the program dies
            #     before it starts, in which case __del__ would try to join and get:
            #     AssertionError: can only join a started process.
w.start()
self._index_queues.append(index_queue)
self._workers.append(w)
if self._pin_memory:
self._pin_memory_thread_done_event = threading.Event()
# Queue is not type-annotated
self._data_queue = queue.Queue() # type: ignore[var-annotated]
current_device = -1
if self._pin_memory_device == "cuda":
current_device = torch.cuda.current_device()
elif self._pin_memory_device == "xpu":
current_device = torch.xpu.current_device()
elif self._pin_memory_device == torch._C._get_privateuse1_backend_name():
custom_device_mod = getattr(
torch, torch._C._get_privateuse1_backend_name()
)
current_device = custom_device_mod.current_device()
elif self._pin_memory_device is None:
current_device = torch.accelerator.current_device_index()
pin_memory_thread = threading.Thread(
target=_utils.pin_memory._pin_memory_loop,
args=(
self._worker_result_queue,
self._data_queue,
current_device,
self._pin_memory_thread_done_event,
self._pin_memory_device,
),
)
pin_memory_thread.daemon = True
pin_memory_thread.start()
# Similar to workers (see comment above), we only register
# pin_memory_thread once it is started.
self._pin_memory_thread = pin_memory_thread
else:
self._data_queue = self._worker_result_queue # type: ignore[assignment]
        # In some rare cases, persistent workers (daemonic processes)
        # would be terminated before the iterator's `__del__` is invoked
        # when the main process exits.
        # This would cause a failure when pin_memory_thread tries to read
        # corrupted data from worker_result_queue.
        # atexit is used to shut down the thread and child processes in the
        # right sequence before the main process exits.
if self._persistent_workers and self._pin_memory:
import atexit
for w in self._workers:
atexit.register(_MultiProcessingDataLoaderIter._clean_up_worker, w)
# .pid can be None only before process is spawned (not the case, so ignore)
_utils.signal_handling._set_worker_pids(id(self), tuple(w.pid for w in self._workers)) # type: ignore[misc]
_utils.signal_handling._set_SIGCHLD_handler()
self._worker_pids_set = True
self._reset(loader, first_iter=True)
def _reset(self, loader, first_iter=False):
super()._reset(loader, first_iter)
self._send_idx = 0 # idx of the next task to be sent to workers
self._rcvd_idx = 0 # idx of the next task to be returned in __next__
# information about data not yet yielded, i.e., tasks w/ indices in range [rcvd_idx, send_idx).
# map: task idx => - (worker_id,) if data isn't fetched (outstanding)
# \ (worker_id, data) if data is already fetched (out-of-order)
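        # Illustrative snapshot (a sketch): with rcvd_idx == 3 and send_idx == 6,
        # task_info might be {3: (1,), 4: (0, data4), 5: (1,)} -- task 3 is still
        # outstanding on worker 1, while task 4 already arrived out of order from
        # worker 0 and is parked until task 3 has been yielded.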
self._task_info = {}
self._tasks_outstanding = (
0 # always equal to count(v for v in task_info.values() if len(v) == 1)
)
# A list of booleans representing whether each worker still has work to
# do, i.e., not having exhausted its iterable dataset object. It always
# contains all `True`s if not using an iterable-style dataset
# (i.e., if kind != Iterable).
        # Note that this indicates that a worker still has work to do *for this epoch*.
# It does not mean that a worker is dead. In case of `_persistent_workers`,
# the worker will be reset to available in the next epoch.
self._workers_status = [True for i in range(self._num_workers)]
# A list of integers representing how many tasks are outstanding for each worker
# Incremented when a task is dispatched to the worker
# Decremented when that data has been given to the main thread
# Each worker should have at most self._prefetch_factor tasks outstanding
self._workers_num_tasks = [0 for i in range(self._num_workers)]
# Reset the worker queue cycle so it resumes next epoch at worker 0
self._worker_queue_idx_cycle = itertools.cycle(range(self._num_workers))
# We resume the prefetching in case it was enabled
if not first_iter:
for idx in range(self._num_workers):
self._index_queues[idx].put(
_utils.worker._ResumeIteration(self._shared_seed)
)
resume_iteration_cnt = self._num_workers
while resume_iteration_cnt > 0:
return_idx, return_data = self._get_data()
if isinstance(return_idx, _utils.worker._ResumeIteration):
assert return_data is None
resume_iteration_cnt -= 1
# prime the prefetch loop
for _ in range(self._prefetch_factor * self._num_workers):
self._try_put_index()
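        # Worked example (illustrative): with prefetch_factor=2 and num_workers=4,
        # the loop above enqueues 2 * 4 = 8 index batches up front, so each worker
        # starts with at most `prefetch_factor` outstanding tasks.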
def _try_get_data(self, timeout=_utils.MP_STATUS_CHECK_INTERVAL):
# Tries to fetch data from `self._data_queue` once for a given timeout.
# This can also be used as inner loop of fetching without timeout, with
# the sender status as the loop condition.
#
        # This raises a `RuntimeError` if any worker died unexpectedly. This error
# can come from either the SIGCHLD handler in `_utils/signal_handling.py`
# (only for non-Windows platforms), or the manual check below on errors
# and timeouts.
#
# Returns a 2-tuple:
# (bool: whether successfully get data, any: data if successful else None)
try:
data = self._data_queue.get(timeout=timeout)
return (True, data)
except Exception as e:
# At timeout and error, we manually check whether any worker has
# failed. Note that this is the only mechanism for Windows to detect
# worker failures.
failed_workers = []
for worker_id, w in enumerate(self._workers):
if self._workers_status[worker_id] and not w.is_alive():
failed_workers.append(w)
self._mark_worker_as_unavailable(worker_id)
if len(failed_workers) > 0:
pids_str = ", ".join(str(w.pid) for w in failed_workers)
raise RuntimeError(
f"DataLoader worker (pid(s) {pids_str}) exited unexpectedly"
) from e
if isinstance(e, queue.Empty):
return (False, None)
import errno
import tempfile
try:
# Raise an exception if we are this close to the FDs limit.
# Apparently, trying to open only one file is not a sufficient
# test.
# See NOTE [ DataLoader on Linux and open files limit ]
fds_limit_margin = 10
[tempfile.NamedTemporaryFile() for i in range(fds_limit_margin)]
except OSError as e:
if e.errno == errno.EMFILE:
raise RuntimeError(
"Too many open files. Communication with the"
" workers is no longer possible. Please increase the"
" limit using `ulimit -n` in the shell or change the"
" sharing strategy by calling"
" `torch.multiprocessing.set_sharing_strategy('file_system')`"
" at the beginning of your code"
) from None
raise
# NOTE [ DataLoader on Linux and open files limit ]
#
# On Linux when DataLoader is used with multiprocessing we pass the data between
# the root process and the workers through SHM files. We remove those files from
# the filesystem as soon as they are created and keep them alive by
# passing around their file descriptors through AF_UNIX sockets. (See
# docs/source/multiprocessing.rst and 'Multiprocessing Technical Notes` in
# the wiki (https://github.com/pytorch/pytorch/wiki).)
#
# This sometimes leads us to exceeding the open files limit. When that happens,
# and the offending file descriptor is coming over a socket, the `socket` Python
# package silently strips the file descriptor from the message, setting only the
# `MSG_CTRUNC` flag (which might be a bit misleading since the manpage says that
# it _indicates that some control data were discarded due to lack of space in
# the buffer for ancillary data_). This might reflect the C implementation of
# AF_UNIX sockets.
#
# This behaviour can be reproduced with the script and instructions at the
# bottom of this note.
#
# When that happens, the standard Python `multiprocessing` (and not
# `torch.multiprocessing`) raises a `RuntimeError: received 0 items of ancdata`
#
# Sometimes, instead of the FD being stripped, you may get an `OSError:
# Too many open files`, both in the script below and in DataLoader. However,
# this is rare and seems to be nondeterministic.
#
#
# #!/usr/bin/env python3
# import sys
# import socket
# import os
# import array
# import shutil
# import socket
#
#
# if len(sys.argv) != 4:
# print("Usage: ", sys.argv[0], " tmp_dirname iteration (send|recv)")
# sys.exit(1)
#
# if __name__ == '__main__':
# dirname = sys.argv[1]
# sock_path = dirname + "/sock"
# iterations = int(sys.argv[2])
# def dummy_path(i):
# return dirname + "/" + str(i) + ".dummy"
#
#
# if sys.argv[3] == 'send':
# while not os.path.exists(sock_path):
# pass
# client = socket.socket(socket.AF_UNIX, socket.SOCK_DGRAM)
# client.connect(sock_path)
# for i in range(iterations):
# fd = os.open(dummy_path(i), os.O_WRONLY | os.O_CREAT)
# ancdata = array.array('i', [fd])
# msg = bytes([i % 256])
# print("Sending fd ", fd, " (iteration #", i, ")")
# client.sendmsg([msg], [(socket.SOL_SOCKET, socket.SCM_RIGHTS, ancdata)])
#
#
# else:
# assert sys.argv[3] == 'recv'
#
# if os.path.exists(dirname):
# raise Exception("Directory exists")
#
# os.mkdir(dirname)
#
# print("Opening socket...")
# server = socket.socket(socket.AF_UNIX, socket.SOCK_DGRAM)
# server.bind(sock_path)
#
# print("Listening...")
# for i in range(iterations):
# a = array.array('i')
# msg, ancdata, flags, addr = server.recvmsg(1, socket.CMSG_SPACE(a.itemsize))
# assert(len(ancdata) == 1)
# cmsg_level, cmsg_type, cmsg_data = ancdata[0]
# a.frombytes(cmsg_data)
# print("Received fd ", a[0], " (iteration #", i, ")")
#
# shutil.rmtree(dirname)
#
# Steps to reproduce:
#
# 1. Run two shells and set lower file descriptor limit in the receiving one:
# (shell1) ulimit -n 1020
# (shell2) ulimit -n 1022
#
# 2. Run the script above with the `recv` option in the first shell
# (shell1) ./test_socket.py sock_tmp 1017 recv
#
# 3. Run the script with the `send` option in the second shell:
# (shell2) ./test_socket.py sock_tmp 1017 send
def _get_data(self):
# Fetches data from `self._data_queue`.
#
# We check workers' status every `MP_STATUS_CHECK_INTERVAL` seconds,
# which we achieve by running `self._try_get_data(timeout=MP_STATUS_CHECK_INTERVAL)`
# in a loop. This is the only mechanism to detect worker failures for
# Windows. For other platforms, a SIGCHLD handler is also used for
# worker failure detection.
#
# If `pin_memory=True`, we also need to check whether `pin_memory_thread`
# has died at timeouts.
if self._timeout > 0:
success, data = self._try_get_data(self._timeout)
if success:
return data
else:
raise RuntimeError(
f"DataLoader timed out after {self._timeout} seconds"
)
elif self._pin_memory:
while self._pin_memory_thread.is_alive():
success, data = self._try_get_data()
if success:
return data
else:
# while condition is false, i.e., pin_memory_thread died.
raise RuntimeError("Pin memory thread exited unexpectedly")
# In this case, `self._data_queue` is a `queue.Queue`. But we don't
# need to call `.task_done()` because we don't use `.join()`.
else:
while True:
success, data = self._try_get_data()
if success:
return data
def _next_data(self):
while True:
# If the worker responsible for `self._rcvd_idx` has already ended
# and was unable to fulfill this task (due to exhausting an `IterableDataset`),
# we try to advance `self._rcvd_idx` to find the next valid index.
#
# This part needs to run in the loop because both the `self._get_data()`
# call and `_IterableDatasetStopIteration` check below can mark
# extra worker(s) as dead.
while self._rcvd_idx < self._send_idx:
info = self._task_info.get(self._rcvd_idx, None)
if info:
worker_id = info[0]
if (
len(info) == 2 or self._workers_status[worker_id]
): # has data or is still active
break
del self._task_info[self._rcvd_idx]
self._rcvd_idx += 1
else:
# no valid `self._rcvd_idx` is found (i.e., didn't break)
if not self._persistent_workers:
self._shutdown_workers()
raise StopIteration
# Now `self._rcvd_idx` is the batch index we want to fetch
# Check if the next sample has already been generated
if len(self._task_info[self._rcvd_idx]) == 2:
worker_id, data = self._task_info.pop(self._rcvd_idx)
self._rcvd_idx += 1
return self._process_data(data, worker_id)
assert not self._shutdown and self._tasks_outstanding > 0
idx, data = self._get_data()
self._tasks_outstanding -= 1
if self._dataset_kind == _DatasetKind.Iterable:
# Check for _IterableDatasetStopIteration
if isinstance(data, _utils.worker._IterableDatasetStopIteration):
if self._persistent_workers:
self._workers_status[data.worker_id] = False
else:
self._mark_worker_as_unavailable(data.worker_id)
self._try_put_index()
continue
if idx != self._rcvd_idx:
if not self._in_order:
# don't store it for later, process now
# delete from self._task_info immediately
# this keeps the object size manageable
worker_id = self._task_info.pop(idx)[0]
return self._process_data(data, worker_id)
# store out-of-order samples
self._task_info[idx] += (data,)
else:
worker_id = self._task_info.pop(idx)[0]
self._rcvd_idx += 1
return self._process_data(data, worker_id)
def _try_put_index(self):
max_tasks = self._prefetch_factor * self._num_workers
assert self._tasks_outstanding < max_tasks
try:
index = self._next_index()
except StopIteration:
return
for _ in range(self._num_workers): # find the next active worker, if any
worker_queue_idx = next(self._worker_queue_idx_cycle)
if self._workers_status[worker_queue_idx]:
if self._in_order:
break
elif self._workers_num_tasks[worker_queue_idx] < max_tasks // sum(
self._workers_status
):
# when self._in_order is False, distribute work to a worker if it has capacity
# _workers_status is updated only in this thread, so the sum is guaranteed > 0
break
else:
# not found (i.e., didn't break)
return
self._index_queues[worker_queue_idx].put((self._send_idx, index)) # type: ignore[possibly-undefined]
self._task_info[self._send_idx] = (worker_queue_idx,)
self._workers_num_tasks[worker_queue_idx] += 1
self._tasks_outstanding += 1
self._send_idx += 1
def _process_data(self, data, worker_idx):
self._workers_num_tasks[worker_idx] -= 1
self._try_put_index()
if isinstance(data, ExceptionWrapper):
data.reraise()
return data
def _mark_worker_as_unavailable(self, worker_id, shutdown=False):
# Mark a worker as having finished its work e.g., due to
# exhausting an `IterableDataset`. This should be used only when this
# `_MultiProcessingDataLoaderIter` is going to continue running.
assert self._workers_status[worker_id] or (
self._persistent_workers and shutdown
)
# Signal termination to that specific worker.
q = self._index_queues[worker_id]
# Indicate that no more data will be put on this queue by the current
# process.
q.put(None)
# Note that we don't actually join the worker here, nor do we remove the
# worker's pid from the C side struct because (1) joining may be slow, and
# (2) since we don't join, the worker may still raise an error, and we
# prefer capturing those, rather than ignoring them, even though they
# are raised after the worker has finished its job.
# Joining is deferred to `_shutdown_workers`, which is called when
# all workers finish their jobs (e.g., `IterableDataset` replicas) or
# when this iterator is garbage collected.
self._workers_status[worker_id] = False
assert self._workers_done_event.is_set() == shutdown
def _shutdown_workers(self):
# Called when shutting down this `_MultiProcessingDataLoaderIter`.
# See NOTE [ Data Loader Multiprocessing Shutdown Logic ] for details on
# the logic of this function.
if (
_utils is None
or _utils.python_exit_status is True
or _utils.python_exit_status is None
):
# See (2) of the note. If Python is shutting down, this is a no-op.
return
# Normal exit when last reference is gone / iterator is depleted.
# See (1) and the second half of the note.
if not self._shutdown:
self._shutdown = True
try:
# Normal exit when last reference is gone / iterator is depleted.
# See (1) and the second half of the note.
# Exit `pin_memory_thread` first because exiting workers may leave
# corrupted data in `worker_result_queue` which `pin_memory_thread`
# reads from.
if hasattr(self, "_pin_memory_thread"):
# Use hasattr in case an error happened before we set the attribute.
self._pin_memory_thread_done_event.set()
# Send something to pin_memory_thread in case it is waiting
# so that it can wake up and check `pin_memory_thread_done_event`
self._worker_result_queue.put((None, None))
self._pin_memory_thread.join()
self._worker_result_queue.cancel_join_thread()
self._worker_result_queue.close()
# Exit workers now.
self._workers_done_event.set()
for worker_id in range(len(self._workers)):
# Get number of workers from `len(self._workers)` instead of
# `self._num_workers` in case we errored before starting all
# workers.
# If we are using workers_status with persistent_workers,
# we have to shut the worker down because it is paused.
if self._persistent_workers or self._workers_status[worker_id]:
self._mark_worker_as_unavailable(worker_id, shutdown=True)
for w in self._workers:
# We should be able to join here, but in case anything went
# wrong, we set a timeout and if the workers fail to join,
# they are killed in the `finally` block.
w.join(timeout=_utils.MP_STATUS_CHECK_INTERVAL)
for q in self._index_queues:
q.cancel_join_thread()
q.close()
finally:
# Even though all this function does is putting into queues that
# we have called `cancel_join_thread` on, weird things can
# happen when a worker is killed by a signal, e.g., hanging in
# `Event.set()`. So we need to guard this with SIGCHLD handler,
# and remove pids from the C side data structure only at the
# end.
#
# FIXME: Unfortunately, for Windows, we are missing a worker
# error detection mechanism here in this function, as it
# doesn't provide a SIGCHLD handler.
if self._worker_pids_set:
_utils.signal_handling._remove_worker_pids(id(self))
self._worker_pids_set = False
for w in self._workers:
if w.is_alive():
# Existing mechanisms try to make the workers exit
# peacefully, but if we unfortunately reach
# here, which we shouldn't (e.g., pytorch/pytorch#39570),
# we kill the worker.
w.terminate()
# staticmethod is used to remove reference to `_MultiProcessingDataLoaderIter`
@staticmethod
def _clean_up_worker(w):
try:
w.join(timeout=_utils.MP_STATUS_CHECK_INTERVAL)
finally:
if w.is_alive():
w.terminate()
def __del__(self):
self._shutdown_workers()
```
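The error message above names two workarounds for hitting the open-files limit. A minimal sketch of both, assuming a Unix-like system (the `resource` module is Unix-only); the exact limit values are illustrative:
```py
# Hedged sketch of the workarounds mentioned in the "Too many open files" error.
import resource
import torch.multiprocessing
# Option 1: raise the soft FD limit of the current process (the in-process
# equivalent of running `ulimit -n` in the shell).
soft, hard = resource.getrlimit(resource.RLIMIT_NOFILE)
resource.setrlimit(resource.RLIMIT_NOFILE, (hard, hard))
# Option 2: keep shared-memory segments addressable by file name instead of
# passing file descriptors over AF_UNIX sockets.
torch.multiprocessing.set_sharing_strategy("file_system")
```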
|
==============================================================================================================================
SOURCE CODE FILE: __init__.py
LINES: 1
SIZE: 0.09 KB
PATH: scripts\freecad_env\Lib\site-packages\torch\utils\data\datapipes\__init__.py
ENCODING: utf-8
```py
from torch.utils.data.datapipes import dataframe as dataframe, iter as iter, map as map
```
|
================================================================================================================================
SOURCE CODE FILE: _decorator.py
LINES: 1
SIZE: 7.86 KB
PATH: scripts\freecad_env\Lib\site-packages\torch\utils\data\datapipes\_decorator.py
ENCODING: utf-8
```py
# mypy: allow-untyped-defs
import inspect
from functools import wraps
from typing import Any, Callable, get_type_hints, Optional, Union
from torch.utils.data.datapipes._typing import _DataPipeMeta
from torch.utils.data.datapipes.datapipe import IterDataPipe, MapDataPipe
######################################################
# Functional API
######################################################
class functional_datapipe:
name: str
def __init__(self, name: str, enable_df_api_tracing=False) -> None:
"""
Define a functional datapipe.
Args:
enable_df_api_tracing - if set, any returned DataPipe would accept
DataFrames API in tracing mode.
"""
self.name = name
self.enable_df_api_tracing = enable_df_api_tracing
def __call__(self, cls):
if issubclass(cls, IterDataPipe):
if isinstance(cls, type): # type: ignore[arg-type]
if not isinstance(cls, _DataPipeMeta):
raise TypeError(
"`functional_datapipe` can only decorate IterDataPipe"
)
# with non_deterministic decorator
else:
if not isinstance(cls, non_deterministic) and not (
hasattr(cls, "__self__")
and isinstance(cls.__self__, non_deterministic)
):
raise TypeError(
"`functional_datapipe` can only decorate IterDataPipe"
)
IterDataPipe.register_datapipe_as_function(
self.name, cls, enable_df_api_tracing=self.enable_df_api_tracing
)
elif issubclass(cls, MapDataPipe):
MapDataPipe.register_datapipe_as_function(self.name, cls)
return cls
######################################################
# Determinism
######################################################
_determinism: bool = False
class guaranteed_datapipes_determinism:
prev: bool
def __init__(self) -> None:
global _determinism
self.prev = _determinism
_determinism = True
def __enter__(self) -> None:
pass
def __exit__(self, exc_type: Any, exc_value: Any, traceback: Any) -> None:
global _determinism
_determinism = self.prev
class non_deterministic:
cls: Optional[type[IterDataPipe]] = None
# TODO: Lambda for picking
deterministic_fn: Callable[[], bool]
def __init__(self, arg: Union[type[IterDataPipe], Callable[[], bool]]) -> None:
# 1. Decorator doesn't have any argument
if isinstance(arg, type): # type: ignore[arg-type]
if not issubclass(arg, IterDataPipe): # type: ignore[arg-type]
raise TypeError(
"Only `IterDataPipe` can be decorated with `non_deterministic`"
f", but {arg.__name__} is found"
)
self.cls = arg # type: ignore[assignment]
# 2. Decorator has an argument of a function
# This class should behave differently given different inputs. Use this
# function to verify the determinism for each instance.
# When the function returns True, the instance is non-deterministic. Otherwise,
# the instance is a deterministic DataPipe.
elif isinstance(arg, Callable): # type:ignore[arg-type]
self.deterministic_fn = arg # type: ignore[assignment, misc]
else:
raise TypeError(f"{arg} can not be decorated by non_deterministic")
def __call__(self, *args, **kwargs):
global _determinism
# Decorate IterDataPipe
if self.cls is not None:
if _determinism:
raise TypeError(
f"{self.cls.__name__} is non-deterministic, but you set 'guaranteed_datapipes_determinism'. "
"You can turn off determinism for this DataPipe if that is acceptable "
"for your application"
)
return self.cls(*args, **kwargs) # type: ignore[call-arg]
# Decorate with a functional argument
if not (
isinstance(args[0], type)
and issubclass(args[0], IterDataPipe) # type: ignore[arg-type]
):
raise TypeError(
f"Only `IterDataPipe` can be decorated, but {args[0].__name__} is found"
)
self.cls = args[0]
return self.deterministic_wrapper_fn
def deterministic_wrapper_fn(self, *args, **kwargs) -> IterDataPipe:
res = self.deterministic_fn(*args, **kwargs) # type: ignore[call-arg, misc]
if not isinstance(res, bool):
raise TypeError(
"deterministic_fn of `non_deterministic` decorator is required "
f"to return a boolean value, but {type(res)} is found"
)
global _determinism
if _determinism and res:
raise TypeError(
f"{self.cls.__name__} is non-deterministic with the inputs, but you set " # type: ignore[union-attr]
"'guaranteed_datapipes_determinism'. You can turn off determinism "
"for this DataPipe if that is acceptable for your application"
)
return self.cls(*args, **kwargs) # type: ignore[call-arg, misc]
######################################################
# Type validation
######################################################
# Validate each argument of DataPipe with hint as a subtype of the hint.
def argument_validation(f):
signature = inspect.signature(f)
hints = get_type_hints(f)
@wraps(f)
def wrapper(*args, **kwargs):
bound = signature.bind(*args, **kwargs)
for argument_name, value in bound.arguments.items():
if argument_name in hints and isinstance(
hints[argument_name], _DataPipeMeta
):
hint = hints[argument_name]
if not isinstance(value, IterDataPipe):
raise TypeError(
f"Expected argument '{argument_name}' as a IterDataPipe, but found {type(value)}"
)
if not value.type.issubtype(hint.type):
raise TypeError(
f"Expected type of argument '{argument_name}' as a subtype of "
f"hint {hint.type}, but found {value.type}"
)
return f(*args, **kwargs)
return wrapper
# Default value is True
_runtime_validation_enabled: bool = True
class runtime_validation_disabled:
prev: bool
def __init__(self) -> None:
global _runtime_validation_enabled
self.prev = _runtime_validation_enabled
_runtime_validation_enabled = False
def __enter__(self) -> None:
pass
def __exit__(self, exc_type: Any, exc_value: Any, traceback: Any) -> None:
global _runtime_validation_enabled
_runtime_validation_enabled = self.prev
# Runtime checking
# Validate output data is subtype of return hint
def runtime_validation(f):
# TODO:
# Can be extended to validate '__getitem__' and nonblocking
if f.__name__ != "__iter__":
raise TypeError(
f"Can not decorate function {f.__name__} with 'runtime_validation'"
)
@wraps(f)
def wrapper(self):
global _runtime_validation_enabled
if not _runtime_validation_enabled:
yield from f(self)
else:
it = f(self)
for d in it:
if not self.type.issubtype_of_instance(d):
raise RuntimeError(
f"Expected an instance as subtype of {self.type}, but found {d}({type(d)})"
)
yield d
return wrapper
```
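A minimal usage sketch of the decorators above (not part of the file): `@functional_datapipe` registers a custom `IterDataPipe` under a functional name so it can be chained as a method on any other `IterDataPipe`. The class and the name `double` are illustrative.
```py
# Hedged sketch: registering and invoking a functional DataPipe.
from torch.utils.data.datapipes._decorator import functional_datapipe
from torch.utils.data.datapipes.datapipe import IterDataPipe
from torch.utils.data.datapipes.iter import IterableWrapper
@functional_datapipe("double")  # illustrative functional name
class DoublerIterDataPipe(IterDataPipe):
    def __init__(self, source_datapipe):
        self.source_datapipe = source_datapipe
    def __iter__(self):
        for x in self.source_datapipe:
            yield 2 * x
dp = IterableWrapper(range(3)).double()  # functional form of DoublerIterDataPipe(...)
assert list(dp) == [0, 2, 4]
```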
|
====================================================================================================================================
SOURCE CODE FILE: _hook_iterator.py
LINES: 6
SIZE: 11.94 KB
PATH: scripts\freecad_env\Lib\site-packages\torch\utils\data\datapipes\_hook_iterator.py
ENCODING: utf-8
```py
# mypy: allow-untyped-defs
import functools
import inspect
from enum import Enum
import torch
class _SnapshotState(Enum):
r"""
These are the snapshotting-related states that IterDataPipes can be in.
`NotStarted` - allows you to restore a snapshot and create an iterator with reset
`Restored` - cannot restore again, allows you to create an iterator without resetting the DataPipe
`Iterating` - can restore, will reset if you create a new iterator
"""
NotStarted = 0
Restored = 1
Iterating = 2
def _simplify_obj_name(obj) -> str:
"""Simplify the display strings of objects for the purpose of rendering within DataPipe error messages."""
if inspect.isfunction(obj):
return obj.__name__
else:
return repr(obj)
def _strip_datapipe_from_name(name: str) -> str:
return name.replace("IterDataPipe", "").replace("MapDataPipe", "")
def _generate_input_args_string(obj):
"""Generate a string for the input arguments of an object."""
signature = inspect.signature(obj.__class__)
input_param_names = set(signature.parameters.keys())
result = []
for name, value in inspect.getmembers(obj):
if name in input_param_names:
result.append((name, _simplify_obj_name(value)))
return ", ".join([f"{name}={value}" for name, value in result])
def _generate_iterdatapipe_msg(datapipe, simplify_dp_name: bool = False):
output_string = (
f"{datapipe.__class__.__name__}({_generate_input_args_string(datapipe)})"
)
if simplify_dp_name:
output_string = _strip_datapipe_from_name(output_string)
return output_string
def _gen_invalid_iterdatapipe_msg(datapipe):
return (
"This iterator has been invalidated because another iterator has been created "
f"from the same IterDataPipe: {_generate_iterdatapipe_msg(datapipe)}\n"
"This may be caused multiple references to the same IterDataPipe. We recommend "
"using `.fork()` if that is necessary."
)
_feedback_msg = (
"\nFor feedback regarding this single iterator per IterDataPipe constraint, feel free "
"to comment on this issue: https://github.com/pytorch/data/issues/45."
)
def _check_iterator_valid(datapipe, iterator_id, next_method_exists=False) -> None:
r"""
Given an instance of a DataPipe and an iterator ID, check if the IDs match, and raise an exception if they do not.
In the case of ChildDataPipe, the ID gets compared to the one stored in `main_datapipe` as well.
"""
if next_method_exists:
# This is the case where `IterDataPipe` has both `__iter__` and `__next__`.
# The `_valid_iterator_id` should either be never set (`None`), or set by at most one
# iterator (`0`). Otherwise, it means there are multiple iterators.
if datapipe._valid_iterator_id is not None and datapipe._valid_iterator_id != 0:
extra_msg = "\nNote that this exception is raised inside your IterDataPipe's `__next__` method"
raise RuntimeError(
_gen_invalid_iterdatapipe_msg(datapipe) + extra_msg + _feedback_msg
)
elif (
hasattr(datapipe, "_is_child_datapipe") and datapipe._is_child_datapipe is True
):
if hasattr(datapipe, "_check_valid_iterator_id"):
if not datapipe._check_valid_iterator_id(iterator_id):
raise RuntimeError(
"This iterator has been invalidated, because a new iterator has been created "
f"from one of the ChildDataPipes of "
f"{_generate_iterdatapipe_msg(datapipe.main_datapipe)}."
+ _feedback_msg
)
else:
raise RuntimeError(
"ChildDataPipe must have method `_check_valid_iterator_id`."
)
elif datapipe._valid_iterator_id != iterator_id:
raise RuntimeError(_gen_invalid_iterdatapipe_msg(datapipe) + _feedback_msg)
def _set_datapipe_valid_iterator_id(datapipe):
"""Given a DataPipe, updates its valid iterator ID and reset the DataPipe."""
if hasattr(datapipe, "_is_child_datapipe") and datapipe._is_child_datapipe is True:
if hasattr(datapipe, "_set_main_datapipe_valid_iterator_id"):
datapipe._set_main_datapipe_valid_iterator_id() # reset() is called within this method when appropriate
else:
raise RuntimeError(
"ChildDataPipe must have method `_set_main_datapipe_valid_iterator_id`."
)
else:
if datapipe._valid_iterator_id is None:
datapipe._valid_iterator_id = 0
else:
datapipe._valid_iterator_id += 1
datapipe.reset()
return datapipe._valid_iterator_id
def hook_iterator(namespace):
r"""
Define a hook that is applied to every `__iter__` of classes with metaclass `_DataPipeMeta`.
This is done for the purpose of profiling and checking if an iterator is still valid.
"""
def profiler_record_fn_context(datapipe):
if not hasattr(datapipe, "_profile_name"):
datapipe._profile_name = _generate_iterdatapipe_msg(
datapipe, simplify_dp_name=True
)
return torch.autograd.profiler.record_function(datapipe._profile_name)
class IteratorDecorator:
r"""
Wrap the iterator and modify its `__next__` method.
This decorator is applied to DataPipes whose `__iter__` method is NOT a generator function.
Such `__iter__` methods commonly return `self`, but not necessarily.
"""
def __init__(self, iterator, datapipe, iterator_id, has_next_method):
self.iterator = iterator
self.datapipe = datapipe
self.iterator_id = iterator_id
self._profiler_enabled = torch.autograd._profiler_enabled()
# Check if `__iter__` returns `self` and `DataPipe` has `__next__`
self.self_and_has_next_method = (
self.iterator is self.datapipe and has_next_method
)
def __iter__(self):
return self
def _get_next(self):
"""Return next with logic related to iterator validity, profiler, and incrementation of samples yielded."""
_check_iterator_valid(self.datapipe, self.iterator_id)
result = next(self.iterator)
if not self.self_and_has_next_method:
self.datapipe._number_of_samples_yielded += 1
return result
def __next__(self):
# TODO: Add try-except to in-place reduce traceback from the Exception
# See: https://github.com/pytorch/data/issues/284
if self._profiler_enabled:
with profiler_record_fn_context(self.datapipe):
return self._get_next()
else: # Decided against using `contextlib.nullcontext` for performance reasons
return self._get_next()
def __getattr__(self, name):
return getattr(self.iterator, name)
func = namespace["__iter__"]
# ``__iter__`` of IterDataPipe is a generator function
if inspect.isgeneratorfunction(func):
@functools.wraps(func)
def wrap_generator(*args, **kwargs):
gen = func(*args, **kwargs)
datapipe = args[0]
if datapipe._fast_forward_iterator:
it = datapipe._fast_forward_iterator
datapipe._fast_forward_iterator = None
datapipe._snapshot_state = _SnapshotState.Iterating
while True:
try:
yield next(it)
except StopIteration:
return
iterator_id = _set_datapipe_valid_iterator_id(
datapipe
) # This ID is tied to each created iterator
_profiler_enabled = torch.autograd._profiler_enabled()
try:
if _profiler_enabled:
with profiler_record_fn_context(datapipe):
response = gen.send(None)
else:
response = gen.send(None)
while True:
datapipe._number_of_samples_yielded += 1
request = yield response
# Pass through here every time `__next__` is called
if _profiler_enabled:
with profiler_record_fn_context(datapipe):
_check_iterator_valid(datapipe, iterator_id)
response = gen.send(request)
else: # Decided against using `contextlib.nullcontext` for performance reasons
_check_iterator_valid(datapipe, iterator_id)
response = gen.send(request)
except StopIteration:
return
except Exception as e:
# TODO: Simplify the traceback message to skip over `response = gen.send(None)`
# Part of https://github.com/pytorch/data/issues/284
datapipe = args[0]
msg = "thrown by __iter__ of"
single_iterator_msg = "single iterator per IterDataPipe constraint"
if hasattr(e.args, "__len__"):
full_msg = f"{msg} {datapipe.__class__.__name__}({_generate_input_args_string(datapipe)})"
if len(e.args) == 0 or not isinstance(
e.args[0], str
): # If an exception message doesn't exist
e.args = (f"\nThis exception is {full_msg}",)
elif msg not in e.args[0] and single_iterator_msg not in e.args[0]:
e.args = (
e.args[0] + f"\nThis exception is {full_msg}",
) + e.args[1:]
raise
namespace["__iter__"] = wrap_generator
else: # ``__iter__`` of IterDataPipe is NOT a generator function
# IterDataPipe is an iterator with both ``__iter__`` and ``__next__``
# And ``__iter__`` may or may not return `self`
if "__next__" in namespace: # If `__next__` exists, put a wrapper around it
next_func = namespace["__next__"]
@functools.wraps(next_func)
def wrap_next(*args, **kwargs):
datapipe = args[0]
if torch.autograd._profiler_enabled():
with profiler_record_fn_context(datapipe):
result = next_func(*args, **kwargs)
else:
result = next_func(*args, **kwargs)
datapipe._number_of_samples_yielded += 1
return result
namespace["__next__"] = wrap_next
# Note that if `__next__` and `__iter__` do something completely unrelated, it may cause issues, but
# then the user is violating the iterator protocol. Potential issues:
# 1. The valid iterator ID may not be updated or checked properly
# 2. The number of samples yielded will be miscounted
# Regardless of whether `__next__` exists, `__iter__` needs a wrapper to track the number of valid iterators
@functools.wraps(func)
def wrap_iter(*args, **kwargs):
iter_ret = func(*args, **kwargs)
datapipe = args[0]
datapipe._snapshot_state = _SnapshotState.Iterating
if datapipe._fast_forward_iterator:
iter_ret = datapipe._fast_forward_iterator
datapipe._fast_forward_iterator = None
return iter_ret
iterator_id = _set_datapipe_valid_iterator_id(
datapipe
) # This ID is tied to each created iterator
return IteratorDecorator(
iter_ret, datapipe, iterator_id, "__next__" in namespace
)
namespace["__iter__"] = wrap_iter
```
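A small sketch (not part of the file) of the single-iterator-per-`IterDataPipe` constraint that these hooks enforce; the exact error message may differ from the comment below.
```py
# Hedged sketch: creating a second iterator invalidates the first one.
from torch.utils.data.datapipes.iter import IterableWrapper
dp = IterableWrapper(range(10))
it1 = iter(dp)
assert next(it1) == 0
it2 = iter(dp)   # resets the DataPipe and invalidates it1
assert next(it2) == 0
next(it1)        # expected to raise RuntimeError: "This iterator has been invalidated ..."
```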
|
=============================================================================================================================
SOURCE CODE FILE: _typing.py
LINES: 1
SIZE: 16.38 KB
PATH: scripts\freecad_env\Lib\site-packages\torch\utils\data\datapipes\_typing.py
ENCODING: utf-8
```py
# mypy: allow-untyped-defs
# Taking reference from official Python typing
# https://github.com/python/cpython/blob/master/Lib/typing.py
import collections
import functools
import numbers
import sys
# Please check [Note: TypeMeta and TypeAlias]
# In case of metaclass conflict due to ABCMeta or _ProtocolMeta
# For Python 3.9, only Protocol in typing uses metaclass
from abc import ABCMeta
from collections.abc import Iterator
# TODO: Use TypeAlias when Python 3.6 is deprecated
from typing import ( # type: ignore[attr-defined]
_eval_type,
_GenericAlias,
_tp_cache,
_type_check,
_type_repr,
Any,
ForwardRef,
Generic,
get_type_hints,
TypeVar,
Union,
)
from torch.utils.data.datapipes._hook_iterator import _SnapshotState, hook_iterator
class GenericMeta(ABCMeta): # type: ignore[no-redef]
pass
class Integer(numbers.Integral):
pass
class Boolean(numbers.Integral):
pass
# Python 'type' object is not subscriptable
# Tuple[int, List, dict] -> valid
# tuple[int, list, dict] -> invalid
# Map Python 'type' to abstract base class
TYPE2ABC = {
bool: Boolean,
int: Integer,
float: numbers.Real,
complex: numbers.Complex,
dict: dict,
list: list,
set: set,
tuple: tuple,
None: type(None),
}
def issubtype(left, right, recursive=True):
r"""
Check if the left-side type is a subtype of the right-side type.
If either type is a composite type like `Union` or a `TypeVar` with
bounds, it is expanded into a list of types, and we check that every
left-side type is a subtype of at least one right-side type.
"""
left = TYPE2ABC.get(left, left)
right = TYPE2ABC.get(right, right)
if right is Any or left == right:
return True
if isinstance(right, _GenericAlias):
if getattr(right, "__origin__", None) is Generic:
return True
if right == type(None):
return False
# Right-side type
constraints = _decompose_type(right)
if len(constraints) == 0 or Any in constraints:
return True
if left is Any:
return False
# Left-side type
variants = _decompose_type(left)
# all() will return True for empty variants
if len(variants) == 0:
return False
return all(
_issubtype_with_constraints(variant, constraints, recursive)
for variant in variants
)
def _decompose_type(t, to_list=True):
if isinstance(t, TypeVar):
if t.__bound__ is not None:
ts = [t.__bound__]
else:
# For T_co, __constraints__ is ()
ts = list(t.__constraints__)
elif hasattr(t, "__origin__") and t.__origin__ == Union:
ts = t.__args__
else:
if not to_list:
return None
ts = [t]
# Ignored: Generator has incompatible item type "object"; expected "Type[Any]"
ts = [TYPE2ABC.get(_t, _t) for _t in ts] # type: ignore[misc]
return ts
def _issubtype_with_constraints(variant, constraints, recursive=True):
r"""
Check if the variant is a subtype of at least one of the constraints.
Composite types like `Union` and `TypeVar` with bounds are expanded
for testing.
"""
if variant in constraints:
return True
# [Note: Subtype for Union and TypeVar]
# Python typing is able to flatten Union[Union[...]] or Union[TypeVar].
# But it couldn't flatten the following scenarios:
# - Union[int, TypeVar[Union[...]]]
# - TypeVar[TypeVar[...]]
# So, variant and each constraint may be a TypeVar or a Union.
# In these cases, all inner types of the variant are required to be
# extracted and verified as a subtype of some constraint. And all
# inner types of any constraint that is a TypeVar or a Union are
# also required to be extracted and checked to see whether the variant
# belongs to any of them.
# Variant
vs = _decompose_type(variant, to_list=False)
# Variant is TypeVar or Union
if vs is not None:
return all(_issubtype_with_constraints(v, constraints, recursive) for v in vs)
# Variant is not TypeVar or Union
if hasattr(variant, "__origin__") and variant.__origin__ is not None:
v_origin = variant.__origin__
# In Python-3.9 typing library untyped generics do not have args
v_args = getattr(variant, "__args__", None)
else:
v_origin = variant
v_args = None
# Constraints
for constraint in constraints:
cs = _decompose_type(constraint, to_list=False)
# Constraint is TypeVar or Union
if cs is not None:
if _issubtype_with_constraints(variant, cs, recursive):
return True
# Constraint is not TypeVar or Union
else:
# __origin__ can be None for plain list, tuple, ... in Python 3.6
if hasattr(constraint, "__origin__") and constraint.__origin__ is not None:
c_origin = constraint.__origin__
if v_origin == c_origin:
if not recursive:
return True
# In Python-3.9 typing library untyped generics do not have args
c_args = getattr(constraint, "__args__", None)
if c_args is None or len(c_args) == 0:
return True
if (
v_args is not None
and len(v_args) == len(c_args)
and all(
issubtype(v_arg, c_arg)
for v_arg, c_arg in zip(v_args, c_args)
)
):
return True
# Tuple[int] -> Tuple
else:
if v_origin == constraint:
return True
return False
def issubinstance(data, data_type):
if not issubtype(type(data), data_type, recursive=False):
return False
# In Python-3.9 typing library __args__ attribute is not defined for untyped generics
dt_args = getattr(data_type, "__args__", None)
if isinstance(data, tuple):
if dt_args is None or len(dt_args) == 0:
return True
if len(dt_args) != len(data):
return False
return all(issubinstance(d, t) for d, t in zip(data, dt_args))
elif isinstance(data, (list, set)):
if dt_args is None or len(dt_args) == 0:
return True
t = dt_args[0]
return all(issubinstance(d, t) for d in data)
elif isinstance(data, dict):
if dt_args is None or len(dt_args) == 0:
return True
kt, vt = dt_args
return all(
issubinstance(k, kt) and issubinstance(v, vt) for k, v in data.items()
)
return True
# [Note: TypeMeta and TypeAlias]
# In order to keep compatibility with Python 3.6, use a metaclass for the typing.
# TODO: When PyTorch drops support for Python 3.6, this can be converted
# to the alias system, using `__class_getitem__` for DataPipe. The
# typing system will then gain the benefits of performance and of resolving
# metaclass conflicts, as elaborated in https://www.python.org/dev/peps/pep-0560/
class _DataPipeType:
r"""Save type annotation in `param`."""
def __init__(self, param):
self.param = param
def __repr__(self):
return _type_repr(self.param)
def __eq__(self, other):
if isinstance(other, _DataPipeType):
return self.param == other.param
return NotImplemented
def __hash__(self):
return hash(self.param)
def issubtype(self, other):
if isinstance(other.param, _GenericAlias):
if getattr(other.param, "__origin__", None) is Generic:
return True
if isinstance(other, _DataPipeType):
return issubtype(self.param, other.param)
if isinstance(other, type):
return issubtype(self.param, other)
raise TypeError(f"Expected '_DataPipeType' or 'type', but found {type(other)}")
def issubtype_of_instance(self, other):
return issubinstance(other, self.param)
# Default type for DataPipe without annotation
_T_co = TypeVar("_T_co", covariant=True)
_DEFAULT_TYPE = _DataPipeType(Generic[_T_co])
class _DataPipeMeta(GenericMeta):
r"""
Metaclass for `DataPipe`.
Add `type` attribute and `__init_subclass__` based on the type, and validate the return hint of `__iter__`.
Note that there is subclass `_IterDataPipeMeta` specifically for `IterDataPipe`.
"""
type: _DataPipeType
def __new__(cls, name, bases, namespace, **kwargs):
return super().__new__(cls, name, bases, namespace, **kwargs) # type: ignore[call-overload]
# TODO: the statements below are not reachable by design as there is a bug and typing is low priority for now.
cls.__origin__ = None
if "type" in namespace:
return super().__new__(cls, name, bases, namespace, **kwargs) # type: ignore[call-overload]
namespace["__type_class__"] = False
# For plain derived class without annotation
for base in bases:
if isinstance(base, _DataPipeMeta):
return super().__new__(cls, name, bases, namespace, **kwargs) # type: ignore[call-overload]
namespace.update(
{"type": _DEFAULT_TYPE, "__init_subclass__": _dp_init_subclass}
)
return super().__new__(cls, name, bases, namespace, **kwargs) # type: ignore[call-overload]
def __init__(self, name, bases, namespace, **kwargs):
super().__init__(name, bases, namespace, **kwargs) # type: ignore[call-overload]
# TODO: Fix isinstance bug
@_tp_cache
def _getitem_(self, params):
if params is None:
raise TypeError(f"{self.__name__}[t]: t can not be None")
if isinstance(params, str):
params = ForwardRef(params)
if not isinstance(params, tuple):
params = (params,)
msg = f"{self.__name__}[t]: t must be a type"
params = tuple(_type_check(p, msg) for p in params)
if isinstance(self.type.param, _GenericAlias):
orig = getattr(self.type.param, "__origin__", None)
if isinstance(orig, type) and orig is not Generic:
p = self.type.param[params] # type: ignore[index]
t = _DataPipeType(p)
l = len(str(self.type)) + 2
name = self.__name__[:-l]
name = name + "[" + str(t) + "]"
bases = (self,) + self.__bases__
return self.__class__(
name,
bases,
{
"__init_subclass__": _dp_init_subclass,
"type": t,
"__type_class__": True,
},
)
if len(params) > 1:
raise TypeError(
f"Too many parameters for {self} actual {len(params)}, expected 1"
)
t = _DataPipeType(params[0])
if not t.issubtype(self.type):
raise TypeError(
f"Can not subclass a DataPipe[{t}] from DataPipe[{self.type}]"
)
# Types are equal, fast path for inheritance
if self.type == t:
return self
name = self.__name__ + "[" + str(t) + "]"
bases = (self,) + self.__bases__
return self.__class__(
name,
bases,
{"__init_subclass__": _dp_init_subclass, "__type_class__": True, "type": t},
)
# TODO: Fix isinstance bug
def _eq_(self, other):
if not isinstance(other, _DataPipeMeta):
return NotImplemented
if self.__origin__ is None or other.__origin__ is None: # type: ignore[has-type]
return self is other
return (
self.__origin__ == other.__origin__ # type: ignore[has-type]
and self.type == other.type
)
# TODO: Fix isinstance bug
def _hash_(self):
return hash((self.__name__, self.type))
class _IterDataPipeMeta(_DataPipeMeta):
r"""
Metaclass for `IterDataPipe` and inherits from `_DataPipeMeta`.
Add various functions for behaviors specific to `IterDataPipe`.
"""
def __new__(cls, name, bases, namespace, **kwargs):
if "reset" in namespace:
reset_func = namespace["reset"]
@functools.wraps(reset_func)
def conditional_reset(*args, **kwargs):
r"""
Only execute DataPipe's `reset()` method if `_SnapshotState` is `Iterating` or `NotStarted`.
This allows recently restored DataPipe to preserve its restored state during the initial `__iter__` call.
"""
datapipe = args[0]
if datapipe._snapshot_state in (
_SnapshotState.Iterating,
_SnapshotState.NotStarted,
):
# Resetting in the `NotStarted` state is necessary because the `source_datapipe` of a DataPipe might have
# already begun iterating.
datapipe._number_of_samples_yielded = 0
datapipe._fast_forward_iterator = None
reset_func(*args, **kwargs)
datapipe._snapshot_state = _SnapshotState.Iterating
namespace["reset"] = conditional_reset
if "__iter__" in namespace:
hook_iterator(namespace)
return super().__new__(cls, name, bases, namespace, **kwargs) # type: ignore[call-overload]
def _dp_init_subclass(sub_cls, *args, **kwargs):
# Add function for datapipe instance to reinforce the type
sub_cls.reinforce_type = reinforce_type
# TODO:
# - add global switch for type checking at compile-time
# Ignore internal type class
if getattr(sub_cls, "__type_class__", False):
return
# Check if the string type is valid
if isinstance(sub_cls.type.param, ForwardRef):
base_globals = sys.modules[sub_cls.__module__].__dict__
try:
param = _eval_type(sub_cls.type.param, base_globals, locals())
sub_cls.type.param = param
except TypeError as e:
raise TypeError(
f"{sub_cls.type.param.__forward_arg__} is not supported by Python typing"
) from e
if "__iter__" in sub_cls.__dict__:
iter_fn = sub_cls.__dict__["__iter__"]
hints = get_type_hints(iter_fn)
if "return" in hints:
return_hint = hints["return"]
# Plain Return Hint for Python 3.6
if return_hint == Iterator:
return
if not (
hasattr(return_hint, "__origin__")
and (
return_hint.__origin__ == Iterator
or return_hint.__origin__ == collections.abc.Iterator
)
):
raise TypeError(
"Expected 'Iterator' as the return annotation for `__iter__` of {}"
", but found {}".format(
sub_cls.__name__, _type_repr(hints["return"])
)
)
data_type = return_hint.__args__[0]
if not issubtype(data_type, sub_cls.type.param):
raise TypeError(
f"Expected return type of '__iter__' as a subtype of {sub_cls.type},"
f" but found {_type_repr(data_type)} for {sub_cls.__name__}"
)
def reinforce_type(self, expected_type):
r"""
Reinforce the type for a DataPipe instance.
The 'expected_type' is required to be a subtype of the original type
hint, to restrict the type requirement of the DataPipe instance.
"""
if isinstance(expected_type, tuple):
expected_type = tuple[expected_type] # type: ignore[valid-type]
_type_check(expected_type, msg="'expected_type' must be a type")
if not issubtype(expected_type, self.type.param):
raise TypeError(
f"Expected 'expected_type' as subtype of {self.type}, but found {_type_repr(expected_type)}"
)
self.type = _DataPipeType(expected_type)
return self
```
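A short sketch (not part of the file) of the runtime subtype helpers defined above; the expected results in the comments follow from the rules in `issubtype` and `issubinstance`.
```py
# Hedged sketch: runtime subtype and instance checks.
from typing import List, Union
from torch.utils.data.datapipes._typing import issubinstance, issubtype
assert issubtype(int, Union[int, str])         # int is one of the Union members
assert not issubtype(float, Union[int, str])   # float is neither int nor str
assert issubinstance([1, 2, 3], List[int])
assert not issubinstance([1, "2"], List[int])  # element type mismatch
```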
|
========================================================================================================================================
SOURCE CODE FILE: __init__.py
LINES: 1
SIZE: 0.33 KB
PATH: scripts\freecad_env\Lib\site-packages\torch\utils\data\datapipes\dataframe\__init__.py
ENCODING: utf-8
```py
from torch.utils.data.datapipes.dataframe.dataframes import (
CaptureDataFrame,
DFIterDataPipe,
)
from torch.utils.data.datapipes.dataframe.datapipes import DataFramesAsTuplesPipe
__all__ = ["CaptureDataFrame", "DFIterDataPipe", "DataFramesAsTuplesPipe"]
# Please keep this list sorted
assert __all__ == sorted(__all__)
```
|
=================================================================================================================================================
SOURCE CODE FILE: dataframe_wrapper.py
LINES: 1
SIZE: 3.34 KB
PATH: scripts\freecad_env\Lib\site-packages\torch\utils\data\datapipes\dataframe\dataframe_wrapper.py
ENCODING: utf-8
```py
# mypy: allow-untyped-defs
from typing import Any, Optional
_pandas: Any = None
_WITH_PANDAS: Optional[bool] = None
def _try_import_pandas() -> bool:
try:
import pandas # type: ignore[import]
global _pandas
_pandas = pandas
return True
except ImportError:
return False
# pandas is used only for prototyping and will shortly be replaced with TorchArrow
def _with_pandas() -> bool:
global _WITH_PANDAS
if _WITH_PANDAS is None:
_WITH_PANDAS = _try_import_pandas()
return _WITH_PANDAS
class PandasWrapper:
@classmethod
def create_dataframe(cls, data, columns):
if not _with_pandas():
raise RuntimeError("DataFrames prototype requires pandas to function")
return _pandas.DataFrame(data, columns=columns) # type: ignore[union-attr]
@classmethod
def is_dataframe(cls, data):
if not _with_pandas():
return False
return isinstance(data, _pandas.core.frame.DataFrame) # type: ignore[union-attr]
@classmethod
def is_column(cls, data):
if not _with_pandas():
return False
return isinstance(data, _pandas.core.series.Series) # type: ignore[union-attr]
@classmethod
def iterate(cls, data):
if not _with_pandas():
raise RuntimeError("DataFrames prototype requires pandas to function")
yield from data.itertuples(index=False)
@classmethod
def concat(cls, buffer):
if not _with_pandas():
raise RuntimeError("DataFrames prototype requires pandas to function")
return _pandas.concat(buffer) # type: ignore[union-attr]
@classmethod
def get_item(cls, data, idx):
if not _with_pandas():
raise RuntimeError("DataFrames prototype requires pandas to function")
return data[idx : idx + 1]
@classmethod
def get_len(cls, df):
if not _with_pandas():
raise RuntimeError("DataFrames prototype requires pandas to function")
return len(df.index)
@classmethod
def get_columns(cls, df):
if not _with_pandas():
raise RuntimeError("DataFrames prototype requires pandas to function")
return list(df.columns.values.tolist())
# When you build your own implementation, just override it with dataframe_wrapper.set_df_wrapper(new_wrapper_class)
default_wrapper = PandasWrapper
def get_df_wrapper():
return default_wrapper
def set_df_wrapper(wrapper):
global default_wrapper
default_wrapper = wrapper
def create_dataframe(data, columns=None):
wrapper = get_df_wrapper()
return wrapper.create_dataframe(data, columns)
def is_dataframe(data):
wrapper = get_df_wrapper()
return wrapper.is_dataframe(data)
def get_columns(data):
wrapper = get_df_wrapper()
return wrapper.get_columns(data)
def is_column(data):
wrapper = get_df_wrapper()
return wrapper.is_column(data)
def concat(buffer):
wrapper = get_df_wrapper()
return wrapper.concat(buffer)
def iterate(data):
wrapper = get_df_wrapper()
return wrapper.iterate(data)
def get_item(data, idx):
wrapper = get_df_wrapper()
return wrapper.get_item(data, idx)
def get_len(df):
wrapper = get_df_wrapper()
return wrapper.get_len(df)
```
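A brief sketch (not part of the file) of swapping in a custom wrapper via `set_df_wrapper`; `MyWrapper` is an illustrative subclass that reuses the pandas implementation.
```py
# Hedged sketch: installing a custom DataFrame wrapper.
from torch.utils.data.datapipes.dataframe import dataframe_wrapper
class MyWrapper(dataframe_wrapper.PandasWrapper):
    @classmethod
    def get_len(cls, df):
        # e.g., add logging here, or delegate to a different DataFrame library
        return super().get_len(df)
dataframe_wrapper.set_df_wrapper(MyWrapper)
assert dataframe_wrapper.get_df_wrapper() is MyWrapper
```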
|
==========================================================================================================================================
SOURCE CODE FILE: dataframes.py
LINES: 2
SIZE: 13.60 KB
PATH: scripts\freecad_env\Lib\site-packages\torch\utils\data\datapipes\dataframe\dataframes.py
ENCODING: utf-8
```py
# mypy: allow-untyped-defs
from typing import Any, Optional
from torch.utils.data.datapipes._decorator import functional_datapipe
from torch.utils.data.datapipes.dataframe.structures import DataChunkDF
from torch.utils.data.datapipes.datapipe import DFIterDataPipe, IterDataPipe
# TODO(VitalyFedyunin): Add error when two different traces get combined
__all__ = [
"Capture",
"CaptureA",
"CaptureAdd",
"CaptureCall",
"CaptureControl",
"CaptureDataFrame",
"CaptureDataFrameWithDataPipeOps",
"CaptureF",
"CaptureGetAttr",
"CaptureGetItem",
"CaptureInitial",
"CaptureLikeMock",
"CaptureMul",
"CaptureSetItem",
"CaptureSub",
"CaptureVariable",
"CaptureVariableAssign",
"DataFrameTracer",
"DataFrameTracedOps",
"disable_capture",
"get_val",
]
def disable_capture():
CaptureControl.disabled = True
class CaptureControl:
disabled = False
class DataFrameTracedOps(DFIterDataPipe):
def __init__(self, source_datapipe, output_var):
self.source_datapipe = source_datapipe
self.output_var = output_var
def __iter__(self):
for item in self.source_datapipe:
yield self.output_var.apply_ops(item)
# TODO(VitalyFedyunin): Extract this list from the DFIterDataPipe registered functions
DATAPIPES_OPS = [
"_dataframes_as_tuples",
"groupby",
"_dataframes_filter",
"map",
"to_datapipe",
"shuffle",
"concat",
"batch",
"_dataframes_per_row",
"_dataframes_concat",
"_dataframes_shuffle",
]
UNIMPLEMENTED_ATTR = ["__deepcopy__", "__setstate__", "is_shardable", "apply_sharding"]
class Capture:
# TODO: All operations are shared across the entire InitialCapture; need to figure out what happens if we join two captures
def __init__(self, schema_df=None):
self.ctx = {"operations": [], "variables": [], "schema_df": schema_df}
def __str__(self):
return self._ops_str()
def _ops_str(self):
res = ""
for op in self.ctx["operations"]:
if len(res) > 0:
res += "\n"
res += str(op)
return res
def __getstate__(self):
# TODO(VitalyFedyunin): Currently can't pickle (why?)
self.ctx["schema_df"] = None
for var in self.ctx["variables"]:
var.calculated_value = None
state = {}
for item in self.__dict__:
state[item] = getattr(self, item)
return state
def __setstate__(self, state):
for k, v in state.items():
setattr(self, k, v)
def __getattr__(self, attrname):
if attrname == "kwarg" or attrname == "kwargs":
raise RuntimeError("no kwargs!")
if attrname in ["__deepcopy__"]:
raise AttributeError
result = CaptureGetAttr(self, attrname, ctx=self.ctx)
return result
def __getitem__(self, key):
return CaptureGetItem(self, key, ctx=self.ctx)
def __setitem__(self, key, value):
self.ctx["operations"].append(CaptureSetItem(self, key, value, ctx=self.ctx))
def __add__(self, add_val):
res = CaptureAdd(self, add_val, ctx=self.ctx)
var = CaptureVariable(res, ctx=self.ctx)
self.ctx["operations"].append(
CaptureVariableAssign(variable=var, value=res, ctx=self.ctx)
)
return var
def __sub__(self, add_val):
res = CaptureSub(self, add_val, ctx=self.ctx)
var = CaptureVariable(res, ctx=self.ctx)
self.ctx["operations"].append(
CaptureVariableAssign(variable=var, value=res, ctx=self.ctx)
)
return var
def __mul__(self, add_val):
res = CaptureMul(self, add_val, ctx=self.ctx)
var = CaptureVariable(res, ctx=self.ctx)
t = CaptureVariableAssign(variable=var, value=res, ctx=self.ctx)
self.ctx["operations"].append(t)
return var
def _is_context_empty(self):
return len(self.ctx["operations"]) == 0 and len(self.ctx["variables"]) == 0
def apply_ops_2(self, dataframe):
# TODO(VitalyFedyunin): Make this calculation thread safe (as currently it updates pointer)
self.ctx["variables"][0].calculated_value = dataframe
for op in self.ctx["operations"]:
op.execute()
@property
def columns(self):
self.apply_ops_2(self.ctx["schema_df"])
value = self.execute()
return value.columns
# TODO(VitalyFedyunin): Add tests
# TODO(VitalyFedyunin): Need to join contexts if one of them is empty because we used capture
def __call__(self, *args, **kwargs):
# TODO: Check if args or kwargs have more than one different context
if self._is_context_empty():
# TODO: Allow CaptureA to take context from mock
for arg in args:
if isinstance(arg, Capture) and not arg._is_context_empty():
self.ctx = arg.ctx
break
if self._is_context_empty():
for k, v in kwargs.items():
if isinstance(k, Capture) and not k._is_context_empty():
self.ctx = k.ctx
break
if isinstance(v, Capture) and not v._is_context_empty():
self.ctx = v.ctx
break
res = CaptureCall(self, ctx=self.ctx, args=args, kwargs=kwargs)
var = CaptureVariable(None, ctx=self.ctx)
t = CaptureVariableAssign(ctx=self.ctx, variable=var, value=res)
self.ctx["operations"].append(t)
return var
class CaptureF(Capture):
def __init__(self, ctx=None, **kwargs):
if ctx is None:
self.ctx = {"operations": [], "variables": []}
else:
self.ctx = ctx
self.kwargs = kwargs
class CaptureA(CaptureF):
def __str__(self):
return f"{self.kwargs['name']}"
def execute(self):
value = self.kwargs["real_attribute"]
return value
class CaptureLikeMock:
def __init__(self, name):
import unittest.mock as mock
# TODO(VitalyFedyunin): Do not use a private function here; copy our own implementation instead.
get_target, attribute = mock._get_target(name) # type: ignore[attr-defined]
self.get_target = get_target
self.attribute = attribute
self.name = name
def __enter__(self):
self.save = getattr(self.get_target(), self.attribute)
capt = CaptureA(name=self.name, real_attribute=self.save)
setattr(self.get_target(), self.attribute, capt)
def __exit__(self, *exc_info):
setattr(self.get_target(), self.attribute, self.save)
class CaptureCall(Capture):
def __init__(self, callable, ctx=None, **kwargs):
if ctx is None:
self.ctx = {"operations": [], "variables": []}
else:
self.ctx = ctx
self.kwargs = kwargs
self.callable = callable
def __str__(self):
return "{callable}({args},{kwargs})".format(
callable=self.callable, **self.kwargs
)
def execute(self):
# TODO(VitalyFedyunin): Execute kwargs and maybe nested structures
executed_args = []
for arg in self.kwargs["args"]:
if isinstance(arg, Capture):
executed_args.append(arg.execute())
else:
executed_args.append(arg)
left = get_val(self.callable)
return left(*executed_args, **self.kwargs["kwargs"])
class CaptureVariableAssign(CaptureF):
def __str__(self):
variable = self.kwargs["variable"]
value = self.kwargs["value"]
return f"{variable} = {value}"
def execute(self):
self.kwargs["variable"].calculated_value = self.kwargs["value"].execute()
class CaptureVariable(Capture):
# TODO(VitalyFedyunin): This should be atomic and thread safe
names_idx = 0
def __init__(self, value, ctx):
if CaptureControl.disabled:
raise RuntimeError("Attempting to create capture variable with capture off")
self.ctx = ctx
self.value = value
self.name = f"var_{CaptureVariable.names_idx}"
CaptureVariable.names_idx += 1
self.ctx["variables"].append(self)
def __str__(self):
return self.name
def execute(self):
return self.calculated_value
def apply_ops(self, dataframe):
# TODO(VitalyFedyunin): Make this calculation thread safe (as currently it updates pointer)
self.ctx["variables"][0].calculated_value = dataframe
for op in self.ctx["operations"]:
op.execute()
return self.calculated_value
class CaptureGetItem(Capture):
def __init__(self, left, key, ctx):
self.ctx = ctx
self.left = left
self.key = key
def __str__(self):
return f"{self.left}[{get_val(self.key)}]"
def execute(self):
left = self.left.execute()
return left[self.key]
class CaptureSetItem(Capture):
def __init__(self, left, key, value, ctx):
self.ctx = ctx
self.left = left
self.key = key
self.value = value
def __str__(self):
return f"{self.left}[{get_val(self.key)}] = {self.value}"
def execute(self):
left = self.left.execute()
value = self.value.execute()
left[self.key] = value
class CaptureAdd(Capture):
def __init__(self, left, right, ctx):
self.ctx = ctx
self.left = left
self.right = right
def __str__(self):
return f"{self.left} + {self.right}"
def execute(self):
return get_val(self.left) + get_val(self.right)
class CaptureMul(Capture):
def __init__(self, left, right, ctx):
self.ctx = ctx
self.left = left
self.right = right
def __str__(self):
return f"{self.left} * {self.right}"
def execute(self):
return get_val(self.left) * get_val(self.right)
class CaptureSub(Capture):
def __init__(self, left, right, ctx):
self.ctx = ctx
self.left = left
self.right = right
def __str__(self):
return f"{self.left} - {self.right}"
def execute(self):
return get_val(self.left) - get_val(self.right)
class CaptureGetAttr(Capture):
def __init__(self, src, name, ctx):
self.ctx = ctx
self.src = src
self.name = name
def __str__(self):
return f"{self.src}.{self.name}"
def execute(self):
val = get_val(self.src)
return getattr(val, self.name)
def get_val(capture):
if isinstance(capture, Capture):
return capture.execute()
elif isinstance(capture, str):
return f'"{capture}"'
else:
return capture
class CaptureInitial(CaptureVariable):
def __init__(self, schema_df=None):
new_ctx: dict[str, list[Any]] = {
"operations": [],
"variables": [],
"schema_df": schema_df,
}
super().__init__(None, new_ctx)
self.name = f"input_{self.name}"
class CaptureDataFrame(CaptureInitial):
pass
class CaptureDataFrameWithDataPipeOps(CaptureDataFrame):
def as_datapipe(self):
return DataFrameTracedOps(self.ctx["variables"][0].source_datapipe, self)
def raw_iterator(self):
return self.as_datapipe().__iter__()
def __iter__(self):
return iter(self._dataframes_as_tuples())
def batch(self, batch_size=10, drop_last: bool = False, wrapper_class=DataChunkDF):
dp = self._dataframes_per_row()._dataframes_concat(batch_size)
dp = dp.as_datapipe().batch(1, drop_last=drop_last, wrapper_class=wrapper_class)
dp._dp_contains_dataframe = True
return dp
def groupby(
self,
group_key_fn,
*,
buffer_size=10000,
group_size=None,
guaranteed_group_size=None,
drop_remaining=False,
):
dp = self._dataframes_per_row()
dp = dp.as_datapipe().groupby(
group_key_fn,
buffer_size=buffer_size,
group_size=group_size,
guaranteed_group_size=guaranteed_group_size,
drop_remaining=drop_remaining,
)
return dp
def shuffle(self, *args, **kwargs):
return self._dataframes_shuffle(*args, **kwargs)
def filter(self, *args, **kwargs):
return self._dataframes_filter(*args, **kwargs)
def collate(self, *args, **kwargs):
raise RuntimeError("Can't collate unbatched DataFrames stream")
def __getattr__(self, attrname): # ?
if attrname in UNIMPLEMENTED_ATTR:
raise AttributeError("Attempting to get ", attrname)
if attrname in DATAPIPES_OPS:
return (self.as_datapipe()).__getattr__(attrname)
return super().__getattr__(attrname)
@functional_datapipe("trace_as_dataframe")
class DataFrameTracer(CaptureDataFrameWithDataPipeOps, IterDataPipe): # type: ignore[misc]
source_datapipe: Optional[Any] = None
# TODO(VitalyFedyunin): Must implement all special functions of datapipes
def set_shuffle_settings(self, *args, **kwargs):
pass
def is_shardable(self):
return False
def __init__(self, source_datapipe, schema_df=None):
self.source_datapipe = source_datapipe
if schema_df is None:
schema_df = next(iter(self.source_datapipe))
super().__init__(schema_df=schema_df)
```
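A hedged sketch (not part of the file) of how this prototype tracing API might be driven. It assumes pandas is installed and that the source DataPipe yields DataFrames with a column "x"; since this is prototype code, the exact behavior may differ.
```py
# Hedged sketch: recording DataFrame operations lazily and replaying them per item.
import pandas as pd
from torch.utils.data.datapipes.iter import IterableWrapper
source = IterableWrapper([pd.DataFrame({"x": [1, 2, 3]})])
traced = source.trace_as_dataframe()  # DataFrameTracer capture over the stream
traced["y"] = traced["x"] * 2         # recorded as Capture ops, not executed yet
for row in traced:                    # ops are applied per DataFrame; rows come out as tuples
    print(row)
```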
|
=========================================================================================================================================
SOURCE CODE FILE: datapipes.py
LINES: 1
SIZE: 4.56 KB
PATH: scripts\freecad_env\Lib\site-packages\torch\utils\data\datapipes\dataframe\datapipes.py
ENCODING: utf-8
```py
# mypy: allow-untyped-defs
import random
from typing import Any
from torch.utils.data.datapipes._decorator import functional_datapipe
from torch.utils.data.datapipes.dataframe import dataframe_wrapper as df_wrapper
from torch.utils.data.datapipes.datapipe import DFIterDataPipe, IterDataPipe
__all__ = [
"ConcatDataFramesPipe",
"DataFramesAsTuplesPipe",
"ExampleAggregateAsDataFrames",
"FilterDataFramesPipe",
"PerRowDataFramesPipe",
"ShuffleDataFramesPipe",
]
@functional_datapipe("_dataframes_as_tuples")
class DataFramesAsTuplesPipe(IterDataPipe):
def __init__(self, source_datapipe):
self.source_datapipe = source_datapipe
def __iter__(self):
for df in self.source_datapipe:
# for record in df.to_records(index=False):
yield from df_wrapper.iterate(df)
@functional_datapipe("_dataframes_per_row", enable_df_api_tracing=True)
class PerRowDataFramesPipe(DFIterDataPipe):
def __init__(self, source_datapipe):
self.source_datapipe = source_datapipe
def __iter__(self):
for df in self.source_datapipe:
# TODO(VitalyFedyunin): Replace with a TorchArrow-only API, as we are dropping pandas in a follow-up
for i in range(len(df)):
yield df[i : i + 1]
@functional_datapipe("_dataframes_concat", enable_df_api_tracing=True)
class ConcatDataFramesPipe(DFIterDataPipe):
def __init__(self, source_datapipe, batch=3):
self.source_datapipe = source_datapipe
self.n_batch = batch
def __iter__(self):
buffer = []
for df in self.source_datapipe:
buffer.append(df)
if len(buffer) == self.n_batch:
yield df_wrapper.concat(buffer)
buffer = []
if len(buffer):
yield df_wrapper.concat(buffer)
@functional_datapipe("_dataframes_shuffle", enable_df_api_tracing=True)
class ShuffleDataFramesPipe(DFIterDataPipe):
def __init__(self, source_datapipe):
self.source_datapipe = source_datapipe
def __iter__(self):
size = None
all_buffer: list[Any] = []
for df in self.source_datapipe:
if size is None:
size = df_wrapper.get_len(df)
all_buffer.extend(
df_wrapper.get_item(df, i) for i in range(df_wrapper.get_len(df))
)
random.shuffle(all_buffer)
buffer = []
for df in all_buffer:
buffer.append(df)
if len(buffer) == size:
yield df_wrapper.concat(buffer)
buffer = []
if len(buffer):
yield df_wrapper.concat(buffer)
@functional_datapipe("_dataframes_filter", enable_df_api_tracing=True)
class FilterDataFramesPipe(DFIterDataPipe):
def __init__(self, source_datapipe, filter_fn):
self.source_datapipe = source_datapipe
self.filter_fn = filter_fn
def __iter__(self):
size = None
all_buffer = []
filter_res = []
for df in self.source_datapipe:
if size is None:
size = len(df.index)
for i in range(len(df.index)):
all_buffer.append(df[i : i + 1])
filter_res.append(self.filter_fn(df.iloc[i]))
buffer = []
for df, res in zip(all_buffer, filter_res):
if res:
buffer.append(df)
if len(buffer) == size:
yield df_wrapper.concat(buffer)
buffer = []
if len(buffer):
yield df_wrapper.concat(buffer)
@functional_datapipe("_to_dataframes_pipe", enable_df_api_tracing=True)
class ExampleAggregateAsDataFrames(DFIterDataPipe):
def __init__(self, source_datapipe, dataframe_size=10, columns=None):
self.source_datapipe = source_datapipe
self.columns = columns
self.dataframe_size = dataframe_size
def _as_list(self, item):
try:
return list(item)
except (
Exception
): # TODO(VitalyFedyunin): Replace with better iterable exception
return [item]
def __iter__(self):
aggregate = []
for item in self.source_datapipe:
aggregate.append(self._as_list(item))
if len(aggregate) == self.dataframe_size:
yield df_wrapper.create_dataframe(aggregate, columns=self.columns)
aggregate = []
if len(aggregate) > 0:
yield df_wrapper.create_dataframe(aggregate, columns=self.columns)
```
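The pipes above are thin buffering layers over `df_wrapper`. Below is a minimal, hedged usage sketch (not part of the module) showing `ExampleAggregateAsDataFrames` turning plain tuples into DataFrames; it assumes pandas is installed as the default `dataframe_wrapper` backend and instantiates the class directly to avoid the DataFrame-tracing machinery.
```py
# Hedged usage sketch; assumes pandas is available as the dataframe_wrapper backend.
from torch.utils.data.datapipes.dataframe.datapipes import ExampleAggregateAsDataFrames
from torch.utils.data.datapipes.iter import IterableWrapper

rows = IterableWrapper([(1, 10.0), (2, 20.0), (3, 30.0)])
# Aggregate every 2 rows into one DataFrame; the leftover row becomes a final, smaller frame.
df_dp = ExampleAggregateAsDataFrames(rows, dataframe_size=2, columns=["id", "value"])
for df in df_dp:
    print(df)
```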
|
==========================================================================================================================================
SOURCE CODE FILE: structures.py
LINES: 1
SIZE: 0.67 KB
PATH: scripts\freecad_env\Lib\site-packages\torch\utils\data\datapipes\dataframe\structures.py
ENCODING: utf-8
```py
from collections.abc import Iterator
from typing import Any
from torch.utils.data.datapipes.dataframe import dataframe_wrapper as df_wrapper
from torch.utils.data.datapipes.datapipe import DataChunk
__all__ = ["DataChunkDF"]
class DataChunkDF(DataChunk):
"""DataChunkDF iterating over individual items inside of DataFrame containers, to access DataFrames user `raw_iterator`."""
def __iter__(self) -> Iterator[Any]:
for df in self.items:
yield from df_wrapper.iterate(df)
def __len__(self) -> int:
total_len = 0
for df in self.items:
total_len += df_wrapper.get_len(df)
return total_len
```
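A hedged sketch of the distinction the docstring draws, assuming pandas is installed: iterating a `DataChunkDF` walks the individual rows across all wrapped DataFrames, while `raw_iterator()` yields the DataFrames themselves.
```py
# Hedged sketch, assuming pandas is installed.
import pandas as pd
from torch.utils.data.datapipes.dataframe.structures import DataChunkDF

chunk = DataChunkDF([pd.DataFrame({"x": [1, 2]}), pd.DataFrame({"x": [3]})])
print(len(chunk))                            # 3 -- row counts summed across both frames
print(sum(1 for _ in chunk))                 # 3 -- __iter__ yields individual rows
print(sum(1 for _ in chunk.raw_iterator()))  # 2 -- the underlying DataFrames
```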
|
==============================================================================================================================
SOURCE CODE FILE: datapipe.py
LINES: 1
SIZE: 16.81 KB
PATH: scripts\freecad_env\Lib\site-packages\torch\utils\data\datapipes\datapipe.py
ENCODING: utf-8
```py
import functools
import pickle
from collections.abc import Iterable, Iterator
from typing import Callable, Optional, TypeVar
from torch.utils._import_utils import import_dill
from torch.utils.data.datapipes._hook_iterator import _SnapshotState
from torch.utils.data.datapipes._typing import _DataPipeMeta, _IterDataPipeMeta
from torch.utils.data.datapipes.utils.common import (
_deprecation_warning,
_iter_deprecated_functional_names,
_map_deprecated_functional_names,
)
from torch.utils.data.dataset import Dataset, IterableDataset
dill = import_dill()
HAS_DILL = dill is not None
__all__ = [
"DataChunk",
"DFIterDataPipe",
"IterDataPipe",
"MapDataPipe",
]
_T = TypeVar("_T")
_T_co = TypeVar("_T_co", covariant=True)
UNTRACABLE_DATAFRAME_PIPES = [
"batch", # As it returns DataChunks
"groupby", # As it returns DataChunks
"_dataframes_as_tuples", # As it unpacks DF
"trace_as_dataframe", # As it used to mark DF for tracing
]
class DataChunk(list[_T]):
def __init__(self, items: Iterable[_T]) -> None:
items = list(items)
super().__init__(items)
self.items = items
def as_str(self, indent: str = "") -> str:
return indent + "[" + ", ".join(str(i) for i in iter(self)) + "]"
def __iter__(self) -> Iterator[_T]:
yield from super().__iter__()
def raw_iterator(self) -> Iterator[_T]:
yield from self.items
class IterDataPipe(IterableDataset[_T_co], metaclass=_IterDataPipeMeta):
r"""
Iterable-style DataPipe.
All DataPipes that represent an iterable of data samples should subclass this.
This style of DataPipes is particularly useful when data come from a stream, or
when the number of samples is too large to fit them all in memory. ``IterDataPipe`` is lazily initialized and its
elements are computed only when ``next()`` is called on the iterator of an ``IterDataPipe``.
All subclasses should overwrite :meth:`__iter__`, which would return an
iterator of samples in this DataPipe. Calling ``__iter__`` of an ``IterDataPipe`` automatically invokes its
method ``reset()``, which by default performs no operation. When writing a custom ``IterDataPipe``, users should
override ``reset()`` if necessary. The common usages include resetting buffers, pointers,
and various state variables within the custom ``IterDataPipe``.
Note:
Only `one` iterator can be valid for each ``IterDataPipe`` at a time,
        and creating a second iterator will invalidate the first one. This constraint is necessary because
some ``IterDataPipe`` have internal buffers, whose states can become invalid if there are multiple iterators.
The code example below presents details on how this constraint looks in practice.
If you have any feedback related to this constraint, please see `GitHub IterDataPipe Single Iterator Issue`_.
These DataPipes can be invoked in two ways, using the class constructor or applying their
functional form onto an existing ``IterDataPipe`` (recommended, available to most but not all DataPipes).
You can chain multiple `IterDataPipe` together to form a pipeline that will perform multiple
operations in succession.
.. _GitHub IterDataPipe Single Iterator Issue:
https://github.com/pytorch/data/issues/45
Note:
When a subclass is used with :class:`~torch.utils.data.DataLoader`, each
item in the DataPipe will be yielded from the :class:`~torch.utils.data.DataLoader`
iterator. When :attr:`num_workers > 0`, each worker process will have a
different copy of the DataPipe object, so it is often desired to configure
each copy independently to avoid having duplicate data returned from the
workers. :func:`~torch.utils.data.get_worker_info`, when called in a worker
process, returns information about the worker. It can be used in either the
dataset's :meth:`__iter__` method or the :class:`~torch.utils.data.DataLoader` 's
:attr:`worker_init_fn` option to modify each copy's behavior.
Examples:
General Usage:
>>> # xdoctest: +SKIP
>>> from torchdata.datapipes.iter import IterableWrapper, Mapper
>>> dp = IterableWrapper(range(10))
>>> map_dp_1 = Mapper(dp, lambda x: x + 1) # Using class constructor
>>> map_dp_2 = dp.map(lambda x: x + 1) # Using functional form (recommended)
>>> list(map_dp_1)
[1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
>>> list(map_dp_2)
[1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
>>> filter_dp = map_dp_1.filter(lambda x: x % 2 == 0)
>>> list(filter_dp)
[2, 4, 6, 8, 10]
Single Iterator Constraint Example:
>>> from torchdata.datapipes.iter import IterableWrapper, Mapper
>>> source_dp = IterableWrapper(range(10))
>>> it1 = iter(source_dp)
>>> list(it1)
[0, 1, 2, 3, 4, 5, 6, 7, 8, 9]
>>> it1 = iter(source_dp)
>>> it2 = iter(source_dp) # The creation of a new iterator invalidates `it1`
>>> next(it2)
0
        >>> next(it1) # Further usage of `it1` will raise a `RuntimeError`
"""
functions: dict[str, Callable] = {}
reduce_ex_hook: Optional[Callable] = None
getstate_hook: Optional[Callable] = None
str_hook: Optional[Callable] = None
repr_hook: Optional[Callable] = None
_valid_iterator_id: Optional[int] = None
_number_of_samples_yielded: int = 0
_snapshot_state: _SnapshotState = _SnapshotState.NotStarted
_fast_forward_iterator: Optional[Iterator] = None
def __iter__(self) -> Iterator[_T_co]:
return self
def __getattr__(self, attribute_name):
if attribute_name in IterDataPipe.functions:
if attribute_name in _iter_deprecated_functional_names:
kwargs = _iter_deprecated_functional_names[attribute_name]
_deprecation_warning(**kwargs)
f = IterDataPipe.functions[attribute_name]
function = functools.partial(f, self)
functools.update_wrapper(wrapper=function, wrapped=f, assigned=("__doc__",))
return function
else:
raise AttributeError(
f"'{self.__class__.__name__}' object has no attribute '{attribute_name}"
)
@classmethod
def register_function(cls, function_name, function):
cls.functions[function_name] = function
@classmethod
def register_datapipe_as_function(
cls, function_name, cls_to_register, enable_df_api_tracing=False
):
if function_name in cls.functions:
raise Exception( # noqa: TRY002
f"Unable to add DataPipe function name {function_name} as it is already taken"
)
def class_function(cls, enable_df_api_tracing, source_dp, *args, **kwargs):
result_pipe = cls(source_dp, *args, **kwargs)
if isinstance(result_pipe, IterDataPipe):
if enable_df_api_tracing or isinstance(source_dp, DFIterDataPipe):
if function_name not in UNTRACABLE_DATAFRAME_PIPES:
result_pipe = result_pipe.trace_as_dataframe()
return result_pipe
function = functools.partial(
class_function, cls_to_register, enable_df_api_tracing
)
functools.update_wrapper(
wrapper=function, wrapped=cls_to_register, assigned=("__doc__",)
)
cls.functions[function_name] = function
def __getstate__(self):
"""
Serialize `lambda` functions when `dill` is available.
If this doesn't cover your custom DataPipe's use case, consider writing custom methods for
`__getstate__` and `__setstate__`, or use `pickle.dumps` for serialization.
"""
state = self.__dict__
if IterDataPipe.getstate_hook is not None:
return IterDataPipe.getstate_hook(state)
return state
def __reduce_ex__(self, *args, **kwargs):
if IterDataPipe.reduce_ex_hook is not None:
try:
return IterDataPipe.reduce_ex_hook(self)
except NotImplementedError:
pass
return super().__reduce_ex__(*args, **kwargs)
@classmethod
def set_getstate_hook(cls, hook_fn):
if IterDataPipe.getstate_hook is not None and hook_fn is not None:
raise RuntimeError("Attempt to override existing getstate_hook")
IterDataPipe.getstate_hook = hook_fn
@classmethod
def set_reduce_ex_hook(cls, hook_fn):
if IterDataPipe.reduce_ex_hook is not None and hook_fn is not None:
raise RuntimeError("Attempt to override existing reduce_ex_hook")
IterDataPipe.reduce_ex_hook = hook_fn
def __repr__(self):
if self.repr_hook is not None:
return self.repr_hook(self)
# Instead of showing <torch. ... .MapperIterDataPipe object at 0x.....>, return the class name
return str(self.__class__.__qualname__)
def __str__(self):
if self.str_hook is not None:
return self.str_hook(self)
# Instead of showing <torch. ... .MapperIterDataPipe object at 0x.....>, return the class name
return str(self.__class__.__qualname__)
def __dir__(self):
# for auto-completion in a REPL (e.g. Jupyter notebook)
return list(super().__dir__()) + list(self.functions.keys())
def reset(self) -> None:
r"""
Reset the `IterDataPipe` to the initial state.
By default, no-op. For subclasses of `IterDataPipe`, depending on their functionalities,
they may want to override this method with implementations that
may clear the buffers and reset pointers of the DataPipe.
The `reset` method is always called when `__iter__` is called as part of `hook_iterator`.
"""
class DFIterDataPipe(IterDataPipe):
def _is_dfpipe(self):
return True
class MapDataPipe(Dataset[_T_co], metaclass=_DataPipeMeta):
r"""
Map-style DataPipe.
All datasets that represent a map from keys to data samples should subclass this.
Subclasses should overwrite :meth:`__getitem__`, supporting fetching a
data sample for a given, unique key. Subclasses can also optionally overwrite
:meth:`__len__`, which is expected to return the size of the dataset by many
:class:`~torch.utils.data.Sampler` implementations and the default options
of :class:`~torch.utils.data.DataLoader`.
These DataPipes can be invoked in two ways, using the class constructor or applying their
    functional form onto an existing `MapDataPipe` (recommended, available to most but not all DataPipes).
Note:
:class:`~torch.utils.data.DataLoader` by default constructs an index
sampler that yields integral indices. To make it work with a map-style
DataPipe with non-integral indices/keys, a custom sampler must be provided.
Example:
>>> # xdoctest: +SKIP
>>> from torchdata.datapipes.map import SequenceWrapper, Mapper
>>> dp = SequenceWrapper(range(10))
>>> map_dp_1 = dp.map(lambda x: x + 1) # Using functional form (recommended)
>>> list(map_dp_1)
[1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
>>> map_dp_2 = Mapper(dp, lambda x: x + 1) # Using class constructor
>>> list(map_dp_2)
[1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
>>> batch_dp = map_dp_1.batch(batch_size=2)
>>> list(batch_dp)
[[1, 2], [3, 4], [5, 6], [7, 8], [9, 10]]
"""
functions: dict[str, Callable] = {}
reduce_ex_hook: Optional[Callable] = None
getstate_hook: Optional[Callable] = None
str_hook: Optional[Callable] = None
repr_hook: Optional[Callable] = None
def __getattr__(self, attribute_name):
if attribute_name in MapDataPipe.functions:
if attribute_name in _map_deprecated_functional_names:
kwargs = _map_deprecated_functional_names[attribute_name]
_deprecation_warning(**kwargs)
f = MapDataPipe.functions[attribute_name]
function = functools.partial(f, self)
functools.update_wrapper(wrapper=function, wrapped=f, assigned=("__doc__",))
return function
else:
raise AttributeError(
f"'{self.__class__.__name__}' object has no attribute '{attribute_name}"
)
@classmethod
def register_function(cls, function_name, function):
cls.functions[function_name] = function
@classmethod
def register_datapipe_as_function(cls, function_name, cls_to_register):
if function_name in cls.functions:
raise Exception( # noqa: TRY002
f"Unable to add DataPipe function name {function_name} as it is already taken"
)
def class_function(cls, source_dp, *args, **kwargs):
result_pipe = cls(source_dp, *args, **kwargs)
return result_pipe
function = functools.partial(class_function, cls_to_register)
functools.update_wrapper(
wrapper=function, wrapped=cls_to_register, assigned=("__doc__",)
)
cls.functions[function_name] = function
def __getstate__(self):
"""
Serialize `lambda` functions when `dill` is available.
If this doesn't cover your custom DataPipe's use case, consider writing custom methods for
`__getstate__` and `__setstate__`, or use `pickle.dumps` for serialization.
"""
state = self.__dict__
if MapDataPipe.getstate_hook is not None:
return MapDataPipe.getstate_hook(state)
return state
def __reduce_ex__(self, *args, **kwargs):
if MapDataPipe.reduce_ex_hook is not None:
try:
return MapDataPipe.reduce_ex_hook(self)
except NotImplementedError:
pass
return super().__reduce_ex__(*args, **kwargs)
@classmethod
def set_getstate_hook(cls, hook_fn):
if MapDataPipe.getstate_hook is not None and hook_fn is not None:
raise RuntimeError("Attempt to override existing getstate_hook")
MapDataPipe.getstate_hook = hook_fn
@classmethod
def set_reduce_ex_hook(cls, hook_fn):
if MapDataPipe.reduce_ex_hook is not None and hook_fn is not None:
raise RuntimeError("Attempt to override existing reduce_ex_hook")
MapDataPipe.reduce_ex_hook = hook_fn
def __repr__(self):
if self.repr_hook is not None:
return self.repr_hook(self)
# Instead of showing <torch. ... .MapperMapDataPipe object at 0x.....>, return the class name
return str(self.__class__.__qualname__)
def __str__(self):
if self.str_hook is not None:
return self.str_hook(self)
# Instead of showing <torch. ... .MapperMapDataPipe object at 0x.....>, return the class name
return str(self.__class__.__qualname__)
def __dir__(self):
# for auto-completion in a REPL (e.g. Jupyter notebook)
return list(super().__dir__()) + list(self.functions.keys())
class _DataPipeSerializationWrapper:
def __init__(self, datapipe):
self._datapipe = datapipe
def __getstate__(self):
use_dill = False
try:
value = pickle.dumps(self._datapipe)
except Exception:
if HAS_DILL:
value = dill.dumps(self._datapipe)
use_dill = True
else:
raise
return (value, use_dill)
def __setstate__(self, state):
value, use_dill = state
if use_dill:
self._datapipe = dill.loads(value)
else:
self._datapipe = pickle.loads(value)
def __len__(self):
try:
return len(self._datapipe)
except Exception as e:
raise TypeError(
f"{type(self).__name__} instance doesn't have valid length"
) from e
class _IterDataPipeSerializationWrapper(_DataPipeSerializationWrapper, IterDataPipe):
def __init__(self, datapipe: IterDataPipe[_T_co]):
super().__init__(datapipe)
self._datapipe_iter: Optional[Iterator[_T_co]] = None
def __iter__(self) -> "_IterDataPipeSerializationWrapper":
self._datapipe_iter = iter(self._datapipe)
return self
def __next__(self) -> _T_co: # type: ignore[type-var]
assert self._datapipe_iter is not None
return next(self._datapipe_iter)
class _MapDataPipeSerializationWrapper(_DataPipeSerializationWrapper, MapDataPipe):
def __getitem__(self, idx):
return self._datapipe[idx]
```
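As a hedged illustration of the registration machinery above (`functional_datapipe` plus `register_datapipe_as_function`), the sketch below defines a toy `IterDataPipe` and invokes it both through its constructor and through its functional name; the name `my_repeat` and the class are purely illustrative.
```py
# Hedged sketch of functional-form registration; "my_repeat" is an illustrative name.
from torch.utils.data.datapipes._decorator import functional_datapipe
from torch.utils.data.datapipes.datapipe import IterDataPipe
from torch.utils.data.datapipes.iter import IterableWrapper

@functional_datapipe("my_repeat")  # registers the class under IterDataPipe.functions
class RepeatIterDataPipe(IterDataPipe):
    def __init__(self, source_datapipe, times):
        self.source_datapipe = source_datapipe
        self.times = times

    def __iter__(self):
        for item in self.source_datapipe:
            for _ in range(self.times):
                yield item

print(list(RepeatIterDataPipe(IterableWrapper([1, 2]), times=2)))  # class constructor
print(list(IterableWrapper([1, 2]).my_repeat(times=2)))            # functional form
# Both print [1, 1, 2, 2]; registering the same name twice in one process raises an error.
```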
|
=============================================================================================================================
SOURCE CODE FILE: gen_pyi.py
LINES: 9
SIZE: 10.82 KB
PATH: scripts\freecad_env\Lib\site-packages\torch\utils\data\datapipes\gen_pyi.py
ENCODING: utf-8
```py
# mypy: allow-untyped-defs
import os
import pathlib
from collections import defaultdict
from typing import Any, Union
def materialize_lines(lines: list[str], indentation: int) -> str:
output = ""
new_line_with_indent = "\n" + " " * indentation
for i, line in enumerate(lines):
if i != 0:
output += new_line_with_indent
output += line.replace("\n", new_line_with_indent)
return output
def gen_from_template(
dir: str,
template_name: str,
output_name: str,
replacements: list[tuple[str, Any, int]],
):
template_path = os.path.join(dir, template_name)
output_path = os.path.join(dir, output_name)
with open(template_path) as f:
content = f.read()
for placeholder, lines, indentation in replacements:
with open(output_path, "w") as f:
content = content.replace(
placeholder, materialize_lines(lines, indentation)
)
f.write(content)
def find_file_paths(dir_paths: list[str], files_to_exclude: set[str]) -> set[str]:
"""
When given a path to a directory, returns the paths to the relevant files within it.
    This function does NOT recursively traverse subdirectories.
"""
paths: set[str] = set()
for dir_path in dir_paths:
all_files = os.listdir(dir_path)
python_files = {fname for fname in all_files if ".py" == fname[-3:]}
filter_files = {
fname for fname in python_files if fname not in files_to_exclude
}
paths.update({os.path.join(dir_path, fname) for fname in filter_files})
return paths
def extract_method_name(line: str) -> str:
"""Extract method name from decorator in the form of "@functional_datapipe({method_name})"."""
if '("' in line:
start_token, end_token = '("', '")'
elif "('" in line:
start_token, end_token = "('", "')"
else:
raise RuntimeError(
f"Unable to find appropriate method name within line:\n{line}"
)
start, end = line.find(start_token) + len(start_token), line.find(end_token)
return line[start:end]
def extract_class_name(line: str) -> str:
"""Extract class name from class definition in the form of "class {CLASS_NAME}({Type}):"."""
start_token = "class "
end_token = "("
start, end = line.find(start_token) + len(start_token), line.find(end_token)
return line[start:end]
def parse_datapipe_file(
file_path: str,
) -> tuple[dict[str, str], dict[str, str], set[str], dict[str, list[str]]]:
"""Given a path to file, parses the file and returns a dictionary of method names to function signatures."""
method_to_signature, method_to_class_name, special_output_type = {}, {}, set()
doc_string_dict = defaultdict(list)
with open(file_path) as f:
open_paren_count = 0
method_name, class_name, signature = "", "", ""
skip = False
for line in f:
if line.count('"""') % 2 == 1:
skip = not skip
if skip or '"""' in line: # Saving docstrings
doc_string_dict[method_name].append(line)
continue
if "@functional_datapipe" in line:
method_name = extract_method_name(line)
doc_string_dict[method_name] = []
continue
if method_name and "class " in line:
class_name = extract_class_name(line)
continue
if method_name and ("def __init__(" in line or "def __new__(" in line):
if "def __new__(" in line:
special_output_type.add(method_name)
open_paren_count += 1
start = line.find("(") + len("(")
line = line[start:]
if open_paren_count > 0:
open_paren_count += line.count("(")
open_paren_count -= line.count(")")
if open_paren_count == 0:
end = line.rfind(")")
signature += line[:end]
method_to_signature[method_name] = process_signature(signature)
method_to_class_name[method_name] = class_name
method_name, class_name, signature = "", "", ""
elif open_paren_count < 0:
raise RuntimeError(
"open parenthesis count < 0. This shouldn't be possible."
)
else:
signature += line.strip("\n").strip(" ")
return (
method_to_signature,
method_to_class_name,
special_output_type,
doc_string_dict,
)
def parse_datapipe_files(
file_paths: set[str],
) -> tuple[dict[str, str], dict[str, str], set[str], dict[str, list[str]]]:
(
methods_and_signatures,
methods_and_class_names,
methods_with_special_output_types,
) = ({}, {}, set())
methods_and_doc_strings = {}
for path in file_paths:
(
method_to_signature,
method_to_class_name,
methods_needing_special_output_types,
doc_string_dict,
) = parse_datapipe_file(path)
methods_and_signatures.update(method_to_signature)
methods_and_class_names.update(method_to_class_name)
methods_with_special_output_types.update(methods_needing_special_output_types)
methods_and_doc_strings.update(doc_string_dict)
return (
methods_and_signatures,
methods_and_class_names,
methods_with_special_output_types,
methods_and_doc_strings,
)
def split_outside_bracket(line: str, delimiter: str = ",") -> list[str]:
"""Given a line of text, split it on comma unless the comma is within a bracket '[]'."""
bracket_count = 0
curr_token = ""
res = []
for char in line:
if char == "[":
bracket_count += 1
elif char == "]":
bracket_count -= 1
elif char == delimiter and bracket_count == 0:
res.append(curr_token)
curr_token = ""
continue
curr_token += char
res.append(curr_token)
return res
def process_signature(line: str) -> str:
"""
Clean up a given raw function signature.
This includes removing the self-referential datapipe argument, default
arguments of input functions, newlines, and spaces.
"""
tokens: list[str] = split_outside_bracket(line)
for i, token in enumerate(tokens):
tokens[i] = token.strip(" ")
if token == "cls":
tokens[i] = "self"
elif i > 0 and ("self" == tokens[i - 1]) and (tokens[i][0] != "*"):
# Remove the datapipe after 'self' or 'cls' unless it has '*'
tokens[i] = ""
elif "Callable =" in token: # Remove default argument if it is a function
head, _default_arg = token.rsplit("=", 2)
tokens[i] = head.strip(" ") + "= ..."
tokens = [t for t in tokens if t != ""]
line = ", ".join(tokens)
return line
def get_method_definitions(
file_path: Union[str, list[str]],
files_to_exclude: set[str],
deprecated_files: set[str],
default_output_type: str,
method_to_special_output_type: dict[str, str],
root: str = "",
) -> list[str]:
"""
    .pyi generation process for functional DataPipes:
    1. Find the files that we want to process (exclude the ones we don't).
    2. Parse method names and signatures.
    3. Remove the first argument after `self` (unless it is "*datapipes"), default args, and spaces.
"""
if root == "":
root = str(pathlib.Path(__file__).parent.resolve())
file_path = [file_path] if isinstance(file_path, str) else file_path
file_path = [os.path.join(root, path) for path in file_path]
file_paths = find_file_paths(
file_path, files_to_exclude=files_to_exclude.union(deprecated_files)
)
(
methods_and_signatures,
methods_and_class_names,
methods_w_special_output_types,
methods_and_doc_strings,
) = parse_datapipe_files(file_paths)
for fn_name in method_to_special_output_type:
if fn_name not in methods_w_special_output_types:
methods_w_special_output_types.add(fn_name)
method_definitions = []
for method_name, arguments in methods_and_signatures.items():
class_name = methods_and_class_names[method_name]
if method_name in methods_w_special_output_types:
output_type = method_to_special_output_type[method_name]
else:
output_type = default_output_type
doc_string = "".join(methods_and_doc_strings[method_name])
if doc_string == "":
doc_string = " ...\n"
method_definitions.append(
f"# Functional form of '{class_name}'\n"
f"def {method_name}({arguments}) -> {output_type}:\n"
f"{doc_string}"
)
method_definitions.sort(
key=lambda s: s.split("\n")[1]
) # sorting based on method_name
return method_definitions
# Defined outside of main() so they can be imported by TorchData
iterDP_file_path: str = "iter"
iterDP_files_to_exclude: set[str] = {"__init__.py", "utils.py"}
iterDP_deprecated_files: set[str] = set()
iterDP_method_to_special_output_type: dict[str, str] = {
"demux": "List[IterDataPipe]",
"fork": "List[IterDataPipe]",
}
mapDP_file_path: str = "map"
mapDP_files_to_exclude: set[str] = {"__init__.py", "utils.py"}
mapDP_deprecated_files: set[str] = set()
mapDP_method_to_special_output_type: dict[str, str] = {"shuffle": "IterDataPipe"}
def main() -> None:
"""
    Inject the generated method definitions into the template datapipe.pyi.in.
TODO: The current implementation of this script only generates interfaces for built-in methods. To generate
interface for user-defined DataPipes, consider changing `IterDataPipe.register_datapipe_as_function`.
"""
iter_method_definitions = get_method_definitions(
iterDP_file_path,
iterDP_files_to_exclude,
iterDP_deprecated_files,
"IterDataPipe",
iterDP_method_to_special_output_type,
)
map_method_definitions = get_method_definitions(
mapDP_file_path,
mapDP_files_to_exclude,
mapDP_deprecated_files,
"MapDataPipe",
mapDP_method_to_special_output_type,
)
path = pathlib.Path(__file__).parent.resolve()
replacements = [
("${IterDataPipeMethods}", iter_method_definitions, 4),
("${MapDataPipeMethods}", map_method_definitions, 4),
]
gen_from_template(
dir=str(path),
template_name="datapipe.pyi.in",
output_name="datapipe.pyi",
replacements=replacements,
)
if __name__ == "__main__":
main()
```
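A small, hedged sketch of the two string helpers above: `split_outside_bracket` ignores commas inside `[]`, and `process_signature` drops the datapipe argument that follows `self` and collapses callable defaults to `...`.
```py
# Hedged sketch exercising the helpers defined in gen_pyi.py above.
from torch.utils.data.datapipes.gen_pyi import process_signature, split_outside_bracket

# Commas inside brackets are not treated as argument separators.
print(split_outside_bracket("a: Dict[str, int], b: int"))
# -> ['a: Dict[str, int]', ' b: int']

# The datapipe argument after `self` is dropped and the Callable default becomes '...'.
print(process_signature("self, datapipe, fn: Callable = my_fn, input_col=None"))
# -> roughly "self, fn: Callable= ..., input_col=None"
```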
|
===================================================================================================================================
SOURCE CODE FILE: __init__.py
LINES: 1
SIZE: 1.84 KB
PATH: scripts\freecad_env\Lib\site-packages\torch\utils\data\datapipes\iter\__init__.py
ENCODING: utf-8
```py
from torch.utils.data.datapipes.iter.callable import (
CollatorIterDataPipe as Collator,
MapperIterDataPipe as Mapper,
)
from torch.utils.data.datapipes.iter.combinatorics import (
SamplerIterDataPipe as Sampler,
ShufflerIterDataPipe as Shuffler,
)
from torch.utils.data.datapipes.iter.combining import (
ConcaterIterDataPipe as Concater,
DemultiplexerIterDataPipe as Demultiplexer,
ForkerIterDataPipe as Forker,
MultiplexerIterDataPipe as Multiplexer,
ZipperIterDataPipe as Zipper,
)
from torch.utils.data.datapipes.iter.filelister import (
FileListerIterDataPipe as FileLister,
)
from torch.utils.data.datapipes.iter.fileopener import (
FileOpenerIterDataPipe as FileOpener,
)
from torch.utils.data.datapipes.iter.grouping import (
BatcherIterDataPipe as Batcher,
GrouperIterDataPipe as Grouper,
UnBatcherIterDataPipe as UnBatcher,
)
from torch.utils.data.datapipes.iter.routeddecoder import (
RoutedDecoderIterDataPipe as RoutedDecoder,
)
from torch.utils.data.datapipes.iter.selecting import FilterIterDataPipe as Filter
from torch.utils.data.datapipes.iter.sharding import (
ShardingFilterIterDataPipe as ShardingFilter,
)
from torch.utils.data.datapipes.iter.streamreader import (
StreamReaderIterDataPipe as StreamReader,
)
from torch.utils.data.datapipes.iter.utils import (
IterableWrapperIterDataPipe as IterableWrapper,
)
__all__ = [
"Batcher",
"Collator",
"Concater",
"Demultiplexer",
"FileLister",
"FileOpener",
"Filter",
"Forker",
"Grouper",
"IterableWrapper",
"Mapper",
"Multiplexer",
"RoutedDecoder",
"Sampler",
"ShardingFilter",
"Shuffler",
"StreamReader",
"UnBatcher",
"Zipper",
]
# Please keep this list sorted
assert __all__ == sorted(__all__)
```
|
===================================================================================================================================
SOURCE CODE FILE: callable.py
LINES: 1
SIZE: 9.09 KB
PATH: scripts\freecad_env\Lib\site-packages\torch\utils\data\datapipes\iter\callable.py
ENCODING: utf-8
```py
# mypy: allow-untyped-defs
import functools
from collections import namedtuple
from collections.abc import Iterator, Sized
from typing import Any, Callable, Optional, TypeVar, Union
from torch.utils.data._utils.collate import default_collate
from torch.utils.data.datapipes._decorator import functional_datapipe
from torch.utils.data.datapipes.dataframe import dataframe_wrapper as df_wrapper
from torch.utils.data.datapipes.datapipe import IterDataPipe
from torch.utils.data.datapipes.utils.common import (
_check_unpickable_fn,
validate_input_col,
)
__all__ = [
"CollatorIterDataPipe",
"MapperIterDataPipe",
]
_T_co = TypeVar("_T_co", covariant=True)
@functional_datapipe("map")
class MapperIterDataPipe(IterDataPipe[_T_co]):
r"""
Applies a function over each item from the source DataPipe (functional name: ``map``).
The function can be any regular Python function or partial object. Lambda
function is not recommended as it is not supported by pickle.
Args:
datapipe: Source Iterable DataPipe
fn: Function being applied over each item
input_col: Index or indices of data which ``fn`` is applied, such as:
- ``None`` as default to apply ``fn`` to the data directly.
- Integer(s) is used for list/tuple.
- Key(s) is used for dict.
output_col: Index of data where result of ``fn`` is placed. ``output_col`` can be specified
only when ``input_col`` is not ``None``
- ``None`` as default to replace the index that ``input_col`` specified; For ``input_col`` with
multiple indices, the left-most one is used, and other indices will be removed.
- Integer is used for list/tuple. ``-1`` represents to append result at the end.
- Key is used for dict. New key is acceptable.
Example:
>>> # xdoctest: +SKIP
>>> from torchdata.datapipes.iter import IterableWrapper, Mapper
>>> def add_one(x):
... return x + 1
>>> dp = IterableWrapper(range(10))
>>> map_dp_1 = dp.map(add_one) # Invocation via functional form is preferred
>>> list(map_dp_1)
[1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
>>> # We discourage the usage of `lambda` functions as they are not serializable with `pickle`
>>> # Use `functools.partial` or explicitly define the function instead
>>> map_dp_2 = Mapper(dp, lambda x: x + 1)
>>> list(map_dp_2)
[1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
"""
datapipe: IterDataPipe
fn: Callable
def __init__(
self,
datapipe: IterDataPipe,
fn: Callable,
input_col=None,
output_col=None,
) -> None:
super().__init__()
self.datapipe = datapipe
_check_unpickable_fn(fn)
self.fn = fn # type: ignore[assignment]
self.input_col = input_col
if input_col is None and output_col is not None:
raise ValueError("`output_col` must be None when `input_col` is None.")
if isinstance(output_col, (list, tuple)):
if len(output_col) > 1:
raise ValueError("`output_col` must be a single-element list or tuple")
output_col = output_col[0]
self.output_col = output_col
validate_input_col(fn, input_col)
def _apply_fn(self, data):
if self.input_col is None and self.output_col is None:
return self.fn(data)
if self.input_col is None:
res = self.fn(data)
elif isinstance(self.input_col, (list, tuple)):
args = tuple(data[col] for col in self.input_col)
res = self.fn(*args)
else:
res = self.fn(data[self.input_col])
# Copy tuple to list and run in-place modification because tuple is immutable.
if isinstance(data, tuple):
t_flag = True
data = list(data)
else:
t_flag = False
if self.output_col is None:
if isinstance(self.input_col, (list, tuple)):
data[self.input_col[0]] = res
for idx in sorted(self.input_col[1:], reverse=True):
del data[idx]
else:
data[self.input_col] = res
else:
if self.output_col == -1:
data.append(res)
else:
data[self.output_col] = res
# Convert list back to tuple
return tuple(data) if t_flag else data
def __iter__(self) -> Iterator[_T_co]:
for data in self.datapipe:
yield self._apply_fn(data)
def __len__(self) -> int:
if isinstance(self.datapipe, Sized):
return len(self.datapipe)
raise TypeError(f"{type(self).__name__} instance doesn't have valid length")
def _collate_helper(conversion, item):
# TODO(VitalyFedyunin): Verify that item is any sort of batch
if len(item.items) > 1:
# TODO(VitalyFedyunin): Compact all batch dataframes into one
raise RuntimeError("Only supports one DataFrame per batch")
df = item[0]
columns_name = df_wrapper.get_columns(df)
tuple_names: list = []
tuple_values: list = []
for name in conversion.keys():
if name not in columns_name:
raise RuntimeError("Conversion keys missmatch")
for name in columns_name:
if name in conversion:
if not callable(conversion[name]):
raise RuntimeError(
"Collate (DF)DataPipe requires callable as dict values"
)
collation_fn = conversion[name]
else:
# TODO(VitalyFedyunin): Add default collation into df_wrapper
try:
import torcharrow.pytorch as tap # type: ignore[import]
collation_fn = tap.rec.Default()
except Exception as e:
raise RuntimeError(
"unable to import default collation function from the TorchArrow"
) from e
tuple_names.append(str(name))
value = collation_fn(df[name])
tuple_values.append(value)
# TODO(VitalyFedyunin): We can dynamically extract types from the tuple_values here
# TODO(VitalyFedyunin): Instead of ignoring mypy error, make sure tuple_names is not empty
tpl_cls = namedtuple("CollateResult", tuple_names) # type: ignore[misc]
tuple = tpl_cls(*tuple_values)
return tuple
@functional_datapipe("collate")
class CollatorIterDataPipe(MapperIterDataPipe):
r"""
Collates samples from DataPipe to Tensor(s) by a custom collate function (functional name: ``collate``).
By default, it uses :func:`torch.utils.data.default_collate`.
.. note::
While writing a custom collate function, you can import :func:`torch.utils.data.default_collate` for the
default behavior and `functools.partial` to specify any additional arguments.
Args:
datapipe: Iterable DataPipe being collated
collate_fn: Customized collate function to collect and combine data or a batch of data.
Default function collates to Tensor(s) based on data type.
Example:
>>> # xdoctest: +SKIP
>>> # Convert integer data to float Tensor
>>> class MyIterDataPipe(torch.utils.data.IterDataPipe):
... def __init__(self, start, end):
        ...     super().__init__()
        ...     assert end > start, "this example code only works with end > start"
... self.start = start
... self.end = end
...
... def __iter__(self):
... return iter(range(self.start, self.end))
...
... def __len__(self):
... return self.end - self.start
...
>>> ds = MyIterDataPipe(start=3, end=7)
>>> print(list(ds))
[3, 4, 5, 6]
>>> def collate_fn(batch):
... return torch.tensor(batch, dtype=torch.float)
...
        >>> collated_ds = CollatorIterDataPipe(ds, collate_fn=collate_fn)
>>> print(list(collated_ds))
[tensor(3.), tensor(4.), tensor(5.), tensor(6.)]
"""
def __init__(
self,
datapipe: IterDataPipe,
conversion: Union[
Callable[..., Any], dict[Union[str, Any], Union[Callable, Any]], None
] = default_collate,
collate_fn: Optional[Callable] = None,
) -> None:
# TODO(VitalyFedyunin): Replace `Callable[..., Any]` with `Callable[[IColumn], Any]`
# TODO(VitalyFedyunin): Replace with `Dict[Union[str, IColumn], Union[Callable, Enum]]`
if collate_fn is not None:
super().__init__(datapipe, fn=collate_fn)
else:
if callable(conversion):
super().__init__(datapipe, fn=conversion)
else:
# TODO(VitalyFedyunin): Validate passed dictionary
collate_fn = functools.partial(_collate_helper, conversion)
super().__init__(datapipe, fn=collate_fn)
```
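Complementing the docstring above, a hedged sketch of the `input_col`/`output_col` handling in `MapperIterDataPipe`: the function is applied to one field of each tuple and, with `output_col=-1`, the result is appended to the row.
```py
# Hedged sketch of input_col / output_col behavior in MapperIterDataPipe.
from torch.utils.data.datapipes.iter import IterableWrapper
from torch.utils.data.datapipes.iter.callable import MapperIterDataPipe

def double(x):
    return x * 2

rows = IterableWrapper([(1, 10), (2, 20)])
# Apply `double` to index 1 of every tuple and append the result at the end (-1).
dp = MapperIterDataPipe(rows, double, input_col=1, output_col=-1)
print(list(dp))  # [(1, 10, 20), (2, 20, 40)]
```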
|
========================================================================================================================================
SOURCE CODE FILE: combinatorics.py
LINES: 1
SIZE: 6.49 KB
PATH: scripts\freecad_env\Lib\site-packages\torch\utils\data\datapipes\iter\combinatorics.py
ENCODING: utf-8
```py
# mypy: allow-untyped-defs
import random
from collections.abc import Iterator, Sized
from typing import Optional, TypeVar
import torch
from torch.utils.data.datapipes._decorator import functional_datapipe
from torch.utils.data.datapipes.datapipe import IterDataPipe
from torch.utils.data.sampler import Sampler, SequentialSampler
__all__ = [
"SamplerIterDataPipe",
"ShufflerIterDataPipe",
]
_T_co = TypeVar("_T_co", covariant=True)
class SamplerIterDataPipe(IterDataPipe[_T_co]):
r"""
Generate sample elements using the provided ``Sampler`` (defaults to :class:`SequentialSampler`).
Args:
datapipe: IterDataPipe to sample from
sampler: Sampler class to generate sample elements from input DataPipe.
Default is :class:`SequentialSampler` for IterDataPipe
"""
datapipe: IterDataPipe
sampler: Sampler
def __init__(
self,
datapipe: IterDataPipe,
sampler: type[Sampler] = SequentialSampler,
sampler_args: Optional[tuple] = None,
sampler_kwargs: Optional[dict] = None,
) -> None:
assert isinstance(
datapipe, Sized
), "Sampler class requires input datapipe implemented `__len__`"
super().__init__()
self.datapipe = datapipe
self.sampler_args = () if sampler_args is None else sampler_args
self.sampler_kwargs = {} if sampler_kwargs is None else sampler_kwargs
# https://github.com/python/mypy/pull/9629 will solve
self.sampler = sampler(*self.sampler_args, data_source=self.datapipe, **self.sampler_kwargs) # type: ignore[misc]
def __iter__(self) -> Iterator[_T_co]:
return iter(self.sampler)
def __len__(self) -> int:
# Dataset has been tested as `Sized`
if isinstance(self.sampler, Sized):
return len(self.sampler)
raise TypeError(f"{type(self).__name__} instance doesn't have valid length")
@functional_datapipe("shuffle")
class ShufflerIterDataPipe(IterDataPipe[_T_co]):
r"""
Shuffle the input DataPipe with a buffer (functional name: ``shuffle``).
The buffer with ``buffer_size`` is filled with elements from the datapipe first. Then,
each item will be yielded from the buffer by reservoir sampling via iterator.
``buffer_size`` is required to be larger than ``0``. For ``buffer_size == 1``, the
datapipe is not shuffled. In order to fully shuffle all elements from datapipe,
``buffer_size`` is required to be greater than or equal to the size of datapipe.
When it is used with :class:`torch.utils.data.DataLoader`, the methods to
set up random seed are different based on :attr:`num_workers`.
For single-process mode (:attr:`num_workers == 0`), the random seed is set before
the :class:`~torch.utils.data.DataLoader` in the main process. For multi-process
    mode (:attr:`num_workers > 0`), `worker_init_fn` is used to set up a random seed
for each worker process.
Args:
datapipe: The IterDataPipe being shuffled
buffer_size: The buffer size for shuffling (default to ``10000``)
unbatch_level: Specifies if it is necessary to unbatch source data before
applying the shuffle
Example:
>>> # xdoctest: +SKIP
>>> from torchdata.datapipes.iter import IterableWrapper
>>> dp = IterableWrapper(range(10))
>>> shuffle_dp = dp.shuffle()
>>> list(shuffle_dp)
[0, 4, 1, 6, 3, 2, 9, 5, 7, 8]
"""
datapipe: IterDataPipe[_T_co]
buffer_size: int
_buffer: list[_T_co]
_enabled: bool
_seed: Optional[int]
_rng: random.Random
def __init__(
self,
datapipe: IterDataPipe[_T_co],
*,
buffer_size: int = 10000,
unbatch_level: int = 0,
) -> None:
super().__init__()
# TODO: Performance optimization
# buffer can be a fixed size and remove expensive `append()` and `len()` operations
self._buffer: list[_T_co] = []
assert buffer_size > 0, "buffer_size should be larger than 0"
if unbatch_level == 0:
self.datapipe = datapipe
else:
self.datapipe = datapipe.unbatch(unbatch_level=unbatch_level)
self.buffer_size = buffer_size
self._enabled = True
self._seed = None
self._rng = random.Random()
def set_shuffle(self, shuffle=True):
self._enabled = shuffle
return self
def set_seed(self, seed: int):
self._seed = seed
return self
def __iter__(self) -> Iterator[_T_co]:
if not self._enabled:
yield from self.datapipe
else:
for x in self.datapipe:
if len(self._buffer) == self.buffer_size:
idx = self._rng.randint(0, len(self._buffer) - 1)
val, self._buffer[idx] = self._buffer[idx], x
yield val
else:
self._buffer.append(x)
while self._buffer:
idx = self._rng.randint(0, len(self._buffer) - 1)
yield self._buffer.pop(idx)
def __len__(self) -> int:
if isinstance(self.datapipe, Sized):
return len(self.datapipe)
raise TypeError(f"{type(self).__name__} instance doesn't have valid length")
def reset(self) -> None:
self._buffer = []
if self._enabled:
if self._seed is None:
self._seed = int(torch.empty((), dtype=torch.int64).random_().item())
self._rng.seed(self._seed)
self._seed = None
def __getstate__(self):
state = (
self.datapipe,
self.buffer_size,
self._enabled,
self._seed,
self._buffer,
self._rng.getstate(),
self._valid_iterator_id,
self._number_of_samples_yielded,
)
if IterDataPipe.getstate_hook is not None:
return IterDataPipe.getstate_hook(state)
return state
def __setstate__(self, state):
(
self.datapipe,
self.buffer_size,
self._enabled,
self._seed,
self._buffer,
rng_state,
self._valid_iterator_id,
self._number_of_samples_yielded,
) = state
self._rng = random.Random()
self._rng.setstate(rng_state)
def __del__(self):
self._buffer.clear()
```
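The buffer-based shuffle in `ShufflerIterDataPipe` can be hard to picture from the prose alone. Below is a standalone, hedged sketch of the same swap-and-yield scheme (not the library implementation), which also shows why a buffer at least as large as the input is needed for a full shuffle.
```py
# Standalone sketch of the buffer-swap shuffle scheme used above (illustrative only).
import random

def buffered_shuffle(iterable, buffer_size, seed=0):
    rng = random.Random(seed)
    buffer = []
    for x in iterable:
        if len(buffer) == buffer_size:
            # Swap the incoming item with a random buffered one and yield the evicted item.
            idx = rng.randint(0, buffer_size - 1)
            buffer[idx], x = x, buffer[idx]
            yield x
        else:
            buffer.append(x)
    while buffer:  # Drain the buffer in random order once the input is exhausted.
        yield buffer.pop(rng.randint(0, len(buffer) - 1))

print(list(buffered_shuffle(range(10), buffer_size=4)))   # only partially shuffled
print(list(buffered_shuffle(range(10), buffer_size=10)))  # fully shuffled
```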
|
====================================================================================================================================
SOURCE CODE FILE: combining.py
LINES: 1
SIZE: 27.36 KB
PATH: scripts\freecad_env\Lib\site-packages\torch\utils\data\datapipes\iter\combining.py
ENCODING: utf-8
```py
# mypy: allow-untyped-defs
import copy as copymodule
import warnings
from abc import ABC, abstractmethod
from collections import deque
from collections.abc import Iterator, Sized
from typing import Any, Callable, Literal, Optional, TypeVar
from torch.utils.data.datapipes._decorator import functional_datapipe
from torch.utils.data.datapipes._hook_iterator import _SnapshotState
from torch.utils.data.datapipes.datapipe import IterDataPipe
from torch.utils.data.datapipes.utils.common import _check_unpickable_fn, StreamWrapper
__all__ = [
"ConcaterIterDataPipe",
"DemultiplexerIterDataPipe",
"ForkerIterDataPipe",
"MultiplexerIterDataPipe",
"ZipperIterDataPipe",
]
_T_co = TypeVar("_T_co", covariant=True)
@functional_datapipe("concat")
class ConcaterIterDataPipe(IterDataPipe):
r"""
Concatenates multiple Iterable DataPipes (functional name: ``concat``).
The resulting DataPipe will yield all the elements from the first input DataPipe, before yielding from the subsequent ones.
Args:
datapipes: Iterable DataPipes being concatenated
Example:
>>> # xdoctest: +REQUIRES(module:torchdata)
>>> import random
>>> from torchdata.datapipes.iter import IterableWrapper
>>> dp1 = IterableWrapper(range(3))
>>> dp2 = IterableWrapper(range(5))
>>> list(dp1.concat(dp2))
[0, 1, 2, 0, 1, 2, 3, 4]
"""
datapipes: tuple[IterDataPipe]
def __init__(self, *datapipes: IterDataPipe):
if len(datapipes) == 0:
raise ValueError("Expected at least one DataPipe, but got nothing")
if not all(isinstance(dp, IterDataPipe) for dp in datapipes):
raise TypeError("Expected all inputs to be `IterDataPipe`")
self.datapipes = datapipes # type: ignore[assignment]
def __iter__(self) -> Iterator:
for dp in self.datapipes:
yield from dp
def __len__(self) -> int:
if all(isinstance(dp, Sized) for dp in self.datapipes):
return sum(len(dp) for dp in self.datapipes)
else:
raise TypeError(f"{type(self).__name__} instance doesn't have valid length")
@functional_datapipe("fork")
class ForkerIterDataPipe(IterDataPipe):
r"""
Creates multiple instances of the same Iterable DataPipe (functional name: ``fork``).
Args:
datapipe: Iterable DataPipe being copied
num_instances: number of instances of the datapipe to create
buffer_size: this restricts how far ahead the leading child DataPipe
can read relative to the slowest child DataPipe.
Defaults to ``1000``. Use ``-1`` for the unlimited buffer.
copy: copy strategy to use for items yielded by each branch. Supported
options are ``None`` for no copying, ``"shallow"`` for shallow object
copies, and ``"deep"`` for deep object copies. Defaults to ``None``.
Note:
All branches of the forked pipeline return the identical object unless
the copy parameter is supplied. If the object is mutable or contains
mutable objects, changing them in one branch will affect all others.
Example:
>>> # xdoctest: +REQUIRES(module:torchdata)
>>> from torchdata.datapipes.iter import IterableWrapper
>>> source_dp = IterableWrapper(range(5))
>>> dp1, dp2 = source_dp.fork(num_instances=2)
>>> list(dp1)
[0, 1, 2, 3, 4]
>>> list(dp2)
[0, 1, 2, 3, 4]
"""
def __new__(
cls,
datapipe: IterDataPipe,
num_instances: int,
buffer_size: int = 1000,
copy: Optional[Literal["shallow", "deep"]] = None,
):
if num_instances < 1:
raise ValueError(
f"Expected `num_instances` larger than 0, but {num_instances} is found"
)
if num_instances == 1:
return datapipe
container = _ForkerIterDataPipe(datapipe, num_instances, buffer_size, copy) # type: ignore[abstract]
return [_ChildDataPipe(container, i) for i in range(num_instances)]
class _ContainerTemplate(ABC):
r"""Abstract class for container ``DataPipes``. The followings are three required methods."""
@abstractmethod
def get_next_element_by_instance(self, instance_id: int):
...
@abstractmethod
def is_every_instance_exhausted(self) -> bool:
...
@abstractmethod
def reset(self) -> None:
...
@abstractmethod
def get_length_by_instance(self, instance_id: int):
r"""Raise TypeError if it's not supposed to be implemented to support `list(datapipe)`."""
def _no_op(x):
return x
class _ForkerIterDataPipe(IterDataPipe, _ContainerTemplate):
r"""
Container to hold instance-specific information on behalf of ForkerIterDataPipe.
It tracks the state of its child DataPipes, maintains the buffer, and yields the next value
as requested by the child DataPipes.
"""
def __init__(
self,
datapipe: IterDataPipe,
num_instances: int,
buffer_size: int = 1000,
copy: Optional[Literal["shallow", "deep"]] = None,
):
self.main_datapipe = datapipe
self._datapipe_iterator: Optional[Iterator[Any]] = None
self.num_instances = num_instances
self.buffer: deque = deque()
self.buffer_size = buffer_size
if self.buffer_size < 0:
warnings.warn(
"Unlimited buffer size is set for `fork`, "
"please be aware of OOM at random places",
UserWarning,
)
if copy is None:
self.copy_fn = _no_op
elif copy == "shallow":
self.copy_fn = copymodule.copy
elif copy == "deep":
self.copy_fn = copymodule.deepcopy
else:
raise ValueError(
f"Unknown copy method `{copy}` requested, choose one of None, `shallow` or `deep`."
)
self.child_pointers: list[int] = [
0
] * num_instances # Indicate the indices of the next element to get
self.slowest_ptr = 0 # The index to read by the slowest child
self.leading_ptr = 0 # The index to read by the fastest child
self.end_ptr: Optional[int] = None # The index to stop child
self._child_stop: list[bool] = [True for _ in range(num_instances)]
def __len__(self):
return len(self.main_datapipe)
def get_next_element_by_instance(self, instance_id: int):
if self._datapipe_iterator is None and self._child_stop[instance_id]:
self._datapipe_iterator = iter(self.main_datapipe)
self._snapshot_state = _SnapshotState.Iterating
for i in range(self.num_instances):
self._child_stop[i] = False
try:
while not self._child_stop[instance_id]:
self.child_pointers[instance_id] += 1
if (
self.end_ptr is not None
and self.child_pointers[instance_id] == self.end_ptr
):
self._child_stop[instance_id] = True
break
# Use buffer
if self.buffer and self.child_pointers[instance_id] <= self.leading_ptr:
idx = self.child_pointers[instance_id] - self.slowest_ptr - 1
return_val = self.buffer[idx]
else: # Retrieve one element from main datapipe
self.leading_ptr = self.child_pointers[instance_id]
try:
return_val = next(self._datapipe_iterator) # type: ignore[arg-type]
self.buffer.append(return_val)
except StopIteration:
self._child_stop[instance_id] = True
self._datapipe_iterator = None
self.end_ptr = self.leading_ptr
continue
if self.child_pointers[instance_id] == self.slowest_ptr + 1:
new_min = min(
self.child_pointers
) # Can optimize by avoiding the call to min()
if self.slowest_ptr < new_min:
self.slowest_ptr = new_min
self.buffer.popleft()
if (
self.buffer_size >= 0
and self.leading_ptr > self.buffer_size + self.slowest_ptr
):
raise BufferError(
"ForkerIterDataPipe buffer overflow,"
+ f"buffer size {self.buffer_size} is insufficient."
)
yield self.copy_fn(return_val) # type: ignore[possibly-undefined]
finally:
self._child_stop[instance_id] = True
# Cleanup _datapipe_iterator for the case that fork exits earlier
if all(self._child_stop):
self._datapipe_iterator = None
self._cleanup()
def is_every_instance_exhausted(self) -> bool:
return self.end_ptr is not None and all(self._child_stop)
def get_length_by_instance(self, instance_id: int) -> int:
return len(self.main_datapipe)
def reset(self) -> None:
self._datapipe_iterator = None
self.buffer = deque()
self.child_pointers = [0] * self.num_instances
self.slowest_ptr = 0
self.leading_ptr = 0
self.end_ptr = None
self._child_stop = [True for _ in range(self.num_instances)]
def __getstate__(self):
state = (
self.main_datapipe,
self.num_instances,
self.buffer_size,
self.copy_fn,
self._valid_iterator_id,
self._number_of_samples_yielded,
)
if IterDataPipe.getstate_hook is not None:
return IterDataPipe.getstate_hook(state)
return state
def __setstate__(self, state):
(
self.main_datapipe,
self.num_instances,
self.buffer_size,
self.copy_fn,
self._valid_iterator_id,
self._number_of_samples_yielded,
) = state
self._datapipe_iterator = None
self.buffer = deque()
self.child_pointers = [0] * self.num_instances
self.slowest_ptr = 0
self.leading_ptr = 0
self.end_ptr = None
self._child_stop = [True for _ in range(self.num_instances)]
def _cleanup(self):
while self.buffer:
d = self.buffer.popleft()
StreamWrapper.close_streams(d)
def __del__(self):
self._cleanup()
class _ChildDataPipe(IterDataPipe):
r"""
Iterable Datapipe that is a child of a main DataPipe.
The instance of this class will pass its instance_id to get the next value from its main DataPipe.
Note:
ChildDataPipe, like all other IterDataPipe, follows the single iterator per IterDataPipe constraint.
Since ChildDataPipes share a common buffer, when an iterator is created for one of the ChildDataPipes,
the previous iterators for all ChildDataPipes must be invalidated, with the exception when a ChildDataPipe
hasn't had an iterator created from it since the last invalidation. See the example below.
Example:
>>> # xdoctest: +REQUIRES(module:torchdata)
        >>> # Single Iterator per IterDataPipe Invalidation
>>> from torchdata.datapipes.iter import IterableWrapper
>>> source_dp = IterableWrapper(range(10))
>>> cdp1, cdp2 = source_dp.fork(num_instances=2)
>>> it1, it2 = iter(cdp1), iter(cdp2)
>>> it3 = iter(cdp1)
>>> # The line above invalidates `it1` and `it2`, and resets `ForkerIterDataPipe`.
>>> it4 = iter(cdp2)
>>> # The line above doesn't invalidate `it3`, because an iterator for `cdp2` hasn't been created since
>>> # the last invalidation.
Args:
main_datapipe: Main DataPipe with a method 'get_next_element_by_instance(instance_id)'
instance_id: integer identifier of this instance
"""
_is_child_datapipe: bool = True
def __init__(self, main_datapipe: IterDataPipe, instance_id: int):
assert isinstance(main_datapipe, _ContainerTemplate)
self.main_datapipe: IterDataPipe = main_datapipe
self.instance_id = instance_id
def __iter__(self):
# Note that the logic behind setting iterator ID and `reset` are handled within `hook_iterator`
# We want to separate the code for reset and yield, so that 'reset' executes before __next__ is called
return self.main_datapipe.get_next_element_by_instance(self.instance_id)
def __len__(self):
return self.main_datapipe.get_length_by_instance(self.instance_id)
# This method is called by `hook_iterator` in `_typing.py`.
def _set_main_datapipe_valid_iterator_id(self) -> int:
r"""
Update the valid iterator ID for both this DataPipe object and `main_datapipe`.
`main_datapipe.reset()` is called when the ID is incremented to a new generation.
"""
# 1. First time any child iterator is created
if self.main_datapipe._valid_iterator_id is None:
self.main_datapipe._valid_iterator_id = 0 # type: ignore[attr-defined]
# 2. This instance was already in the same generation as `main_datapipe`,
# we need to increment the ID further by 1
elif self.main_datapipe._valid_iterator_id == self._valid_iterator_id: # type: ignore[has-type]
self.main_datapipe._valid_iterator_id += 1 # type: ignore[attr-defined]
# Whenever a new generation of iterator is created, the `main_datapipe` must reset
if not self.main_datapipe.is_every_instance_exhausted():
warnings.warn(
"Some child DataPipes are not exhausted when __iter__ is called. We are resetting "
"the buffer and each child DataPipe will read from the start again.",
UserWarning,
)
self.main_datapipe.reset()
# 3. Otherwise, the iterator is behind the others, so it will just need to catch up by setting
# the instance's iterator to match that of `main_datapipe`
self._valid_iterator_id = self.main_datapipe._valid_iterator_id
return self._valid_iterator_id
# This method is called by `hook_iterator` in `_typing.py`.
def _check_valid_iterator_id(self, iterator_id) -> bool:
r"""Check the valid iterator ID against that of DataPipe object and that of `main_datapipe`."""
return (
iterator_id == self._valid_iterator_id
and iterator_id == self.main_datapipe._valid_iterator_id
)
@functional_datapipe("demux")
class DemultiplexerIterDataPipe(IterDataPipe):
r"""
Splits the input DataPipe into multiple child DataPipes, using the given classification function (functional name: ``demux``).
A list of the child DataPipes is returned from this operation.
Args:
datapipe: Iterable DataPipe being filtered
num_instances: number of instances of the DataPipe to create
classifier_fn: a function that maps values to an integer within the range ``[0, num_instances - 1]`` or ``None``
drop_none: defaults to ``False``, if ``True``, the function will skip over elements classified as ``None``
buffer_size: this defines the maximum number of inputs that the buffer can hold across all child
DataPipes while waiting for their values to be yielded.
Defaults to ``1000``. Use ``-1`` for the unlimited buffer.
Examples:
>>> # xdoctest: +REQUIRES(module:torchdata)
>>> from torchdata.datapipes.iter import IterableWrapper
>>> def odd_or_even(n):
... return n % 2
>>> source_dp = IterableWrapper(range(5))
>>> dp1, dp2 = source_dp.demux(num_instances=2, classifier_fn=odd_or_even)
>>> list(dp1)
[0, 2, 4]
>>> list(dp2)
[1, 3]
>>> # It can also filter out any element that gets `None` from the `classifier_fn`
>>> def odd_or_even_no_zero(n):
... return n % 2 if n != 0 else None
>>> dp1, dp2 = source_dp.demux(num_instances=2, classifier_fn=odd_or_even_no_zero, drop_none=True)
>>> list(dp1)
[2, 4]
>>> list(dp2)
[1, 3]
"""
def __new__(
cls,
datapipe: IterDataPipe,
num_instances: int,
classifier_fn: Callable[[_T_co], Optional[int]],
drop_none: bool = False,
buffer_size: int = 1000,
):
if num_instances < 1:
raise ValueError(
f"Expected `num_instances` larger than 0, but {num_instances} is found"
)
_check_unpickable_fn(classifier_fn)
# When num_instances == 1, demux can be replaced by filter,
        # but keep it as Demultiplexer for the sake of consistency,
        # e.g. raising an error when the classification result is out of range
container = _DemultiplexerIterDataPipe(datapipe, num_instances, classifier_fn, drop_none, buffer_size) # type: ignore[abstract]
return [_ChildDataPipe(container, i) for i in range(num_instances)]
class _DemultiplexerIterDataPipe(IterDataPipe, _ContainerTemplate):
r"""
Container to hold instance-specific information on behalf of DemultiplexerIterDataPipe.
It tracks the state of its child DataPipes, maintains the buffer, classifies and yields the next correct value
as requested by the child DataPipes.
"""
def __init__(
self,
datapipe: IterDataPipe[_T_co],
num_instances: int,
classifier_fn: Callable[[_T_co], Optional[int]],
drop_none: bool,
buffer_size: int,
):
self.main_datapipe = datapipe
self._datapipe_iterator: Optional[Iterator[Any]] = None
self.num_instances = num_instances
self.buffer_size = buffer_size
if self.buffer_size < 0:
warnings.warn(
"Unlimited buffer size is set for `demux`, "
"please be aware of OOM at random places",
UserWarning,
)
self.current_buffer_usage = 0
self.child_buffers: list[deque[_T_co]] = [deque() for _ in range(num_instances)]
self.classifier_fn = classifier_fn
self.drop_none = drop_none
self.main_datapipe_exhausted = False
self._child_stop: list[bool] = [True for _ in range(num_instances)]
def _find_next(self, instance_id: int) -> _T_co: # type: ignore[type-var]
while True:
if self.main_datapipe_exhausted or self._child_stop[instance_id]:
raise StopIteration
if self._datapipe_iterator is None:
raise ValueError(
"_datapipe_iterator has not been set, likely because this private method is called directly "
"without invoking get_next_element_by_instance() first."
)
value = next(self._datapipe_iterator)
classification = self.classifier_fn(value)
if classification is None and self.drop_none:
StreamWrapper.close_streams(value)
continue
if (
classification is None
or classification >= self.num_instances
or classification < 0
):
raise ValueError(
f"Output of the classification fn should be between 0 and {self.num_instances - 1}. "
+ f"{classification} is returned."
)
if classification == instance_id:
return value
self.child_buffers[classification].append(value)
self.current_buffer_usage += 1
if self.buffer_size >= 0 and self.current_buffer_usage > self.buffer_size:
raise BufferError(
f"DemultiplexerIterDataPipe buffer overflow, buffer size {self.buffer_size} is insufficient."
)
def get_next_element_by_instance(self, instance_id: int):
if self._datapipe_iterator is None and self._child_stop[instance_id]:
self._datapipe_iterator = iter(self.main_datapipe)
self._snapshot_state = (
_SnapshotState.Iterating
) # This is necessary for the DataPipe to reset properly.
self.main_datapipe_exhausted = False
for i in range(self.num_instances):
self._child_stop[i] = False
try:
while not self._child_stop[instance_id]:
if self.child_buffers[instance_id]:
self.current_buffer_usage -= 1
yield self.child_buffers[instance_id].popleft()
else:
try:
yield self._find_next(instance_id)
except StopIteration:
self._child_stop[instance_id] = True
self.main_datapipe_exhausted = True
self._datapipe_iterator = None
finally:
self._child_stop[instance_id] = True
# Cleanup _datapipe_iterator for the case that demux exits earlier
if all(self._child_stop):
self._datapipe_iterator = None
if self.child_buffers[instance_id]:
self._cleanup(instance_id)
def is_every_instance_exhausted(self) -> bool:
return self.main_datapipe_exhausted and all(self._child_stop)
def get_length_by_instance(self, instance_id: int) -> int:
raise TypeError
def reset(self) -> None:
self._datapipe_iterator = None
self.current_buffer_usage = 0
self.child_buffers = [deque() for _ in range(self.num_instances)]
self._child_stop = [True for _ in range(self.num_instances)]
self.main_datapipe_exhausted = False
def __getstate__(self):
state = (
self.main_datapipe,
self.num_instances,
self.buffer_size,
self.classifier_fn,
self.drop_none,
self._valid_iterator_id,
self._number_of_samples_yielded,
)
if IterDataPipe.getstate_hook is not None:
return IterDataPipe.getstate_hook(state)
return state
def __setstate__(self, state):
(
self.main_datapipe,
self.num_instances,
self.buffer_size,
self.classifier_fn,
self.drop_none,
self._valid_iterator_id,
self._number_of_samples_yielded,
) = state
self._datapipe_iterator = None
self.current_buffer_usage = 0
self.child_buffers = [deque() for _ in range(self.num_instances)]
self._child_stop = [True for _ in range(self.num_instances)]
self.main_datapipe_exhausted = False
def _cleanup(self, instance_id: Optional[int] = None):
ids = (
range(self.num_instances)
if instance_id is None
else [
instance_id,
]
)
for i in ids:
q = self.child_buffers[i]
while q:
d = q.popleft()
StreamWrapper.close_streams(d)
def __del__(self):
self._cleanup()
@functional_datapipe("mux")
class MultiplexerIterDataPipe(IterDataPipe):
r"""
Yields one element at a time from each of the input Iterable DataPipes (functional name: ``mux``).
As in, one element from the 1st input DataPipe, then one element from the 2nd DataPipe in the next iteration,
and so on. It ends when the shortest input DataPipe is exhausted.
Args:
datapipes: Iterable DataPipes that will take turn to yield their elements, until the shortest DataPipe is exhausted
Example:
>>> # xdoctest: +REQUIRES(module:torchdata)
>>> from torchdata.datapipes.iter import IterableWrapper
>>> dp1, dp2, dp3 = IterableWrapper(range(3)), IterableWrapper(range(10, 15)), IterableWrapper(range(20, 25))
>>> list(dp1.mux(dp2, dp3))
[0, 10, 20, 1, 11, 21, 2, 12, 22]
"""
def __init__(self, *datapipes):
self.datapipes = datapipes
self.buffer: list = (
[]
) # Store values to be yielded only when every iterator provides one
def __iter__(self):
iterators = [iter(x) for x in self.datapipes]
while len(iterators):
for it in iterators:
try:
value = next(it)
self.buffer.append(value)
except StopIteration:
self.buffer.clear()
return
yield from self.buffer
self.buffer.clear()
def __len__(self):
if all(isinstance(dp, Sized) for dp in self.datapipes):
return min(len(dp) for dp in self.datapipes) * len(self.datapipes)
else:
raise TypeError(f"{type(self).__name__} instance doesn't have valid length")
def reset(self) -> None:
self.buffer = []
def __getstate__(self):
state = (
self.datapipes,
self._valid_iterator_id,
self._number_of_samples_yielded,
)
if IterDataPipe.getstate_hook is not None:
return IterDataPipe.getstate_hook(state)
return state
def __setstate__(self, state):
(
self.datapipes,
self._valid_iterator_id,
self._number_of_samples_yielded,
) = state
self.buffer = []
def __del__(self):
self.buffer.clear()
@functional_datapipe("zip")
class ZipperIterDataPipe(IterDataPipe[tuple[_T_co]]):
r"""
Aggregates elements into a tuple from each of the input DataPipes (functional name: ``zip``).
The output is stopped as soon as the shortest input DataPipe is exhausted.
Args:
*datapipes: Iterable DataPipes being aggregated
Example:
>>> # xdoctest: +REQUIRES(module:torchdata)
>>> from torchdata.datapipes.iter import IterableWrapper
>>> dp1, dp2, dp3 = IterableWrapper(range(5)), IterableWrapper(range(10, 15)), IterableWrapper(range(20, 25))
>>> list(dp1.zip(dp2, dp3))
[(0, 10, 20), (1, 11, 21), (2, 12, 22), (3, 13, 23), (4, 14, 24)]
"""
datapipes: tuple[IterDataPipe]
def __init__(self, *datapipes: IterDataPipe):
if not all(isinstance(dp, IterDataPipe) for dp in datapipes):
raise TypeError(
"All inputs are required to be `IterDataPipe` for `ZipIterDataPipe`."
)
super().__init__()
self.datapipes = datapipes # type: ignore[assignment]
def __iter__(self) -> Iterator[tuple[_T_co]]:
iterators = [iter(datapipe) for datapipe in self.datapipes]
yield from zip(*iterators)
def __len__(self) -> int:
if all(isinstance(dp, Sized) for dp in self.datapipes):
return min(len(dp) for dp in self.datapipes)
else:
raise TypeError(f"{type(self).__name__} instance doesn't have valid length")
```
|
=====================================================================================================================================
SOURCE CODE FILE: filelister.py
LINES: 1
SIZE: 2.62 KB
PATH: scripts\freecad_env\Lib\site-packages\torch\utils\data\datapipes\iter\filelister.py
ENCODING: utf-8
```py
# mypy: allow-untyped-defs
from collections.abc import Iterator, Sequence
from typing import Union
from torch.utils.data.datapipes._decorator import functional_datapipe
from torch.utils.data.datapipes.datapipe import IterDataPipe
from torch.utils.data.datapipes.iter.utils import IterableWrapperIterDataPipe
from torch.utils.data.datapipes.utils.common import get_file_pathnames_from_root
__all__ = ["FileListerIterDataPipe"]
@functional_datapipe("list_files")
class FileListerIterDataPipe(IterDataPipe[str]):
r"""
Given path(s) to the root directory, yields file pathname(s) (path + filename) of files within the root directory.
Multiple root directories can be provided (functional name: ``list_files``).
Args:
root: Root directory or a sequence of root directories
masks: Unix style filter string or string list for filtering file name(s)
recursive: Whether to return pathname from nested directories or not
abspath: Whether to return relative pathname or absolute pathname
non_deterministic: Whether to return pathname in sorted order or not.
If ``False``, the results yielded from each root directory will be sorted
length: Nominal length of the datapipe
Example:
>>> # xdoctest: +SKIP
>>> from torchdata.datapipes.iter import FileLister
>>> dp = FileLister(root=".", recursive=True)
>>> list(dp)
['example.py', './data/data.tar']
"""
def __init__(
self,
root: Union[str, Sequence[str], IterDataPipe] = ".",
masks: Union[str, list[str]] = "",
*,
recursive: bool = False,
abspath: bool = False,
non_deterministic: bool = False,
length: int = -1,
) -> None:
super().__init__()
if isinstance(root, str):
root = [root]
if not isinstance(root, IterDataPipe):
root = IterableWrapperIterDataPipe(root)
self.datapipe: IterDataPipe = root
self.masks: Union[str, list[str]] = masks
self.recursive: bool = recursive
self.abspath: bool = abspath
self.non_deterministic: bool = non_deterministic
self.length: int = length
def __iter__(self) -> Iterator[str]:
for path in self.datapipe:
yield from get_file_pathnames_from_root(
path, self.masks, self.recursive, self.abspath, self.non_deterministic
)
def __len__(self):
if self.length == -1:
raise TypeError(f"{type(self).__name__} instance doesn't have valid length")
return self.length
```
|
=====================================================================================================================================
SOURCE CODE FILE: fileopener.py
LINES: 1
SIZE: 2.83 KB
PATH: scripts\freecad_env\Lib\site-packages\torch\utils\data\datapipes\iter\fileopener.py
ENCODING: utf-8
```py
# mypy: allow-untyped-defs
from collections.abc import Iterable
from io import IOBase
from typing import Optional
from torch.utils.data.datapipes._decorator import functional_datapipe
from torch.utils.data.datapipes.datapipe import IterDataPipe
from torch.utils.data.datapipes.utils.common import get_file_binaries_from_pathnames
__all__ = [
"FileOpenerIterDataPipe",
]
@functional_datapipe("open_files")
class FileOpenerIterDataPipe(IterDataPipe[tuple[str, IOBase]]):
r"""
    Given pathnames, opens files and yields a tuple of pathname and file stream (functional name: ``open_files``).
Args:
datapipe: Iterable datapipe that provides pathnames
mode: An optional string that specifies the mode in which
the file is opened by ``open()``. It defaults to ``r``, other options are
``b`` for reading in binary mode and ``t`` for text mode.
encoding: An optional string that specifies the encoding of the
underlying file. It defaults to ``None`` to match the default encoding of ``open``.
length: Nominal length of the datapipe
Note:
The opened file handles will be closed by Python's GC periodically. Users can choose
to close them explicitly.
Example:
>>> # xdoctest: +SKIP
>>> from torchdata.datapipes.iter import FileLister, FileOpener, StreamReader
>>> dp = FileLister(root=".").filter(lambda fname: fname.endswith('.txt'))
>>> dp = FileOpener(dp)
>>> dp = StreamReader(dp)
>>> list(dp)
[('./abc.txt', 'abc')]
"""
def __init__(
self,
datapipe: Iterable[str],
mode: str = "r",
encoding: Optional[str] = None,
length: int = -1,
):
super().__init__()
self.datapipe: Iterable = datapipe
self.mode: str = mode
self.encoding: Optional[str] = encoding
if self.mode not in ("b", "t", "rb", "rt", "r"):
raise ValueError(f"Invalid mode {mode}")
# TODO: enforce typing for each instance based on mode, otherwise
# `argument_validation` with this DataPipe may be potentially broken
if "b" in mode and encoding is not None:
raise ValueError("binary mode doesn't take an encoding argument")
self.length: int = length
# Remove annotation due to 'IOBase' is a general type and true type
# is determined at runtime based on mode. Some `DataPipe` requiring
# a subtype would cause mypy error.
def __iter__(self):
yield from get_file_binaries_from_pathnames(
self.datapipe, self.mode, self.encoding
)
def __len__(self):
if self.length == -1:
raise TypeError(f"{type(self).__name__} instance doesn't have valid length")
return self.length
```
|
===================================================================================================================================
SOURCE CODE FILE: grouping.py
LINES: 1
SIZE: 12.38 KB
PATH: scripts\freecad_env\Lib\site-packages\torch\utils\data\datapipes\iter\grouping.py
ENCODING: utf-8
```py
# mypy: allow-untyped-defs
import warnings
from collections import defaultdict
from collections.abc import Iterator, Sized
from typing import Any, Callable, Optional, TypeVar
import torch.utils.data.datapipes.iter.sharding
from torch.utils.data.datapipes._decorator import functional_datapipe
from torch.utils.data.datapipes.datapipe import DataChunk, IterDataPipe
from torch.utils.data.datapipes.utils.common import _check_unpickable_fn
__all__ = [
"BatcherIterDataPipe",
"GrouperIterDataPipe",
"UnBatcherIterDataPipe",
]
_T_co = TypeVar("_T_co", covariant=True)
def __getattr__(name: str):
if name in ["SHARDING_PRIORITIES", "ShardingFilterIterDataPipe"]:
warnings.warn(
f"`{name}` from `torch.utils.data.datapipes.iter.grouping` is going to be removed in PyTorch 2.1"
f"Please use `{name}` from the `torch.utils.data.datapipes.iter.sharding`",
category=FutureWarning,
stacklevel=2,
)
return getattr(torch.utils.data.datapipes.iter.sharding, name)
raise AttributeError(f"module {__name__} has no attribute {name}")
@functional_datapipe("batch")
class BatcherIterDataPipe(IterDataPipe[DataChunk]):
r"""
Creates mini-batches of data (functional name: ``batch``).
An outer dimension will be added as ``batch_size`` if ``drop_last`` is set to ``True``, or ``length % batch_size`` for the
last batch if ``drop_last`` is set to ``False``.
Args:
datapipe: Iterable DataPipe being batched
batch_size: The size of each batch
drop_last: Option to drop the last batch if it's not full
wrapper_class: wrapper to apply onto each batch (type ``List``) before yielding,
defaults to ``DataChunk``
Example:
>>> # xdoctest: +SKIP
>>> from torchdata.datapipes.iter import IterableWrapper
>>> dp = IterableWrapper(range(10))
>>> dp = dp.batch(batch_size=3, drop_last=True)
>>> list(dp)
[[0, 1, 2], [3, 4, 5], [6, 7, 8]]
"""
datapipe: IterDataPipe
batch_size: int
drop_last: bool
def __init__(
self,
datapipe: IterDataPipe,
batch_size: int,
drop_last: bool = False,
wrapper_class: type[DataChunk] = DataChunk,
) -> None:
assert batch_size > 0, "Batch size is required to be larger than 0!"
super().__init__()
self.datapipe = datapipe
self.batch_size = batch_size
self.drop_last = drop_last
self.wrapper_class = wrapper_class
def __iter__(self) -> Iterator[DataChunk]:
batch: list = []
for x in self.datapipe:
batch.append(x)
if len(batch) == self.batch_size:
yield self.wrapper_class(batch)
batch = []
if len(batch) > 0:
if not self.drop_last:
yield self.wrapper_class(batch)
def __len__(self) -> int:
if isinstance(self.datapipe, Sized):
if self.drop_last:
return len(self.datapipe) // self.batch_size
else:
return (len(self.datapipe) + self.batch_size - 1) // self.batch_size
else:
raise TypeError(f"{type(self).__name__} instance doesn't have valid length")
@functional_datapipe("unbatch")
class UnBatcherIterDataPipe(IterDataPipe):
r"""
    Undoes batching of data (functional name: ``unbatch``).
In other words, it flattens the data up to the specified level within a batched DataPipe.
Args:
datapipe: Iterable DataPipe being un-batched
unbatch_level: Defaults to ``1`` (only flattening the top level). If set to ``2``,
it will flatten the top two levels, and ``-1`` will flatten the entire DataPipe.
Example:
>>> # xdoctest: +SKIP
>>> from torchdata.datapipes.iter import IterableWrapper
>>> source_dp = IterableWrapper([[[0, 1], [2]], [[3, 4], [5]], [[6]]])
>>> dp1 = source_dp.unbatch()
>>> list(dp1)
[[0, 1], [2], [3, 4], [5], [6]]
>>> dp2 = source_dp.unbatch(unbatch_level=2)
>>> list(dp2)
[0, 1, 2, 3, 4, 5, 6]
"""
def __init__(self, datapipe: IterDataPipe, unbatch_level: int = 1):
self.datapipe = datapipe
self.unbatch_level = unbatch_level
def __iter__(self):
for element in self.datapipe:
yield from self._dive(element, unbatch_level=self.unbatch_level)
def _dive(self, element, unbatch_level):
if unbatch_level < -1:
raise ValueError("unbatch_level must be -1 or >= 0")
if unbatch_level == -1:
if isinstance(element, (list, DataChunk)):
for item in element:
yield from self._dive(item, unbatch_level=-1)
else:
yield element
elif unbatch_level == 0:
yield element
else:
if isinstance(element, (list, DataChunk)):
for item in element:
yield from self._dive(item, unbatch_level=unbatch_level - 1)
else:
raise IndexError(
f"unbatch_level {self.unbatch_level} exceeds the depth of the DataPipe"
)
@functional_datapipe("groupby")
class GrouperIterDataPipe(IterDataPipe[DataChunk]):
r"""
Groups data from IterDataPipe by keys from ``group_key_fn``, yielding a ``DataChunk`` with batch size up to ``group_size``.
(functional name: ``groupby``).
The samples are read sequentially from the source ``datapipe``, and a batch of samples belonging to the same group
will be yielded as soon as the size of the batch reaches ``group_size``. When the buffer is full,
the DataPipe will yield the largest batch with the same key, provided that its size is larger
than ``guaranteed_group_size``. If its size is smaller, it will be dropped if ``drop_remaining=True``.
After iterating through the entirety of source ``datapipe``, everything not dropped due to the buffer capacity
will be yielded from the buffer, even if the group sizes are smaller than ``guaranteed_group_size``.
Args:
datapipe: Iterable datapipe to be grouped
group_key_fn: Function used to generate group key from the data of the source datapipe
keep_key: Option to yield the matching key along with the items in a tuple,
resulting in `(key, [items])` otherwise returning [items]
buffer_size: The size of buffer for ungrouped data
group_size: The max size of each group, a batch is yielded as soon as it reaches this size
guaranteed_group_size: The guaranteed minimum group size to be yielded in case the buffer is full
drop_remaining: Specifies if the group smaller than ``guaranteed_group_size`` will be dropped from buffer
when the buffer is full
Example:
>>> import os
>>> # xdoctest: +SKIP
>>> from torchdata.datapipes.iter import IterableWrapper
>>> def group_fn(file):
... return os.path.basename(file).split(".")[0]
>>> source_dp = IterableWrapper(["a.png", "b.png", "a.json", "b.json", "a.jpg", "c.json"])
>>> dp0 = source_dp.groupby(group_key_fn=group_fn)
>>> list(dp0)
[['a.png', 'a.json', 'a.jpg'], ['b.png', 'b.json'], ['c.json']]
>>> # A group is yielded as soon as its size equals to `group_size`
>>> dp1 = source_dp.groupby(group_key_fn=group_fn, group_size=2)
>>> list(dp1)
[['a.png', 'a.json'], ['b.png', 'b.json'], ['a.jpg'], ['c.json']]
>>> # Scenario where `buffer` is full, and group 'a' needs to be yielded since its size > `guaranteed_group_size`
>>> dp2 = source_dp.groupby(group_key_fn=group_fn, buffer_size=3, group_size=3, guaranteed_group_size=2)
>>> list(dp2)
[['a.png', 'a.json'], ['b.png', 'b.json'], ['a.jpg'], ['c.json']]
"""
def __init__(
self,
datapipe: IterDataPipe[_T_co],
group_key_fn: Callable[[_T_co], Any],
*,
keep_key: bool = False,
buffer_size: int = 10000,
group_size: Optional[int] = None,
guaranteed_group_size: Optional[int] = None,
drop_remaining: bool = False,
):
_check_unpickable_fn(group_key_fn)
self.datapipe = datapipe
self.group_key_fn = group_key_fn
self.keep_key = keep_key
self.max_buffer_size = buffer_size
self.buffer_elements: defaultdict[Any, list] = defaultdict(list)
self.curr_buffer_size = 0
self.group_size = group_size
self.guaranteed_group_size = None
if group_size is not None and buffer_size is not None:
assert 0 < group_size <= buffer_size
self.guaranteed_group_size = group_size
if guaranteed_group_size is not None:
assert group_size is not None and 0 < guaranteed_group_size <= group_size
self.guaranteed_group_size = guaranteed_group_size
self.drop_remaining = drop_remaining
self.wrapper_class = DataChunk
def _remove_biggest_key(self):
biggest_key = None
biggest_size = 0
result_to_yield = None
for findkey in self.buffer_elements.keys():
if len(self.buffer_elements[findkey]) > biggest_size:
biggest_size = len(self.buffer_elements[findkey])
biggest_key = findkey
if (
self.guaranteed_group_size is not None
and biggest_size < self.guaranteed_group_size
and not self.drop_remaining
):
raise RuntimeError(
"Failed to group items", str(self.buffer_elements[biggest_key])
)
if (
self.guaranteed_group_size is None
or biggest_size >= self.guaranteed_group_size
):
result_to_yield = self.buffer_elements[biggest_key]
self.curr_buffer_size -= biggest_size
del self.buffer_elements[biggest_key]
return result_to_yield
def __iter__(self):
for x in self.datapipe:
key = self.group_key_fn(x)
self.buffer_elements[key].append(x)
self.curr_buffer_size += 1
if self.group_size is not None and self.group_size == len(
self.buffer_elements[key]
):
result: DataChunk[Any] = self.wrapper_class(self.buffer_elements[key])
yield (key, result) if self.keep_key else result
self.curr_buffer_size -= len(self.buffer_elements[key])
del self.buffer_elements[key]
if self.curr_buffer_size == self.max_buffer_size:
result_to_yield = self._remove_biggest_key()
if result_to_yield is not None:
result = self.wrapper_class(result_to_yield)
yield (key, result) if self.keep_key else result
for key in tuple(self.buffer_elements.keys()):
result = self.wrapper_class(self.buffer_elements.pop(key))
self.curr_buffer_size -= len(result)
yield (key, result) if self.keep_key else result
def reset(self) -> None:
self.curr_buffer_size = 0
self.buffer_elements = defaultdict(list)
def __getstate__(self):
state = (
self.datapipe,
self.group_key_fn,
self.keep_key,
self.max_buffer_size,
self.group_size,
self.guaranteed_group_size,
self.drop_remaining,
self.wrapper_class,
self._valid_iterator_id,
self._number_of_samples_yielded,
)
if IterDataPipe.getstate_hook is not None:
return IterDataPipe.getstate_hook(state)
return state
def __setstate__(self, state):
(
self.datapipe,
self.group_key_fn,
self.keep_key,
self.max_buffer_size,
self.group_size,
self.guaranteed_group_size,
self.drop_remaining,
self.wrapper_class,
self._valid_iterator_id,
self._number_of_samples_yielded,
) = state
self.curr_buffer_size = 0
self.buffer_elements = defaultdict(list)
def __del__(self):
self.buffer_elements.clear()
```
|
========================================================================================================================================
SOURCE CODE FILE: routeddecoder.py
LINES: 1
SIZE: 2.74 KB
PATH: scripts\freecad_env\Lib\site-packages\torch\utils\data\datapipes\iter\routeddecoder.py
ENCODING: utf-8
```py
from collections.abc import Iterable, Iterator, Sized
from io import BufferedIOBase
from typing import Any, Callable
from torch.utils.data.datapipes._decorator import functional_datapipe
from torch.utils.data.datapipes.datapipe import IterDataPipe
from torch.utils.data.datapipes.utils.common import _deprecation_warning
from torch.utils.data.datapipes.utils.decoder import (
basichandlers as decoder_basichandlers,
Decoder,
extension_extract_fn,
imagehandler as decoder_imagehandler,
)
__all__ = ["RoutedDecoderIterDataPipe"]
@functional_datapipe("routed_decode")
class RoutedDecoderIterDataPipe(IterDataPipe[tuple[str, Any]]):
r"""
Decodes binary streams from input DataPipe, yields pathname and decoded data in a tuple.
(functional name: ``routed_decode``)
Args:
datapipe: Iterable datapipe that provides pathname and binary stream in tuples
handlers: Optional user defined decoder handlers. If ``None``, basic and image decoder
            handlers will be set as default. If multiple handlers are provided, the priority
order follows the order of handlers (the first handler has the top priority)
key_fn: Function for decoder to extract key from pathname to dispatch handlers.
Default is set to extract file extension from pathname
Note:
        When ``key_fn`` is specified to return anything other than the file extension, the default
        handlers will not work and users need to specify a custom handler. A custom handler
        could use a regex to determine whether it is eligible to handle the data.
"""
def __init__(
self,
datapipe: Iterable[tuple[str, BufferedIOBase]],
*handlers: Callable,
key_fn: Callable = extension_extract_fn,
) -> None:
super().__init__()
self.datapipe: Iterable[tuple[str, BufferedIOBase]] = datapipe
if not handlers:
handlers = (decoder_basichandlers, decoder_imagehandler("torch"))
self.decoder = Decoder(*handlers, key_fn=key_fn)
_deprecation_warning(
type(self).__name__,
deprecation_version="1.12",
removal_version="1.13",
old_functional_name="routed_decode",
)
def add_handler(self, *handler: Callable) -> None:
self.decoder.add_handler(*handler)
def __iter__(self) -> Iterator[tuple[str, Any]]:
for data in self.datapipe:
pathname = data[0]
result = self.decoder(data)
yield (pathname, result[pathname])
def __len__(self) -> int:
if isinstance(self.datapipe, Sized):
return len(self.datapipe)
raise TypeError(f"{type(self).__name__} instance doesn't have valid length")
```
|
====================================================================================================================================
SOURCE CODE FILE: selecting.py
LINES: 1
SIZE: 3.33 KB
PATH: scripts\freecad_env\Lib\site-packages\torch\utils\data\datapipes\iter\selecting.py
ENCODING: utf-8
```py
# mypy: allow-untyped-defs
from collections.abc import Iterator
from typing import Callable, TypeVar
from torch.utils.data.datapipes._decorator import functional_datapipe
from torch.utils.data.datapipes.dataframe import dataframe_wrapper as df_wrapper
from torch.utils.data.datapipes.datapipe import IterDataPipe
from torch.utils.data.datapipes.utils.common import (
_check_unpickable_fn,
StreamWrapper,
validate_input_col,
)
__all__ = ["FilterIterDataPipe"]
_T = TypeVar("_T")
_T_co = TypeVar("_T_co", covariant=True)
@functional_datapipe("filter")
class FilterIterDataPipe(IterDataPipe[_T_co]):
r"""
Filters out elements from the source datapipe according to input ``filter_fn`` (functional name: ``filter``).
Args:
datapipe: Iterable DataPipe being filtered
filter_fn: Customized function mapping an element to a boolean.
input_col: Index or indices of data which ``filter_fn`` is applied, such as:
- ``None`` as default to apply ``filter_fn`` to the data directly.
- Integer(s) is used for list/tuple.
- Key(s) is used for dict.
Example:
>>> # xdoctest: +SKIP
>>> from torchdata.datapipes.iter import IterableWrapper
>>> def is_even(n):
... return n % 2 == 0
>>> dp = IterableWrapper(range(5))
>>> filter_dp = dp.filter(filter_fn=is_even)
>>> list(filter_dp)
[0, 2, 4]
"""
datapipe: IterDataPipe[_T_co]
filter_fn: Callable
def __init__(
self,
datapipe: IterDataPipe[_T_co],
filter_fn: Callable,
input_col=None,
) -> None:
super().__init__()
self.datapipe = datapipe
_check_unpickable_fn(filter_fn)
self.filter_fn = filter_fn # type: ignore[assignment]
self.input_col = input_col
validate_input_col(filter_fn, input_col)
def _apply_filter_fn(self, data) -> bool:
if self.input_col is None:
return self.filter_fn(data)
elif isinstance(self.input_col, (list, tuple)):
args = tuple(data[col] for col in self.input_col)
return self.filter_fn(*args)
else:
return self.filter_fn(data[self.input_col])
def __iter__(self) -> Iterator[_T_co]:
for data in self.datapipe:
condition, filtered = self._returnIfTrue(data)
if condition:
yield filtered
else:
StreamWrapper.close_streams(data)
def _returnIfTrue(self, data: _T) -> tuple[bool, _T]:
condition = self._apply_filter_fn(data)
if df_wrapper.is_column(condition):
# We are operating on DataFrames filter here
result = []
for idx, mask in enumerate(df_wrapper.iterate(condition)):
if mask:
result.append(df_wrapper.get_item(data, idx))
if len(result):
return True, df_wrapper.concat(result)
else:
return False, None # type: ignore[return-value]
if not isinstance(condition, bool):
raise ValueError(
"Boolean output is required for `filter_fn` of FilterIterDataPipe, got",
type(condition),
)
return condition, data
```
|
===================================================================================================================================
SOURCE CODE FILE: sharding.py
LINES: 1
SIZE: 3.53 KB
PATH: scripts\freecad_env\Lib\site-packages\torch\utils\data\datapipes\iter\sharding.py
ENCODING: utf-8
```py
# mypy: allow-untyped-defs
from collections.abc import Sized
from enum import IntEnum
from torch.utils.data.datapipes._decorator import functional_datapipe
from torch.utils.data.datapipes.datapipe import IterDataPipe
__all__ = [
"SHARDING_PRIORITIES",
"ShardingFilterIterDataPipe",
]
class SHARDING_PRIORITIES(IntEnum):
DEFAULT = 1
DISTRIBUTED = 2
MULTIPROCESSING = 3
class _ShardingIterDataPipe(IterDataPipe):
def apply_sharding(
self,
num_of_instances: int,
instance_id: int,
sharding_group: SHARDING_PRIORITIES,
):
raise NotImplementedError
@functional_datapipe("sharding_filter")
class ShardingFilterIterDataPipe(_ShardingIterDataPipe):
r"""
Wrapper that allows DataPipe to be sharded (functional name: ``sharding_filter``).
After ``apply_sharding`` is called, each instance of the DataPipe (on different workers) will have every `n`-th element of the
original DataPipe, where `n` equals to the number of instances.
Args:
source_datapipe: Iterable DataPipe that will be sharded
"""
def __init__(self, source_datapipe: IterDataPipe, sharding_group_filter=None):
self.source_datapipe = source_datapipe
self.sharding_group_filter = sharding_group_filter
self.groups: dict[int, tuple[int, int]] = {}
self.num_of_instances = 1
self.instance_id = 0
self._update_num_of_instances()
def apply_sharding(
self, num_of_instances, instance_id, sharding_group=SHARDING_PRIORITIES.DEFAULT
):
if instance_id >= num_of_instances:
raise ValueError(
f"instance_id({instance_id}) should be smaller than num_of_instances({num_of_instances})"
)
if sharding_group == SHARDING_PRIORITIES.DEFAULT:
if len(self.groups) and SHARDING_PRIORITIES.DEFAULT not in self.groups:
raise RuntimeError(
"ShardingFilter cannot mix DEFAULT and non DEFAULT groups"
)
else:
if SHARDING_PRIORITIES.DEFAULT in self.groups:
raise RuntimeError(
"ShardingFilter cannot mix DEFAULT and non DEFAULT groups"
)
self.groups[sharding_group] = (num_of_instances, instance_id)
self._update_num_of_instances()
def _update_num_of_instances(self):
sorted_sharding_groups = [
self.groups[key]
for key in sorted(self.groups.keys())
if self.sharding_group_filter is None or key == self.sharding_group_filter
]
sorted_sharding_groups.reverse()
self.num_of_instances = 1
self.instance_id = 0
for group_num_of_instances, group_instance_id in sorted_sharding_groups:
self.instance_id += self.num_of_instances * group_instance_id
self.num_of_instances *= group_num_of_instances
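    # Worked example for the composition above (illustrative numbers): with a
    # MULTIPROCESSING group of 4 workers (instance_id=1) and a DISTRIBUTED group
    # of 2 ranks (instance_id=1), the groups are applied in descending priority,
    # giving num_of_instances = 4 * 2 = 8 and instance_id = 1 + 4 * 1 = 5,
    # i.e. this shard keeps every 8th element starting at global index 5.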
def __iter__(self):
for i, item in enumerate(self.source_datapipe):
if i % self.num_of_instances == self.instance_id:
yield item
def __len__(self):
if isinstance(self.source_datapipe, Sized):
return len(self.source_datapipe) // self.num_of_instances + (
1
if (
self.instance_id < len(self.source_datapipe) % self.num_of_instances
)
else 0
)
raise TypeError(f"{type(self).__name__} instance doesn't have valid length")
```
|
=======================================================================================================================================
SOURCE CODE FILE: streamreader.py
LINES: 1
SIZE: 1.57 KB
PATH: scripts\freecad_env\Lib\site-packages\torch\utils\data\datapipes\iter\streamreader.py
ENCODING: utf-8
```py
from collections.abc import Iterator
from io import IOBase
from typing import Optional
from torch.utils.data.datapipes._decorator import functional_datapipe
from torch.utils.data.datapipes.datapipe import IterDataPipe
__all__ = ["StreamReaderIterDataPipe"]
@functional_datapipe("read_from_stream")
class StreamReaderIterDataPipe(IterDataPipe[tuple[str, bytes]]):
r"""
    Given IO streams and their label names, yields bytes together with the label name as a tuple.
(functional name: ``read_from_stream``).
Args:
datapipe: Iterable DataPipe provides label/URL and byte stream
chunk: Number of bytes to be read from stream per iteration.
If ``None``, all bytes will be read until the EOF.
Example:
>>> # xdoctest: +SKIP
>>> from torchdata.datapipes.iter import IterableWrapper, StreamReader
>>> from io import StringIO
>>> dp = IterableWrapper([("alphabet", StringIO("abcde"))])
>>> list(StreamReader(dp, chunk=1))
[('alphabet', 'a'), ('alphabet', 'b'), ('alphabet', 'c'), ('alphabet', 'd'), ('alphabet', 'e')]
"""
def __init__(
self, datapipe: IterDataPipe[tuple[str, IOBase]], chunk: Optional[int] = None
):
self.datapipe = datapipe
self.chunk = chunk
def __iter__(self) -> Iterator[tuple[str, bytes]]:
for furl, stream in self.datapipe:
while True:
d = stream.read(self.chunk)
if not d:
stream.close()
break
yield (furl, d)
```
|
================================================================================================================================
SOURCE CODE FILE: utils.py
LINES: 1
SIZE: 1.82 KB
PATH: scripts\freecad_env\Lib\site-packages\torch\utils\data\datapipes\iter\utils.py
ENCODING: utf-8
```py
# mypy: allow-untyped-defs
import copy
import warnings
from torch.utils.data.datapipes.datapipe import IterDataPipe
__all__ = ["IterableWrapperIterDataPipe"]
class IterableWrapperIterDataPipe(IterDataPipe):
r"""
Wraps an iterable object to create an IterDataPipe.
Args:
iterable: Iterable object to be wrapped into an IterDataPipe
deepcopy: Option to deepcopy input iterable object for each
iterator. The copy is made when the first element is read in ``iter()``.
.. note::
If ``deepcopy`` is explicitly set to ``False``, users should ensure
that the data pipeline doesn't contain any in-place operations over
the iterable instance to prevent data inconsistency across iterations.
Example:
>>> # xdoctest: +SKIP
>>> from torchdata.datapipes.iter import IterableWrapper
>>> dp = IterableWrapper(range(10))
>>> list(dp)
[0, 1, 2, 3, 4, 5, 6, 7, 8, 9]
"""
def __init__(self, iterable, deepcopy=True):
self.iterable = iterable
self.deepcopy = deepcopy
def __iter__(self):
source_data = self.iterable
if self.deepcopy:
try:
source_data = copy.deepcopy(self.iterable)
# For the case that data cannot be deep-copied,
# all in-place operations will affect iterable variable.
# When this DataPipe is iterated second time, it will
# yield modified items.
except TypeError:
warnings.warn(
"The input iterable can not be deepcopied, "
"please be aware of in-place modification would affect source data."
)
yield from source_data
def __len__(self):
return len(self.iterable)
```
|
==================================================================================================================================
SOURCE CODE FILE: __init__.py
LINES: 1
SIZE: 0.67 KB
PATH: scripts\freecad_env\Lib\site-packages\torch\utils\data\datapipes\map\__init__.py
ENCODING: utf-8
```py
# Functional DataPipe
from torch.utils.data.datapipes.map.callable import MapperMapDataPipe as Mapper
from torch.utils.data.datapipes.map.combinatorics import (
ShufflerIterDataPipe as Shuffler,
)
from torch.utils.data.datapipes.map.combining import (
ConcaterMapDataPipe as Concater,
ZipperMapDataPipe as Zipper,
)
from torch.utils.data.datapipes.map.grouping import BatcherMapDataPipe as Batcher
from torch.utils.data.datapipes.map.utils import (
SequenceWrapperMapDataPipe as SequenceWrapper,
)
__all__ = ["Batcher", "Concater", "Mapper", "SequenceWrapper", "Shuffler", "Zipper"]
# Please keep this list sorted
assert __all__ == sorted(__all__)
```
|
==================================================================================================================================
SOURCE CODE FILE: callable.py
LINES: 1
SIZE: 1.88 KB
PATH: scripts\freecad_env\Lib\site-packages\torch\utils\data\datapipes\map\callable.py
ENCODING: utf-8
```py
# mypy: allow-untyped-defs
from typing import Callable, TypeVar
from torch.utils.data.datapipes._decorator import functional_datapipe
from torch.utils.data.datapipes.datapipe import MapDataPipe
from torch.utils.data.datapipes.utils.common import _check_unpickable_fn
__all__ = ["MapperMapDataPipe", "default_fn"]
_T_co = TypeVar("_T_co", covariant=True)
# Default function to return each item directly
# In order to keep datapipe picklable, eliminates the usage
# of python lambda function
def default_fn(data):
return data
@functional_datapipe("map")
class MapperMapDataPipe(MapDataPipe[_T_co]):
r"""
Apply the input function over each item from the source DataPipe (functional name: ``map``).
The function can be any regular Python function or partial object. Lambda
function is not recommended as it is not supported by pickle.
Args:
datapipe: Source MapDataPipe
fn: Function being applied to each item
Example:
>>> # xdoctest: +SKIP
>>> from torchdata.datapipes.map import SequenceWrapper, Mapper
>>> def add_one(x):
... return x + 1
>>> dp = SequenceWrapper(range(10))
>>> map_dp_1 = dp.map(add_one)
>>> list(map_dp_1)
[1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
>>> map_dp_2 = Mapper(dp, lambda x: x + 1)
>>> list(map_dp_2)
[1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
"""
datapipe: MapDataPipe
fn: Callable
def __init__(
self,
datapipe: MapDataPipe,
fn: Callable = default_fn,
) -> None:
super().__init__()
self.datapipe = datapipe
_check_unpickable_fn(fn)
self.fn = fn # type: ignore[assignment]
def __len__(self) -> int:
return len(self.datapipe)
def __getitem__(self, index) -> _T_co:
return self.fn(self.datapipe[index])
```
|
=======================================================================================================================================
SOURCE CODE FILE: combinatorics.py
LINES: 1
SIZE: 4.22 KB
PATH: scripts\freecad_env\Lib\site-packages\torch\utils\data\datapipes\map\combinatorics.py
ENCODING: utf-8
```py
# mypy: allow-untyped-defs
import random
from collections.abc import Iterator
from typing import Optional, TypeVar
import torch
from torch.utils.data.datapipes.datapipe import IterDataPipe, MapDataPipe
__all__ = ["ShufflerIterDataPipe"]
_T_co = TypeVar("_T_co", covariant=True)
# @functional_datapipe('shuffle')
class ShufflerIterDataPipe(IterDataPipe[_T_co]):
r"""
Shuffle the input MapDataPipe via its indices (functional name: ``shuffle``).
When it is used with :class:`~torch.utils.data.DataLoader`, the methods to
set up random seed are different based on :attr:`num_workers`.
For single-process mode (:attr:`num_workers == 0`), the random seed is set before
the :class:`~torch.utils.data.DataLoader` in the main process. For multi-process
    mode (:attr:`num_workers > 0`), ``worker_init_fn`` is used to set up a random seed
for each worker process.
Args:
datapipe: MapDataPipe being shuffled
indices: a list of indices of the MapDataPipe. If not provided, we assume it uses 0-based indexing
Example:
>>> # xdoctest: +SKIP
>>> from torchdata.datapipes.map import SequenceWrapper
>>> dp = SequenceWrapper(range(10))
>>> shuffle_dp = dp.shuffle().set_seed(0)
>>> list(shuffle_dp)
[7, 8, 1, 5, 3, 4, 2, 0, 9, 6]
>>> list(shuffle_dp)
[6, 1, 9, 5, 2, 4, 7, 3, 8, 0]
>>> # Reset seed for Shuffler
>>> shuffle_dp = shuffle_dp.set_seed(0)
>>> list(shuffle_dp)
[7, 8, 1, 5, 3, 4, 2, 0, 9, 6]
Note:
        Even though this ``shuffle`` operation takes a ``MapDataPipe`` as input, it returns an
        ``IterDataPipe`` rather than a ``MapDataPipe``, because a ``MapDataPipe`` should be insensitive
        to the order of its data to allow random reads, whereas an ``IterDataPipe`` depends on the
        order of data during processing.
"""
datapipe: MapDataPipe[_T_co]
_enabled: bool
_seed: Optional[int]
_rng: random.Random
def __init__(
self,
datapipe: MapDataPipe[_T_co],
*,
indices: Optional[list] = None,
) -> None:
super().__init__()
self.datapipe = datapipe
self.indices = list(range(len(datapipe))) if indices is None else indices
self._enabled = True
self._seed = None
self._rng = random.Random()
self._shuffled_indices: list = self.indices
def set_shuffle(self, shuffle=True):
self._enabled = shuffle
return self
def set_seed(self, seed: int):
self._seed = seed
return self
def __iter__(self) -> Iterator[_T_co]:
if not self._enabled:
for idx in self.indices:
yield self.datapipe[idx]
else:
while self._shuffled_indices:
idx = self._shuffled_indices.pop()
yield self.datapipe[idx]
def reset(self) -> None:
if self._enabled and self._seed is None:
self._seed = int(torch.empty((), dtype=torch.int64).random_().item())
self._rng.seed(self._seed)
self._seed = None
self._shuffled_indices = self._rng.sample(self.indices, len(self.indices))
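        # The seed is consumed above; unless `set_seed` is called again, the next
        # reset draws a fresh random seed, which is why consecutive iterations in
        # the docstring example yield different orders.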
def __len__(self) -> int:
return len(self.datapipe)
def __getstate__(self):
state = (
self.datapipe,
self.indices,
self._enabled,
self._seed,
self._rng.getstate(),
self._shuffled_indices,
self._valid_iterator_id,
self._number_of_samples_yielded,
)
if IterDataPipe.getstate_hook is not None:
return IterDataPipe.getstate_hook(state)
return state
def __setstate__(self, state):
(
self.datapipe,
self.indices,
self._enabled,
self._seed,
rng_state,
self._shuffled_indices,
self._valid_iterator_id,
self._number_of_samples_yielded,
) = state
self._rng = random.Random()
self._rng.setstate(rng_state)
MapDataPipe.register_datapipe_as_function("shuffle", ShufflerIterDataPipe)
```
|
===================================================================================================================================
SOURCE CODE FILE: combining.py
LINES: 1
SIZE: 3.71 KB
PATH: scripts\freecad_env\Lib\site-packages\torch\utils\data\datapipes\map\combining.py
ENCODING: utf-8
```py
# mypy: allow-untyped-defs
from collections.abc import Sized
from typing import TypeVar
from torch.utils.data.datapipes._decorator import functional_datapipe
from torch.utils.data.datapipes.datapipe import MapDataPipe
__all__ = ["ConcaterMapDataPipe", "ZipperMapDataPipe"]
_T_co = TypeVar("_T_co", covariant=True)
@functional_datapipe("concat")
class ConcaterMapDataPipe(MapDataPipe):
r"""
Concatenate multiple Map DataPipes (functional name: ``concat``).
    The new index range is the cumulative sum of the lengths of the source DataPipes.
For example, if there are 2 source DataPipes both with length 5,
index 0 to 4 of the resulting `ConcatMapDataPipe` would refer to
elements of the first DataPipe, and 5 to 9 would refer to elements
of the second DataPipe.
Args:
datapipes: Map DataPipes being concatenated
Example:
>>> # xdoctest: +SKIP
>>> from torchdata.datapipes.map import SequenceWrapper
>>> dp1 = SequenceWrapper(range(3))
>>> dp2 = SequenceWrapper(range(3))
>>> concat_dp = dp1.concat(dp2)
>>> list(concat_dp)
[0, 1, 2, 0, 1, 2]
"""
datapipes: tuple[MapDataPipe]
def __init__(self, *datapipes: MapDataPipe):
if len(datapipes) == 0:
raise ValueError("Expected at least one DataPipe, but got nothing")
if not all(isinstance(dp, MapDataPipe) for dp in datapipes):
raise TypeError("Expected all inputs to be `MapDataPipe`")
if not all(isinstance(dp, Sized) for dp in datapipes):
raise TypeError("Expected all inputs to be `Sized`")
self.datapipes = datapipes # type: ignore[assignment]
def __getitem__(self, index) -> _T_co: # type: ignore[type-var]
offset = 0
for dp in self.datapipes:
if index - offset < len(dp):
return dp[index - offset]
else:
offset += len(dp)
raise IndexError(f"Index {index} is out of range.")
def __len__(self) -> int:
return sum(len(dp) for dp in self.datapipes)
@functional_datapipe("zip")
class ZipperMapDataPipe(MapDataPipe[tuple[_T_co, ...]]):
r"""
Aggregates elements into a tuple from each of the input DataPipes (functional name: ``zip``).
    This MapDataPipe is out of bounds as soon as the shortest input DataPipe is exhausted.
Args:
*datapipes: Map DataPipes being aggregated
Example:
>>> # xdoctest: +SKIP
>>> from torchdata.datapipes.map import SequenceWrapper
>>> dp1 = SequenceWrapper(range(3))
>>> dp2 = SequenceWrapper(range(10, 13))
>>> zip_dp = dp1.zip(dp2)
>>> list(zip_dp)
[(0, 10), (1, 11), (2, 12)]
"""
datapipes: tuple[MapDataPipe[_T_co], ...]
def __init__(self, *datapipes: MapDataPipe[_T_co]) -> None:
if len(datapipes) == 0:
raise ValueError("Expected at least one DataPipe, but got nothing")
if not all(isinstance(dp, MapDataPipe) for dp in datapipes):
raise TypeError("Expected all inputs to be `MapDataPipe`")
if not all(isinstance(dp, Sized) for dp in datapipes):
raise TypeError("Expected all inputs to be `Sized`")
self.datapipes = datapipes
def __getitem__(self, index) -> tuple[_T_co, ...]:
res = []
for dp in self.datapipes:
try:
res.append(dp[index])
except IndexError as e:
raise IndexError(
f"Index {index} is out of range for one of the input MapDataPipes {dp}."
) from e
return tuple(res)
def __len__(self) -> int:
return min(len(dp) for dp in self.datapipes)
```
|
==================================================================================================================================
SOURCE CODE FILE: grouping.py
LINES: 1
SIZE: 2.47 KB
PATH: scripts\freecad_env\Lib\site-packages\torch\utils\data\datapipes\map\grouping.py
ENCODING: utf-8
```py
# mypy: allow-untyped-defs
from collections.abc import Sized
from typing import TypeVar
from torch.utils.data.datapipes._decorator import functional_datapipe
from torch.utils.data.datapipes.datapipe import DataChunk, MapDataPipe
__all__ = ["BatcherMapDataPipe"]
_T = TypeVar("_T")
@functional_datapipe("batch")
class BatcherMapDataPipe(MapDataPipe[DataChunk]):
r"""
Create mini-batches of data (functional name: ``batch``).
An outer dimension will be added as ``batch_size`` if ``drop_last`` is set to ``True``,
or ``length % batch_size`` for the last batch if ``drop_last`` is set to ``False``.
Args:
datapipe: Iterable DataPipe being batched
batch_size: The size of each batch
drop_last: Option to drop the last batch if it's not full
Example:
>>> # xdoctest: +SKIP
>>> from torchdata.datapipes.map import SequenceWrapper
>>> dp = SequenceWrapper(range(10))
>>> batch_dp = dp.batch(batch_size=2)
>>> list(batch_dp)
[[0, 1], [2, 3], [4, 5], [6, 7], [8, 9]]
"""
datapipe: MapDataPipe
batch_size: int
drop_last: bool
def __init__(
self,
datapipe: MapDataPipe[_T],
batch_size: int,
drop_last: bool = False,
wrapper_class: type[DataChunk] = DataChunk,
) -> None:
assert batch_size > 0, "Batch size is required to be larger than 0!"
super().__init__()
self.datapipe = datapipe
self.batch_size = batch_size
self.drop_last = drop_last
self.wrapper_class = wrapper_class
def __getitem__(self, index) -> DataChunk:
batch: list = []
indices = range(index * self.batch_size, (index + 1) * self.batch_size)
try:
batch.extend(self.datapipe[i] for i in indices)
return self.wrapper_class(batch)
except IndexError as e:
if not self.drop_last and len(batch) > 0:
return self.wrapper_class(batch)
else:
raise IndexError(f"Index {index} is out of bound.") from e
def __len__(self) -> int:
if isinstance(self.datapipe, Sized):
if self.drop_last:
return len(self.datapipe) // self.batch_size
else:
return (len(self.datapipe) + self.batch_size - 1) // self.batch_size
else:
raise TypeError(f"{type(self).__name__} instance doesn't have valid length")
```
|
===============================================================================================================================
SOURCE CODE FILE: utils.py
LINES: 1
SIZE: 1.59 KB
PATH: scripts\freecad_env\Lib\site-packages\torch\utils\data\datapipes\map\utils.py
ENCODING: utf-8
```py
# mypy: allow-untyped-defs
import copy
import warnings
from torch.utils.data.datapipes.datapipe import MapDataPipe
__all__ = ["SequenceWrapperMapDataPipe"]
class SequenceWrapperMapDataPipe(MapDataPipe):
r"""
Wraps a sequence object into a MapDataPipe.
Args:
        sequence: Sequence object to be wrapped into a MapDataPipe
deepcopy: Option to deepcopy input sequence object
.. note::
If ``deepcopy`` is set to False explicitly, users should ensure
that data pipeline doesn't contain any in-place operations over
the iterable instance, in order to prevent data inconsistency
across iterations.
Example:
>>> # xdoctest: +SKIP
>>> from torchdata.datapipes.map import SequenceWrapper
>>> dp = SequenceWrapper(range(10))
>>> list(dp)
[0, 1, 2, 3, 4, 5, 6, 7, 8, 9]
>>> dp = SequenceWrapper({'a': 100, 'b': 200, 'c': 300, 'd': 400})
>>> dp['a']
100
"""
def __init__(self, sequence, deepcopy=True):
if deepcopy:
try:
self.sequence = copy.deepcopy(sequence)
except TypeError:
warnings.warn(
"The input sequence can not be deepcopied, "
"please be aware of in-place modification would affect source data"
)
self.sequence = sequence
else:
self.sequence = sequence
def __getitem__(self, index):
return self.sequence[index]
def __len__(self):
return len(self.sequence)
```
|
====================================================================================================================================
SOURCE CODE FILE: __init__.py
LINES: 1
SIZE: 0.00 KB
PATH: scripts\freecad_env\Lib\site-packages\torch\utils\data\datapipes\utils\__init__.py
ENCODING: utf-8
```py
```
|
==================================================================================================================================
SOURCE CODE FILE: common.py
LINES: 4
SIZE: 13.78 KB
PATH: scripts\freecad_env\Lib\site-packages\torch\utils\data\datapipes\utils\common.py
ENCODING: utf-8
```py
# mypy: allow-untyped-defs
import fnmatch
import functools
import inspect
import os
import warnings
from collections.abc import Iterable
from io import IOBase
from typing import Any, Callable, Optional, Union
from torch.utils._import_utils import dill_available
__all__ = [
"validate_input_col",
"StreamWrapper",
"get_file_binaries_from_pathnames",
"get_file_pathnames_from_root",
"match_masks",
"validate_pathname_binary_tuple",
]
# BC for torchdata
DILL_AVAILABLE = dill_available()
def validate_input_col(fn: Callable, input_col: Optional[Union[int, tuple, list]]):
"""
Check that function used in a callable datapipe works with the input column.
This simply ensures that the number of positional arguments matches the size
of the input column. The function must not contain any non-default
keyword-only arguments.
Examples:
>>> # xdoctest: +SKIP("Failing on some CI machines")
>>> def f(a, b, *, c=1):
>>> return a + b + c
>>> def f_def(a, b=1, *, c=1):
>>> return a + b + c
>>> assert validate_input_col(f, [1, 2])
>>> assert validate_input_col(f_def, 1)
>>> assert validate_input_col(f_def, [1, 2])
Notes:
If the function contains variable positional (`inspect.VAR_POSITIONAL`) arguments,
for example, f(a, *args), the validator will accept any size of input column
greater than or equal to the number of positional arguments.
(in this case, 1).
Args:
fn: The function to check.
input_col: The input column to check.
Raises:
ValueError: If the function is not compatible with the input column.
"""
try:
sig = inspect.signature(fn)
except (
ValueError
): # Signature cannot be inspected, likely it is a built-in fn or written in C
return
if isinstance(input_col, (list, tuple)):
input_col_size = len(input_col)
else:
input_col_size = 1
pos = []
var_positional = False
non_default_kw_only = []
for p in sig.parameters.values():
if p.kind in (
inspect.Parameter.POSITIONAL_ONLY,
inspect.Parameter.POSITIONAL_OR_KEYWORD,
):
pos.append(p)
elif p.kind is inspect.Parameter.VAR_POSITIONAL:
var_positional = True
elif p.kind is inspect.Parameter.KEYWORD_ONLY:
if p.default is p.empty:
non_default_kw_only.append(p)
else:
continue
if isinstance(fn, functools.partial):
fn_name = getattr(fn.func, "__name__", repr(fn.func))
else:
fn_name = getattr(fn, "__name__", repr(fn))
if len(non_default_kw_only) > 0:
raise ValueError(
f"The function {fn_name} takes {len(non_default_kw_only)} "
f"non-default keyword-only parameters, which is not allowed."
)
if len(sig.parameters) < input_col_size:
if not var_positional:
raise ValueError(
f"The function {fn_name} takes {len(sig.parameters)} "
f"parameters, but {input_col_size} are required."
)
else:
if len(pos) > input_col_size:
if any(p.default is p.empty for p in pos[input_col_size:]):
raise ValueError(
f"The function {fn_name} takes {len(pos)} "
f"positional parameters, but {input_col_size} are required."
)
elif len(pos) < input_col_size:
if not var_positional:
raise ValueError(
f"The function {fn_name} takes {len(pos)} "
f"positional parameters, but {input_col_size} are required."
)
def _is_local_fn(fn):
# Functions or Methods
if hasattr(fn, "__code__"):
return fn.__code__.co_flags & inspect.CO_NESTED
# Callable Objects
else:
if hasattr(fn, "__qualname__"):
return "<locals>" in fn.__qualname__
fn_type = type(fn)
if hasattr(fn_type, "__qualname__"):
return "<locals>" in fn_type.__qualname__
return False
def _check_unpickable_fn(fn: Callable):
"""
    Check whether the given function is picklable.
If it is a lambda or local function, a UserWarning will be raised. If it's not a callable function, a TypeError will be raised.
"""
if not callable(fn):
raise TypeError(f"A callable function is expected, but {type(fn)} is provided.")
# Extract function from partial object
# Nested partial function is automatically expanded as a single partial object
if isinstance(fn, functools.partial):
fn = fn.func
# Local function
if _is_local_fn(fn) and not dill_available():
warnings.warn(
"Local function is not supported by pickle, please use "
"regular python function or functools.partial instead."
)
return
# Lambda function
if hasattr(fn, "__name__") and fn.__name__ == "<lambda>" and not dill_available():
warnings.warn(
"Lambda function is not supported by pickle, please use "
"regular python function or functools.partial instead."
)
return
def match_masks(name: str, masks: Union[str, list[str]]) -> bool:
# empty mask matches any input name
if not masks:
return True
if isinstance(masks, str):
return fnmatch.fnmatch(name, masks)
for mask in masks:
if fnmatch.fnmatch(name, mask):
return True
return False
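# Illustrative behaviour of `match_masks` (filenames below are made up):
#   match_masks("img_001.png", "") -> True          (empty mask matches anything)
#   match_masks("img_001.png", "*.png") -> True
#   match_masks("img_001.png", ["*.jpg", "*.tar"]) -> False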
def get_file_pathnames_from_root(
root: str,
masks: Union[str, list[str]],
recursive: bool = False,
abspath: bool = False,
non_deterministic: bool = False,
) -> Iterable[str]:
    # emit a warning with the error message, then re-raise the error
def onerror(err: OSError):
warnings.warn(err.filename + " : " + err.strerror)
raise err
if os.path.isfile(root):
path = root
if abspath:
path = os.path.abspath(path)
fname = os.path.basename(path)
if match_masks(fname, masks):
yield path
else:
for path, dirs, files in os.walk(root, onerror=onerror):
if abspath:
path = os.path.abspath(path)
if not non_deterministic:
files.sort()
for f in files:
if match_masks(f, masks):
yield os.path.join(path, f)
if not recursive:
break
if not non_deterministic:
# Note that this is in-place modifying the internal list from `os.walk`
                # This only works because `os.walk` doesn't shallow copy the list before returning
# https://github.com/python/cpython/blob/f4c03484da59049eb62a9bf7777b963e2267d187/Lib/os.py#L407
dirs.sort()
def get_file_binaries_from_pathnames(
pathnames: Iterable, mode: str, encoding: Optional[str] = None
):
if not isinstance(pathnames, Iterable):
pathnames = [
pathnames,
]
if mode in ("b", "t"):
mode = "r" + mode
for pathname in pathnames:
if not isinstance(pathname, str):
raise TypeError(
f"Expected string type for pathname, but got {type(pathname)}"
)
yield pathname, StreamWrapper(open(pathname, mode, encoding=encoding))
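# Illustrative use (the pathname is made up): since a bare "b" mode is normalized
# to "rb" above, get_file_binaries_from_pathnames(["data.bin"], "b") opens the file
# in binary mode and yields ("data.bin", StreamWrapper(<binary file object>)).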
def validate_pathname_binary_tuple(data: tuple[str, IOBase]):
if not isinstance(data, tuple):
raise TypeError(
f"pathname binary data should be tuple type, but it is type {type(data)}"
)
if len(data) != 2:
raise TypeError(
f"pathname binary stream tuple length should be 2, but got {len(data)}"
)
if not isinstance(data[0], str):
raise TypeError(
f"pathname within the tuple should have string type pathname, but it is type {type(data[0])}"
)
if not isinstance(data[1], IOBase) and not isinstance(data[1], StreamWrapper):
raise TypeError(
f"binary stream within the tuple should have IOBase or"
f"its subclasses as type, but it is type {type(data[1])}"
)
# Deprecated function names and its corresponding DataPipe type and kwargs for the `_deprecation_warning` function
_iter_deprecated_functional_names: dict[str, dict] = {}
_map_deprecated_functional_names: dict[str, dict] = {}
def _deprecation_warning(
old_class_name: str,
*,
deprecation_version: str,
removal_version: str,
old_functional_name: str = "",
old_argument_name: str = "",
new_class_name: str = "",
new_functional_name: str = "",
new_argument_name: str = "",
deprecate_functional_name_only: bool = False,
) -> None:
if new_functional_name and not old_functional_name:
raise ValueError(
"Old functional API needs to be specified for the deprecation warning."
)
if new_argument_name and not old_argument_name:
raise ValueError(
"Old argument name needs to be specified for the deprecation warning."
)
if old_functional_name and old_argument_name:
raise ValueError(
"Deprecating warning for functional API and argument should be separated."
)
msg = f"`{old_class_name}()`"
if deprecate_functional_name_only and old_functional_name:
msg = f"{msg}'s functional API `.{old_functional_name}()` is"
elif old_functional_name:
msg = f"{msg} and its functional API `.{old_functional_name}()` are"
elif old_argument_name:
msg = f"The argument `{old_argument_name}` of {msg} is"
else:
msg = f"{msg} is"
msg = (
f"{msg} deprecated since {deprecation_version} and will be removed in {removal_version}."
f"\nSee https://github.com/pytorch/data/issues/163 for details."
)
if new_class_name or new_functional_name:
msg = f"{msg}\nPlease use"
if new_class_name:
msg = f"{msg} `{new_class_name}()`"
if new_class_name and new_functional_name:
msg = f"{msg} or"
if new_functional_name:
msg = f"{msg} `.{new_functional_name}()`"
msg = f"{msg} instead."
if new_argument_name:
msg = f"{msg}\nPlease use `{old_class_name}({new_argument_name}=)` instead."
warnings.warn(msg, FutureWarning)
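# Illustrative call (the DataPipe and functional names are hypothetical):
#   _deprecation_warning("FooDataPipe", deprecation_version="1.12",
#                        removal_version="1.13", old_functional_name="foo")
# emits a FutureWarning along the lines of "`FooDataPipe()` and its functional API
# `.foo()` are deprecated since 1.12 and will be removed in 1.13. ...".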
class StreamWrapper:
"""
StreamWrapper is introduced to wrap file handler generated by DataPipe operation like `FileOpener`.
StreamWrapper would guarantee the wrapped file handler is closed when it's out of scope.
"""
session_streams: dict[Any, int] = {}
debug_unclosed_streams: bool = False
def __init__(self, file_obj, parent_stream=None, name=None):
self.file_obj = file_obj
self.child_counter = 0
self.parent_stream = parent_stream
self.close_on_last_child = False
self.name = name
self.closed = False
if parent_stream is not None:
if not isinstance(parent_stream, StreamWrapper):
raise RuntimeError(
f"Parent stream should be StreamWrapper, {type(parent_stream)} was given"
)
parent_stream.child_counter += 1
self.parent_stream = parent_stream
if StreamWrapper.debug_unclosed_streams:
StreamWrapper.session_streams[self] = 1
@classmethod
def close_streams(cls, v, depth=0):
"""Traverse structure and attempts to close all found StreamWrappers on best effort basis."""
if depth > 10:
return
if isinstance(v, StreamWrapper):
v.close()
else:
# Traverse only simple structures
if isinstance(v, dict):
for vv in v.values():
cls.close_streams(vv, depth=depth + 1)
elif isinstance(v, (list, tuple)):
for vv in v:
cls.close_streams(vv, depth=depth + 1)
def __getattr__(self, name):
file_obj = self.__dict__["file_obj"]
return getattr(file_obj, name)
def close(self, *args, **kwargs):
if self.closed:
return
if StreamWrapper.debug_unclosed_streams:
del StreamWrapper.session_streams[self]
if hasattr(self, "parent_stream") and self.parent_stream is not None:
self.parent_stream.child_counter -= 1
if (
not self.parent_stream.child_counter
and self.parent_stream.close_on_last_child
):
self.parent_stream.close()
try:
self.file_obj.close(*args, **kwargs)
except AttributeError:
pass
self.closed = True
def autoclose(self):
"""Automatically close stream when all child streams are closed or if there are none."""
self.close_on_last_child = True
if self.child_counter == 0:
self.close()
def __dir__(self):
attrs = list(self.__dict__.keys()) + list(StreamWrapper.__dict__.keys())
attrs += dir(self.file_obj)
return list(set(attrs))
def __del__(self):
if not self.closed:
self.close()
def __iter__(self):
yield from self.file_obj
def __next__(self):
return next(self.file_obj)
def __repr__(self):
if self.name is None:
return f"StreamWrapper<{self.file_obj!r}>"
else:
return f"StreamWrapper<{self.name},{self.file_obj!r}>"
def __getstate__(self):
return self.file_obj
def __setstate__(self, obj):
self.file_obj = obj
```
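A minimal usage sketch of `StreamWrapper` (the file name `example.txt` is an assumption, not part of the module above):
```py
# StreamWrapper forwards attribute access to the wrapped file object and makes
# sure the handle is closed when the wrapper goes out of scope.
from torch.utils.data.datapipes.utils.common import StreamWrapper

wrapped = StreamWrapper(open("example.txt", "rb"), name="example.txt")
first_line = wrapped.readline()  # delegated to the underlying file object
wrapped.close()                  # safe to call more than once; later calls are no-ops
```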
|
===================================================================================================================================
SOURCE CODE FILE: decoder.py
LINES: 1
SIZE: 12.08 KB
PATH: scripts\freecad_env\Lib\site-packages\torch\utils\data\datapipes\utils\decoder.py
ENCODING: utf-8
```py
# mypy: allow-untyped-defs
# This file takes part of its implementation from NVIDIA's webdataset, found here:
# https://github.com/tmbdev/webdataset/blob/master/webdataset/autodecode.py
import io
import json
import os.path
import pickle
import tempfile
import torch
from torch.utils.data.datapipes.utils.common import StreamWrapper
__all__ = [
"Decoder",
"ImageHandler",
"MatHandler",
"audiohandler",
"basichandlers",
"extension_extract_fn",
"handle_extension",
"imagehandler",
"mathandler",
"videohandler",
]
################################################################
# handle basic datatypes
################################################################
def basichandlers(extension: str, data):
"""Transforms raw data (byte stream) into python objects.
Looks at the extension and loads the data into a python object supporting
the corresponding extension.
Args:
extension (str): The file extension
data (byte stream): Data to load into a python object.
Returns:
object: The data loaded into a corresponding python object
supporting the extension.
Example:
>>> import pickle
>>> data = pickle.dumps('some data')
>>> new_data = basichandlers('pickle', data)
>>> new_data
some data
The transformation of data for extensions are:
- txt, text, transcript: utf-8 decoded data of str format
- cls, cls2, class, count, index, inx, id: int
- json, jsn: json loaded data
- pickle, pyd: pickle loaded data
- pt: torch loaded data
"""
    if extension in "txt text transcript".split():
return data.decode("utf-8")
if extension in "cls cls2 class count index inx id".split():
try:
return int(data)
except ValueError:
return None
if extension in "json jsn":
return json.loads(data)
if extension in "pyd pickle".split():
return pickle.loads(data)
if extension in "pt".split():
stream = io.BytesIO(data)
return torch.load(stream)
# if extension in "ten tb".split():
# from . import tenbin
# return tenbin.decode_buffer(data)
# if extension in "mp msgpack msg".split():
# import msgpack
# return msgpack.unpackb(data)
return None
################################################################
# handle images
################################################################
imagespecs = {
"l8": ("numpy", "uint8", "l"),
"rgb8": ("numpy", "uint8", "rgb"),
"rgba8": ("numpy", "uint8", "rgba"),
"l": ("numpy", "float", "l"),
"rgb": ("numpy", "float", "rgb"),
"rgba": ("numpy", "float", "rgba"),
"torchl8": ("torch", "uint8", "l"),
"torchrgb8": ("torch", "uint8", "rgb"),
"torchrgba8": ("torch", "uint8", "rgba"),
"torchl": ("torch", "float", "l"),
"torchrgb": ("torch", "float", "rgb"),
"torch": ("torch", "float", "rgb"),
"torchrgba": ("torch", "float", "rgba"),
"pill": ("pil", None, "l"),
"pil": ("pil", None, "rgb"),
"pilrgb": ("pil", None, "rgb"),
"pilrgba": ("pil", None, "rgba"),
}
def handle_extension(extensions, f):
"""
Return a decoder handler function for the list of extensions.
Extensions can be a space separated list of extensions.
Extensions can contain dots, in which case the corresponding number
of extension components must be present in the key given to f.
Comparisons are case insensitive.
Examples:
handle_extension("jpg jpeg", my_decode_jpg) # invoked for any file.jpg
handle_extension("seg.jpg", special_case_jpg) # invoked only for file.seg.jpg
"""
extensions = extensions.lower().split()
def g(key, data):
extension = key.lower().split(".")
for target in extensions:
target = target.split(".")
if len(target) > len(extension):
continue
if extension[-len(target) :] == target:
return f(data)
return None
return g
class ImageHandler:
"""
Decode image data using the given `imagespec`.
The `imagespec` specifies whether the image is decoded
    to numpy/torch/pil, decoded to uint8/float, and decoded
to l/rgb/rgba:
- l8: numpy uint8 l
- rgb8: numpy uint8 rgb
- rgba8: numpy uint8 rgba
- l: numpy float l
- rgb: numpy float rgb
- rgba: numpy float rgba
- torchl8: torch uint8 l
- torchrgb8: torch uint8 rgb
- torchrgba8: torch uint8 rgba
- torchl: torch float l
- torchrgb: torch float rgb
- torch: torch float rgb
- torchrgba: torch float rgba
- pill: pil None l
- pil: pil None rgb
- pilrgb: pil None rgb
- pilrgba: pil None rgba
"""
def __init__(self, imagespec):
assert imagespec in list(
imagespecs.keys()
), f"unknown image specification: {imagespec}"
self.imagespec = imagespec.lower()
def __call__(self, extension, data):
if extension.lower() not in "jpg jpeg png ppm pgm pbm pnm".split():
return None
try:
import numpy as np
except ModuleNotFoundError as e:
            raise ModuleNotFoundError(
                "Package `numpy` is required to be installed for default image decoder. "
                "Please use `pip install numpy` to install the package"
) from e
try:
import PIL.Image
except ModuleNotFoundError as e:
            raise ModuleNotFoundError(
                "Package `PIL` is required to be installed for default image decoder. "
                "Please use `pip install Pillow` to install the package"
) from e
imagespec = self.imagespec
atype, etype, mode = imagespecs[imagespec]
with io.BytesIO(data) as stream:
img = PIL.Image.open(stream)
img.load()
img = img.convert(mode.upper())
if atype == "pil":
return img
elif atype == "numpy":
result = np.asarray(img)
assert (
result.dtype == np.uint8
), f"numpy image array should be type uint8, but got {result.dtype}"
if etype == "uint8":
return result
else:
return result.astype("f") / 255.0
elif atype == "torch":
result = np.asarray(img)
assert (
result.dtype == np.uint8
), f"numpy image array should be type uint8, but got {result.dtype}"
if etype == "uint8":
result = np.array(result.transpose(2, 0, 1))
return torch.tensor(result)
else:
result = np.array(result.transpose(2, 0, 1))
return torch.tensor(result) / 255.0
return None
def imagehandler(imagespec):
return ImageHandler(imagespec)
################################################################
# torch video
################################################################
def videohandler(extension, data):
if extension not in "mp4 ogv mjpeg avi mov h264 mpg webm wmv".split():
return None
try:
import torchvision.io
except ImportError as e:
        raise ModuleNotFoundError(
            "Package `torchvision` is required to be installed for default video file loader. "
            "Please use `pip install torchvision` or `conda install torchvision -c pytorch` "
            "to install the package"
) from e
with tempfile.TemporaryDirectory() as dirname:
fname = os.path.join(dirname, f"file.{extension}")
with open(fname, "wb") as stream:
stream.write(data)
return torchvision.io.read_video(fname)
################################################################
# torchaudio
################################################################
def audiohandler(extension, data):
if extension not in ["flac", "mp3", "sox", "wav", "m4a", "ogg", "wma"]:
return None
try:
import torchaudio # type: ignore[import]
except ImportError as e:
        raise ModuleNotFoundError(
            "Package `torchaudio` is required to be installed for default audio file loader. "
            "Please use `pip install torchaudio` or `conda install torchaudio -c pytorch` "
            "to install the package"
) from e
with tempfile.TemporaryDirectory() as dirname:
fname = os.path.join(dirname, f"file.{extension}")
with open(fname, "wb") as stream:
stream.write(data)
return torchaudio.load(fname)
################################################################
# mat
################################################################
class MatHandler:
def __init__(self, **loadmat_kwargs) -> None:
try:
import scipy.io as sio
except ImportError as e:
            raise ModuleNotFoundError(
                "Package `scipy` is required to be installed to load mat files. "
                "Please use `pip install scipy` or `conda install scipy` "
                "to install the package"
) from e
self.sio = sio
self.loadmat_kwargs = loadmat_kwargs
def __call__(self, extension, data):
if extension != "mat":
return None
with io.BytesIO(data) as stream:
return self.sio.loadmat(stream, **self.loadmat_kwargs)
def mathandler(**loadmat_kwargs):
return MatHandler(**loadmat_kwargs)
################################################################
# a sample decoder
################################################################
# Extract extension from pathname
def extension_extract_fn(pathname):
ext = os.path.splitext(pathname)[1]
# Remove dot
if ext:
ext = ext[1:]
return ext
class Decoder:
"""
Decode key/data sets using a list of handlers.
For each key/data item, this iterates through the list of
handlers until some handler returns something other than None.
"""
def __init__(self, *handler, key_fn=extension_extract_fn):
self.handlers = list(handler) if handler else []
self.key_fn = key_fn
    # Insert new handlers at the beginning of the handlers list so that they take
    # the highest priority
def add_handler(self, *handler):
if not handler:
return
self.handlers = list(handler) + self.handlers
@staticmethod
def _is_stream_handle(data):
obj_to_check = data.file_obj if isinstance(data, StreamWrapper) else data
return isinstance(obj_to_check, (io.BufferedIOBase, io.RawIOBase))
def decode1(self, key, data):
if not data:
return data
# if data is a stream handle, we need to read all the content before decoding
if Decoder._is_stream_handle(data):
ds = data
# The behavior of .read can differ between streams (e.g. HTTPResponse), hence this is used instead
data = b"".join(data)
ds.close()
for f in self.handlers:
result = f(key, data)
if result is not None:
return result
return data
def decode(self, data):
result = {}
# single data tuple(pathname, data stream)
if isinstance(data, tuple):
data = [data]
if data is not None:
for k, v in data:
                # TODO: xinyu, figure out why NVIDIA does this
if k[0] == "_":
if isinstance(v, bytes):
v = v.decode("utf-8")
result[k] = v
continue
result[k] = self.decode1(self.key_fn(k), v)
return result
def __call__(self, data):
return self.decode(data)
```
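A minimal usage sketch of `Decoder` with the handlers defined above (the sample record below is a stand-in for what an archive-reading DataPipe would yield):
```py
import json

from torch.utils.data.datapipes.utils.decoder import Decoder, basichandlers, imagehandler

decoder = Decoder(basichandlers, imagehandler("torchrgb"))
sample = [
    ("0001.json", json.dumps({"label": 3}).encode("utf-8")),
    # ("0001.jpg", jpeg_bytes),  # image bytes would decode to a float CHW tensor
]
decoded = decoder(sample)
print(decoded)  # {'0001.json': {'label': 3}}
```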
|
====================================================================================================================================
SOURCE CODE FILE: snapshot.py
LINES: 1
SIZE: 3.09 KB
PATH: scripts\freecad_env\Lib\site-packages\torch\utils\data\datapipes\utils\snapshot.py
ENCODING: utf-8
```py
# mypy: allow-untyped-defs
from torch.utils.data.datapipes._hook_iterator import _SnapshotState
from torch.utils.data.datapipes.datapipe import IterDataPipe
from torch.utils.data.graph_settings import apply_random_seed
# TODO: Caveats
# 1. Caller (either the ReadingService or DataLoader) must pass in the initial RNG
# 2. `in_batch_shuffle` and `bucketbatch` are not compatible with this because they currently
# lack the option to `set_seed`.
def _simple_graph_snapshot_restoration(
datapipe: IterDataPipe, n_iterations: int, rng=None
) -> None:
r"""
Fast-forward the given DataPipe and its parents by ``n_iterations``, re-doing computations to restore a snapshot.
    For instance, applying this function to the final DataPipe of a graph will restore the snapshot
    (via fast-forward) of every DataPipe within the graph.
After you deserialize a DataPipe, you can use its `_number_of_samples_yielded` attribute as the input
to this function to forward the DataPipe.
A DataPipe cannot be restored twice in a row unless there is an iteration started between the restoration
attempts.
Note:
This is the simplest but least efficient way to fast-forward a DataPipe. Usage of other fast-forwarding
methods (custom ones if necessary) are recommended.
Args:
datapipe: IterDataPipe to be fast-forwarded
n_iterations: number of iterations to fast-forward
rng: ``Optional[torch.Generator]``. If not ``None``, this RNG will be used for shuffling. The generator
should be in its `initial` state as it was first passed into ``DataLoader`` or ``ReadingService``.
"""
if datapipe._snapshot_state == _SnapshotState.Restored:
raise RuntimeError(
"Snapshot restoration cannot be applied. You can only restore simple snapshot to the graph "
"if your graph has not been restored."
)
# For this snapshot restoration function, we want the DataPipe to be at its initial state prior to
# simple fast-forwarding. Therefore, we need to call `reset` twice, because if `SnapshotState` is `Restored`,
# the first reset will not actually reset.
datapipe.reset() # This ensures `SnapshotState` is `Iterating` by this point, even if it was `Restored`.
apply_random_seed(datapipe, rng)
remainder = n_iterations
    it = iter(datapipe)  # This always resets the DataPipe if it hasn't been reset already.
while remainder > 0:
try:
next(it)
remainder -= 1
except StopIteration as e:
raise RuntimeError(
f"Fast-forward {datapipe} by {n_iterations} iterations "
"exceeds the number of samples available."
) from e
datapipe._fast_forward_iterator = it
# While the DataPipe has `_fast_forward_iterator`, `next()` will get result from there instead of elsewhere.
# This will prevent the DataPipe from resetting in the `iter()` call
# If another DataPipe is consuming it, it won't have to start over again
datapipe._snapshot_state = _SnapshotState.Restored
```
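A minimal sketch of how `_simple_graph_snapshot_restoration` could be used to resume a toy pipeline (the pipeline and seed below are assumptions):
```py
import torch
from torch.utils.data.datapipes.iter import IterableWrapper
from torch.utils.data.datapipes.utils.snapshot import _simple_graph_snapshot_restoration

dp = IterableWrapper(range(10)).shuffle()
rng = torch.Generator().manual_seed(0)
# Re-play the first 4 samples so the pipeline is positioned as if they were already consumed.
_simple_graph_snapshot_restoration(dp, n_iterations=4, rng=rng)
remaining = list(dp)  # yields the 6 samples that were not consumed yet
```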
|
===================================================================================================================
SOURCE CODE FILE: dataset.py
LINES: 1
SIZE: 19.48 KB
PATH: scripts\freecad_env\Lib\site-packages\torch\utils\data\dataset.py
ENCODING: utf-8
```py
# mypy: allow-untyped-defs
import bisect
import itertools
import math
import warnings
from collections.abc import Sequence
# UP006 wants 'Iterable' to be imported from collections.abc but it needs to
# stay from typing for now due to BC concerns. In particular several internal
# targets fail to typecheck with:
# TypeError: Cannot create a consistent method resolution order (MRO) for
# bases Iterable, Generic
from typing import cast, Generic, Iterable, Optional, TypeVar, Union # noqa: UP035
from typing_extensions import deprecated
# No 'default_generator' in torch/__init__.pyi
from torch import default_generator, Generator, randperm, Tensor
__all__ = [
"Dataset",
"IterableDataset",
"TensorDataset",
"StackDataset",
"ConcatDataset",
"ChainDataset",
"Subset",
"random_split",
]
_T = TypeVar("_T")
_T_co = TypeVar("_T_co", covariant=True)
_T_dict = dict[str, _T_co]
_T_tuple = tuple[_T_co, ...]
_T_stack = TypeVar("_T_stack", _T_tuple, _T_dict)
class Dataset(Generic[_T_co]):
r"""An abstract class representing a :class:`Dataset`.
All datasets that represent a map from keys to data samples should subclass
it. All subclasses should overwrite :meth:`__getitem__`, supporting fetching a
data sample for a given key. Subclasses could also optionally overwrite
:meth:`__len__`, which is expected to return the size of the dataset by many
:class:`~torch.utils.data.Sampler` implementations and the default options
of :class:`~torch.utils.data.DataLoader`. Subclasses could also
    optionally implement :meth:`__getitems__` to speed up batched sample
    loading. This method accepts a list of sample indices for a batch and returns
    the list of corresponding samples.
.. note::
:class:`~torch.utils.data.DataLoader` by default constructs an index
sampler that yields integral indices. To make it work with a map-style
dataset with non-integral indices/keys, a custom sampler must be provided.
"""
def __getitem__(self, index) -> _T_co:
raise NotImplementedError("Subclasses of Dataset should implement __getitem__.")
# def __getitems__(self, indices: List) -> List[_T_co]:
# Not implemented to prevent false-positives in fetcher check in
# torch.utils.data._utils.fetch._MapDatasetFetcher
def __add__(self, other: "Dataset[_T_co]") -> "ConcatDataset[_T_co]":
return ConcatDataset([self, other])
# No `def __len__(self)` default?
# See NOTE [ Lack of Default `__len__` in Python Abstract Base Classes ]
# in pytorch/torch/utils/data/sampler.py
class IterableDataset(Dataset[_T_co], Iterable[_T_co]):
r"""An iterable Dataset.
All datasets that represent an iterable of data samples should subclass it.
Such form of datasets is particularly useful when data come from a stream.
All subclasses should overwrite :meth:`__iter__`, which would return an
iterator of samples in this dataset.
When a subclass is used with :class:`~torch.utils.data.DataLoader`, each
item in the dataset will be yielded from the :class:`~torch.utils.data.DataLoader`
iterator. When :attr:`num_workers > 0`, each worker process will have a
different copy of the dataset object, so it is often desired to configure
each copy independently to avoid having duplicate data returned from the
workers. :func:`~torch.utils.data.get_worker_info`, when called in a worker
process, returns information about the worker. It can be used in either the
dataset's :meth:`__iter__` method or the :class:`~torch.utils.data.DataLoader` 's
:attr:`worker_init_fn` option to modify each copy's behavior.
Example 1: splitting workload across all workers in :meth:`__iter__`::
>>> # xdoctest: +REQUIRES(env:TORCH_DOCTEST_DATALOADER)
>>> # xdoctest: +SKIP("Fails on MacOS12")
>>> class MyIterableDataset(torch.utils.data.IterableDataset):
... def __init__(self, start, end):
... super(MyIterableDataset).__init__()
... assert end > start, "this example code only works with end >= start"
... self.start = start
... self.end = end
...
... def __iter__(self):
... worker_info = torch.utils.data.get_worker_info()
... if worker_info is None: # single-process data loading, return the full iterator
... iter_start = self.start
... iter_end = self.end
... else: # in a worker process
... # split workload
... per_worker = int(math.ceil((self.end - self.start) / float(worker_info.num_workers)))
... worker_id = worker_info.id
... iter_start = self.start + worker_id * per_worker
... iter_end = min(iter_start + per_worker, self.end)
... return iter(range(iter_start, iter_end))
...
>>> # should give same set of data as range(3, 7), i.e., [3, 4, 5, 6].
>>> ds = MyIterableDataset(start=3, end=7)
>>> # Single-process loading
>>> print(list(torch.utils.data.DataLoader(ds, num_workers=0)))
[tensor([3]), tensor([4]), tensor([5]), tensor([6])]
>>> # xdoctest: +REQUIRES(POSIX)
>>> # Multi-process loading with two worker processes
>>> # Worker 0 fetched [3, 4]. Worker 1 fetched [5, 6].
>>> # xdoctest: +IGNORE_WANT("non deterministic")
>>> print(list(torch.utils.data.DataLoader(ds, num_workers=2)))
[tensor([3]), tensor([5]), tensor([4]), tensor([6])]
>>> # With even more workers
>>> # xdoctest: +IGNORE_WANT("non deterministic")
>>> print(list(torch.utils.data.DataLoader(ds, num_workers=12)))
[tensor([3]), tensor([5]), tensor([4]), tensor([6])]
Example 2: splitting workload across all workers using :attr:`worker_init_fn`::
>>> # xdoctest: +REQUIRES(env:TORCH_DOCTEST_DATALOADER)
>>> class MyIterableDataset(torch.utils.data.IterableDataset):
... def __init__(self, start, end):
... super(MyIterableDataset).__init__()
... assert end > start, "this example code only works with end >= start"
... self.start = start
... self.end = end
...
... def __iter__(self):
... return iter(range(self.start, self.end))
...
>>> # should give same set of data as range(3, 7), i.e., [3, 4, 5, 6].
>>> ds = MyIterableDataset(start=3, end=7)
>>> # Single-process loading
>>> print(list(torch.utils.data.DataLoader(ds, num_workers=0)))
[3, 4, 5, 6]
>>>
>>> # Directly doing multi-process loading yields duplicate data
>>> print(list(torch.utils.data.DataLoader(ds, num_workers=2)))
[3, 3, 4, 4, 5, 5, 6, 6]
>>> # Define a `worker_init_fn` that configures each dataset copy differently
>>> def worker_init_fn(worker_id):
... worker_info = torch.utils.data.get_worker_info()
... dataset = worker_info.dataset # the dataset copy in this worker process
... overall_start = dataset.start
... overall_end = dataset.end
... # configure the dataset to only process the split workload
... per_worker = int(math.ceil((overall_end - overall_start) / float(worker_info.num_workers)))
... worker_id = worker_info.id
... dataset.start = overall_start + worker_id * per_worker
... dataset.end = min(dataset.start + per_worker, overall_end)
...
        >>> # Multi-process loading with the custom `worker_init_fn`
>>> # Worker 0 fetched [3, 4]. Worker 1 fetched [5, 6].
>>> print(list(torch.utils.data.DataLoader(ds, num_workers=2, worker_init_fn=worker_init_fn)))
[3, 5, 4, 6]
>>> # With even more workers
>>> print(list(torch.utils.data.DataLoader(ds, num_workers=12, worker_init_fn=worker_init_fn)))
[3, 4, 5, 6]
"""
def __add__(self, other: Dataset[_T_co]):
return ChainDataset([self, other])
# No `def __len__(self)` default? Subclasses raise `TypeError` when needed.
# See NOTE [ Lack of Default `__len__` in Python Abstract Base Classes ]
class TensorDataset(Dataset[tuple[Tensor, ...]]):
r"""Dataset wrapping tensors.
Each sample will be retrieved by indexing tensors along the first dimension.
Args:
*tensors (Tensor): tensors that have the same size of the first dimension.
"""
tensors: tuple[Tensor, ...]
def __init__(self, *tensors: Tensor) -> None:
assert all(
tensors[0].size(0) == tensor.size(0) for tensor in tensors
), "Size mismatch between tensors"
self.tensors = tensors
def __getitem__(self, index):
return tuple(tensor[index] for tensor in self.tensors)
def __len__(self):
return self.tensors[0].size(0)
class StackDataset(Dataset[_T_stack]):
r"""Dataset as a stacking of multiple datasets.
This class is useful to assemble different parts of complex input data, given as datasets.
Example:
>>> # xdoctest: +SKIP
>>> images = ImageDataset()
>>> texts = TextDataset()
>>> tuple_stack = StackDataset(images, texts)
>>> tuple_stack[0] == (images[0], texts[0])
>>> dict_stack = StackDataset(image=images, text=texts)
>>> dict_stack[0] == {'image': images[0], 'text': texts[0]}
Args:
*args (Dataset): Datasets for stacking returned as tuple.
**kwargs (Dataset): Datasets for stacking returned as dict.
"""
datasets: Union[tuple, dict]
def __init__(self, *args: Dataset[_T_co], **kwargs: Dataset[_T_co]) -> None:
if args:
if kwargs:
                raise ValueError(
                    "Supported either ``tuple``- (via ``args``) or "
                    "``dict``- (via ``kwargs``) like input/output, but both types are given."
)
self._length = len(args[0]) # type: ignore[arg-type]
if any(self._length != len(dataset) for dataset in args): # type: ignore[arg-type]
raise ValueError("Size mismatch between datasets")
self.datasets = args
elif kwargs:
tmp = list(kwargs.values())
self._length = len(tmp[0]) # type: ignore[arg-type]
if any(self._length != len(dataset) for dataset in tmp): # type: ignore[arg-type]
raise ValueError("Size mismatch between datasets")
self.datasets = kwargs
else:
raise ValueError("At least one dataset should be passed")
def __getitem__(self, index):
if isinstance(self.datasets, dict):
return {k: dataset[index] for k, dataset in self.datasets.items()}
return tuple(dataset[index] for dataset in self.datasets)
def __getitems__(self, indices: list):
        # add batched sampling support when parent datasets support it.
if isinstance(self.datasets, dict):
dict_batch: list[_T_dict] = [{} for _ in indices]
for k, dataset in self.datasets.items():
if callable(getattr(dataset, "__getitems__", None)):
items = dataset.__getitems__(indices) # type: ignore[attr-defined]
if len(items) != len(indices):
raise ValueError(
"Nested dataset's output size mismatch."
f" Expected {len(indices)}, got {len(items)}"
)
for data, d_sample in zip(items, dict_batch):
d_sample[k] = data
else:
for idx, d_sample in zip(indices, dict_batch):
d_sample[k] = dataset[idx]
return dict_batch
# tuple data
list_batch: list[list] = [[] for _ in indices]
for dataset in self.datasets:
if callable(getattr(dataset, "__getitems__", None)):
items = dataset.__getitems__(indices) # type: ignore[attr-defined]
if len(items) != len(indices):
raise ValueError(
"Nested dataset's output size mismatch."
f" Expected {len(indices)}, got {len(items)}"
)
for data, t_sample in zip(items, list_batch):
t_sample.append(data)
else:
for idx, t_sample in zip(indices, list_batch):
t_sample.append(dataset[idx])
tuple_batch: list[_T_tuple] = [tuple(sample) for sample in list_batch]
return tuple_batch
def __len__(self):
return self._length
class ConcatDataset(Dataset[_T_co]):
r"""Dataset as a concatenation of multiple datasets.
This class is useful to assemble different existing datasets.
Args:
datasets (sequence): List of datasets to be concatenated
"""
datasets: list[Dataset[_T_co]]
cumulative_sizes: list[int]
@staticmethod
def cumsum(sequence):
r, s = [], 0
for e in sequence:
l = len(e)
r.append(l + s)
s += l
return r
def __init__(self, datasets: Iterable[Dataset]) -> None:
super().__init__()
self.datasets = list(datasets)
assert len(self.datasets) > 0, "datasets should not be an empty iterable" # type: ignore[arg-type]
for d in self.datasets:
assert not isinstance(
d, IterableDataset
), "ConcatDataset does not support IterableDataset"
self.cumulative_sizes = self.cumsum(self.datasets)
def __len__(self):
return self.cumulative_sizes[-1]
def __getitem__(self, idx):
if idx < 0:
if -idx > len(self):
raise ValueError(
"absolute value of index should not exceed dataset length"
)
idx = len(self) + idx
dataset_idx = bisect.bisect_right(self.cumulative_sizes, idx)
if dataset_idx == 0:
sample_idx = idx
else:
sample_idx = idx - self.cumulative_sizes[dataset_idx - 1]
return self.datasets[dataset_idx][sample_idx]
@property
@deprecated(
"`cummulative_sizes` attribute is renamed to `cumulative_sizes`",
category=FutureWarning,
)
def cummulative_sizes(self):
return self.cumulative_sizes
class ChainDataset(IterableDataset):
r"""Dataset for chaining multiple :class:`IterableDataset` s.
This class is useful to assemble different existing dataset streams. The
chaining operation is done on-the-fly, so concatenating large-scale
datasets with this class will be efficient.
Args:
datasets (iterable of IterableDataset): datasets to be chained together
"""
def __init__(self, datasets: Iterable[Dataset]) -> None:
super().__init__()
self.datasets = datasets
def __iter__(self):
for d in self.datasets:
assert isinstance(
d, IterableDataset
), "ChainDataset only supports IterableDataset"
yield from d
def __len__(self):
total = 0
for d in self.datasets:
assert isinstance(
d, IterableDataset
), "ChainDataset only supports IterableDataset"
total += len(d) # type: ignore[arg-type]
return total
class Subset(Dataset[_T_co]):
r"""
Subset of a dataset at specified indices.
Args:
dataset (Dataset): The whole Dataset
indices (sequence): Indices in the whole set selected for subset
"""
dataset: Dataset[_T_co]
indices: Sequence[int]
def __init__(self, dataset: Dataset[_T_co], indices: Sequence[int]) -> None:
self.dataset = dataset
self.indices = indices
def __getitem__(self, idx):
if isinstance(idx, list):
return self.dataset[[self.indices[i] for i in idx]]
return self.dataset[self.indices[idx]]
def __getitems__(self, indices: list[int]) -> list[_T_co]:
# add batched sampling support when parent dataset supports it.
# see torch.utils.data._utils.fetch._MapDatasetFetcher
if callable(getattr(self.dataset, "__getitems__", None)):
return self.dataset.__getitems__([self.indices[idx] for idx in indices]) # type: ignore[attr-defined]
else:
return [self.dataset[self.indices[idx]] for idx in indices]
def __len__(self):
return len(self.indices)
def random_split(
dataset: Dataset[_T],
lengths: Sequence[Union[int, float]],
generator: Optional[Generator] = default_generator,
) -> list[Subset[_T]]:
r"""
Randomly split a dataset into non-overlapping new datasets of given lengths.
If a list of fractions that sum up to 1 is given,
the lengths will be computed automatically as
floor(frac * len(dataset)) for each fraction provided.
After computing the lengths, if there are any remainders, 1 count will be
distributed in round-robin fashion to the lengths
until there are no remainders left.
Optionally fix the generator for reproducible results, e.g.:
Example:
>>> # xdoctest: +SKIP
>>> generator1 = torch.Generator().manual_seed(42)
>>> generator2 = torch.Generator().manual_seed(42)
>>> random_split(range(10), [3, 7], generator=generator1)
>>> random_split(range(30), [0.3, 0.3, 0.4], generator=generator2)
Args:
dataset (Dataset): Dataset to be split
lengths (sequence): lengths or fractions of splits to be produced
generator (Generator): Generator used for the random permutation.
"""
if math.isclose(sum(lengths), 1) and sum(lengths) <= 1:
subset_lengths: list[int] = []
for i, frac in enumerate(lengths):
if frac < 0 or frac > 1:
raise ValueError(f"Fraction at index {i} is not between 0 and 1")
n_items_in_split = int(
math.floor(len(dataset) * frac) # type: ignore[arg-type]
)
subset_lengths.append(n_items_in_split)
remainder = len(dataset) - sum(subset_lengths) # type: ignore[arg-type]
# add 1 to all the lengths in round-robin fashion until the remainder is 0
for i in range(remainder):
idx_to_add_at = i % len(subset_lengths)
subset_lengths[idx_to_add_at] += 1
lengths = subset_lengths
for i, length in enumerate(lengths):
if length == 0:
warnings.warn(
f"Length of split at index {i} is 0. "
f"This might result in an empty dataset."
)
# Cannot verify that dataset is Sized
if sum(lengths) != len(dataset): # type: ignore[arg-type]
raise ValueError(
"Sum of input lengths does not equal the length of the input dataset!"
)
indices = randperm(sum(lengths), generator=generator).tolist() # type: ignore[arg-type, call-overload]
lengths = cast(Sequence[int], lengths)
return [
Subset(dataset, indices[offset - length : offset])
for offset, length in zip(itertools.accumulate(lengths), lengths)
]
```
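A minimal sketch of `ConcatDataset` and `Subset` on toy tensors (the values below are illustrative):
```py
import torch
from torch.utils.data import ConcatDataset, Subset, TensorDataset

a = TensorDataset(torch.arange(4))        # samples 0..3
b = TensorDataset(torch.arange(4, 10))    # samples 4..9
combined = ConcatDataset([a, b])          # len(combined) == 10
assert combined[6][0].item() == 6         # index 6 is resolved into dataset `b`
evens = Subset(combined, indices=list(range(0, 10, 2)))
assert [evens[i][0].item() for i in range(len(evens))] == [0, 2, 4, 6, 8]
```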
|
=======================================================================================================================
SOURCE CODE FILE: distributed.py
LINES: 1
SIZE: 6.13 KB
PATH: scripts\freecad_env\Lib\site-packages\torch\utils\data\distributed.py
ENCODING: utf-8
```py
import math
from collections.abc import Iterator
from typing import Optional, TypeVar
import torch
import torch.distributed as dist
from torch.utils.data.dataset import Dataset
from torch.utils.data.sampler import Sampler
__all__ = ["DistributedSampler"]
_T_co = TypeVar("_T_co", covariant=True)
class DistributedSampler(Sampler[_T_co]):
r"""Sampler that restricts data loading to a subset of the dataset.
It is especially useful in conjunction with
:class:`torch.nn.parallel.DistributedDataParallel`. In such a case, each
process can pass a :class:`~torch.utils.data.DistributedSampler` instance as a
:class:`~torch.utils.data.DataLoader` sampler, and load a subset of the
original dataset that is exclusive to it.
.. note::
        Dataset is assumed to be of constant size and any instance of it is expected to
        always return the same elements in the same order.
Args:
dataset: Dataset used for sampling.
num_replicas (int, optional): Number of processes participating in
distributed training. By default, :attr:`world_size` is retrieved from the
current distributed group.
rank (int, optional): Rank of the current process within :attr:`num_replicas`.
By default, :attr:`rank` is retrieved from the current distributed
group.
shuffle (bool, optional): If ``True`` (default), sampler will shuffle the
indices.
seed (int, optional): random seed used to shuffle the sampler if
:attr:`shuffle=True`. This number should be identical across all
processes in the distributed group. Default: ``0``.
drop_last (bool, optional): if ``True``, then the sampler will drop the
tail of the data to make it evenly divisible across the number of
replicas. If ``False``, the sampler will add extra indices to make
the data evenly divisible across the replicas. Default: ``False``.
.. warning::
In distributed mode, calling the :meth:`set_epoch` method at
the beginning of each epoch **before** creating the :class:`DataLoader` iterator
is necessary to make shuffling work properly across multiple epochs. Otherwise,
        the same ordering will always be used.
Example::
>>> # xdoctest: +SKIP
>>> sampler = DistributedSampler(dataset) if is_distributed else None
>>> loader = DataLoader(dataset, shuffle=(sampler is None),
... sampler=sampler)
>>> for epoch in range(start_epoch, n_epochs):
... if is_distributed:
... sampler.set_epoch(epoch)
... train(loader)
"""
def __init__(
self,
dataset: Dataset,
num_replicas: Optional[int] = None,
rank: Optional[int] = None,
shuffle: bool = True,
seed: int = 0,
drop_last: bool = False,
) -> None:
if num_replicas is None:
if not dist.is_available():
raise RuntimeError("Requires distributed package to be available")
num_replicas = dist.get_world_size()
if rank is None:
if not dist.is_available():
raise RuntimeError("Requires distributed package to be available")
rank = dist.get_rank()
if rank >= num_replicas or rank < 0:
raise ValueError(
f"Invalid rank {rank}, rank should be in the interval [0, {num_replicas - 1}]"
)
self.dataset = dataset
self.num_replicas = num_replicas
self.rank = rank
self.epoch = 0
self.drop_last = drop_last
# If the dataset length is evenly divisible by # of replicas, then there
# is no need to drop any data, since the dataset will be split equally.
if self.drop_last and len(self.dataset) % self.num_replicas != 0: # type: ignore[arg-type]
# Split to nearest available length that is evenly divisible.
# This is to ensure each rank receives the same amount of data when
# using this Sampler.
self.num_samples = math.ceil(
(len(self.dataset) - self.num_replicas) / self.num_replicas # type: ignore[arg-type]
)
else:
self.num_samples = math.ceil(len(self.dataset) / self.num_replicas) # type: ignore[arg-type]
self.total_size = self.num_samples * self.num_replicas
self.shuffle = shuffle
self.seed = seed
def __iter__(self) -> Iterator[_T_co]:
if self.shuffle:
# deterministically shuffle based on epoch and seed
g = torch.Generator()
g.manual_seed(self.seed + self.epoch)
indices = torch.randperm(len(self.dataset), generator=g).tolist() # type: ignore[arg-type]
else:
indices = list(range(len(self.dataset))) # type: ignore[arg-type]
if not self.drop_last:
# add extra samples to make it evenly divisible
padding_size = self.total_size - len(indices)
if padding_size <= len(indices):
indices += indices[:padding_size]
else:
indices += (indices * math.ceil(padding_size / len(indices)))[
:padding_size
]
else:
# remove tail of data to make it evenly divisible.
indices = indices[: self.total_size]
assert len(indices) == self.total_size
# subsample
indices = indices[self.rank : self.total_size : self.num_replicas]
assert len(indices) == self.num_samples
return iter(indices)
def __len__(self) -> int:
return self.num_samples
def set_epoch(self, epoch: int) -> None:
r"""
Set the epoch for this sampler.
When :attr:`shuffle=True`, this ensures all replicas
use a different random ordering for each epoch. Otherwise, the next iteration of this
sampler will yield the same ordering.
Args:
epoch (int): Epoch number.
"""
self.epoch = epoch
```
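A minimal sketch of the padding and interleaved subsampling performed by `DistributedSampler`; passing `num_replicas`/`rank` explicitly avoids the need for an initialized process group:
```py
import torch
from torch.utils.data import DistributedSampler, TensorDataset

ds = TensorDataset(torch.arange(10))
rank0 = DistributedSampler(ds, num_replicas=3, rank=0, shuffle=False)
rank1 = DistributedSampler(ds, num_replicas=3, rank=1, shuffle=False)
print(list(rank0))  # [0, 3, 6, 9]
print(list(rank1))  # [1, 4, 7, 0]  (indices padded to total_size=12, then strided by rank)
```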
|
=================================================================================================================
SOURCE CODE FILE: graph.py
LINES: 1
SIZE: 5.82 KB
PATH: scripts\freecad_env\Lib\site-packages\torch\utils\data\graph.py
ENCODING: utf-8
```py
# mypy: allow-untyped-defs
import io
import pickle
import warnings
from collections.abc import Collection
from typing import Optional, Union
from torch.utils._import_utils import dill_available
from torch.utils.data.datapipes.datapipe import IterDataPipe, MapDataPipe
__all__ = ["traverse", "traverse_dps"]
DataPipe = Union[IterDataPipe, MapDataPipe]
DataPipeGraph = dict[int, tuple[DataPipe, "DataPipeGraph"]]
def _stub_unpickler():
return "STUB"
# TODO(VitalyFedyunin): Make sure it works without dill module installed
def _list_connected_datapipes(
scan_obj: DataPipe, only_datapipe: bool, cache: set[int]
) -> list[DataPipe]:
f = io.BytesIO()
p = pickle.Pickler(
f
) # Not going to work for lambdas, but dill infinite loops on typing and can't be used as is
if dill_available():
from dill import Pickler as dill_Pickler
d = dill_Pickler(f)
else:
d = None
captured_connections = []
def getstate_hook(ori_state):
state = None
if isinstance(ori_state, dict):
state = {}
for k, v in ori_state.items():
if isinstance(v, (IterDataPipe, MapDataPipe, Collection)):
state[k] = v
elif isinstance(ori_state, (tuple, list)):
state = [] # type: ignore[assignment]
for v in ori_state:
if isinstance(v, (IterDataPipe, MapDataPipe, Collection)):
state.append(v) # type: ignore[attr-defined]
elif isinstance(ori_state, (IterDataPipe, MapDataPipe, Collection)):
state = ori_state # type: ignore[assignment]
return state
def reduce_hook(obj):
if obj == scan_obj or id(obj) in cache:
raise NotImplementedError
else:
captured_connections.append(obj)
# Adding id to remove duplicate DataPipe serialized at the same level
cache.add(id(obj))
return _stub_unpickler, ()
datapipe_classes: tuple[type[DataPipe]] = (IterDataPipe, MapDataPipe) # type: ignore[assignment]
try:
for cls in datapipe_classes:
cls.set_reduce_ex_hook(reduce_hook)
if only_datapipe:
cls.set_getstate_hook(getstate_hook)
try:
p.dump(scan_obj)
except (pickle.PickleError, AttributeError, TypeError):
if dill_available():
d.dump(scan_obj)
else:
raise
finally:
for cls in datapipe_classes:
cls.set_reduce_ex_hook(None)
if only_datapipe:
cls.set_getstate_hook(None)
if dill_available():
from dill import extend as dill_extend
dill_extend(False) # Undo change to dispatch table
return captured_connections
def traverse_dps(datapipe: DataPipe) -> DataPipeGraph:
r"""
Traverse the DataPipes and their attributes to extract the DataPipe graph.
    This only looks into the attributes of each DataPipe that are either a
    DataPipe or a Python collection object such as ``list``, ``tuple``,
    ``set`` and ``dict``.
Args:
datapipe: the end DataPipe of the graph
Returns:
A graph represented as a nested dictionary, where keys are ids of DataPipe instances
and values are tuples of DataPipe instance and the sub-graph
"""
cache: set[int] = set()
return _traverse_helper(datapipe, only_datapipe=True, cache=cache)
def traverse(datapipe: DataPipe, only_datapipe: Optional[bool] = None) -> DataPipeGraph:
r"""
Traverse the DataPipes and their attributes to extract the DataPipe graph.
[Deprecated]
    When ``only_datapipe`` is specified as ``True``, it only looks into the
    attributes of each DataPipe that are either a DataPipe or a Python collection object
    such as ``list``, ``tuple``, ``set`` and ``dict``.
Note:
This function is deprecated. Please use `traverse_dps` instead.
Args:
datapipe: the end DataPipe of the graph
only_datapipe: If ``False`` (default), all attributes of each DataPipe are traversed.
          This argument is deprecated and will be removed after the next release.
Returns:
A graph represented as a nested dictionary, where keys are ids of DataPipe instances
and values are tuples of DataPipe instance and the sub-graph
"""
    msg = (
        "`traverse` function is deprecated and will be removed after 1.13. "
"Please use `traverse_dps` instead."
)
if not only_datapipe:
msg += " And, the behavior will be changed to the equivalent of `only_datapipe=True`."
warnings.warn(msg, FutureWarning)
if only_datapipe is None:
only_datapipe = False
cache: set[int] = set()
return _traverse_helper(datapipe, only_datapipe, cache)
# Add cache here to prevent infinite recursion on DataPipe
def _traverse_helper(
datapipe: DataPipe, only_datapipe: bool, cache: set[int]
) -> DataPipeGraph:
if not isinstance(datapipe, (IterDataPipe, MapDataPipe)):
raise RuntimeError(
f"Expected `IterDataPipe` or `MapDataPipe`, but {type(datapipe)} is found"
)
dp_id = id(datapipe)
if dp_id in cache:
return {}
cache.add(dp_id)
# Using cache.copy() here is to prevent the same DataPipe pollutes the cache on different paths
items = _list_connected_datapipes(datapipe, only_datapipe, cache.copy())
d: DataPipeGraph = {dp_id: (datapipe, {})}
for item in items:
# Using cache.copy() here is to prevent recursion on a single path rather than global graph
# Single DataPipe can present multiple times in different paths in graph
d[dp_id][1].update(_traverse_helper(item, only_datapipe, cache.copy()))
return d
```
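A minimal sketch of `traverse_dps` on a two-stage toy pipeline (the pipeline below is an assumption):
```py
from torch.utils.data.datapipes.iter import IterableWrapper
from torch.utils.data.graph import traverse_dps

def double(x):
    return x * 2

source = IterableWrapper(range(5))
mapped = source.map(double)
graph = traverse_dps(mapped)
# graph looks like {id(mapped): (mapped, {id(source): (source, {})})}
```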
|
==========================================================================================================================
SOURCE CODE FILE: graph_settings.py
LINES: 1
SIZE: 5.58 KB
PATH: scripts\freecad_env\Lib\site-packages\torch\utils\data\graph_settings.py
ENCODING: utf-8
```py
# mypy: allow-untyped-defs
import inspect
import warnings
from typing import Any, Optional
from typing_extensions import deprecated
import torch
from torch.utils.data.datapipes.iter.sharding import (
_ShardingIterDataPipe,
SHARDING_PRIORITIES,
)
from torch.utils.data.graph import DataPipe, DataPipeGraph, traverse_dps
__all__ = [
"apply_random_seed",
"apply_sharding",
"apply_shuffle_seed",
"apply_shuffle_settings",
"get_all_graph_pipes",
]
def get_all_graph_pipes(graph: DataPipeGraph) -> list[DataPipe]:
return _get_all_graph_pipes_helper(graph, set())
def _get_all_graph_pipes_helper(
graph: DataPipeGraph, id_cache: set[int]
) -> list[DataPipe]:
results: list[DataPipe] = []
for dp_id, (datapipe, sub_graph) in graph.items():
if dp_id in id_cache:
continue
id_cache.add(dp_id)
results.append(datapipe)
results.extend(_get_all_graph_pipes_helper(sub_graph, id_cache))
return results
def _is_sharding_datapipe(datapipe: DataPipe) -> bool:
return isinstance(datapipe, _ShardingIterDataPipe) or (
hasattr(datapipe, "apply_sharding")
and inspect.ismethod(datapipe.apply_sharding)
)
def apply_sharding(
datapipe: DataPipe,
num_of_instances: int,
instance_id: int,
sharding_group=SHARDING_PRIORITIES.DEFAULT,
) -> DataPipe:
r"""
Apply dynamic sharding over the ``sharding_filter`` DataPipe that has a method ``apply_sharding``.
    RuntimeError will be raised when multiple ``sharding_filter`` are present in the same branch.
"""
graph = traverse_dps(datapipe)
def _helper(graph, prev_applied=None):
for dp, sub_graph in graph.values():
applied = None
if _is_sharding_datapipe(dp):
if prev_applied is not None:
raise RuntimeError(
"Sharding twice on a single pipeline is likely unintended and will cause data loss. "
f"Sharding already applied to {prev_applied} while trying to apply to {dp}"
)
# For BC, only provide sharding_group if accepted
sig = inspect.signature(dp.apply_sharding)
if len(sig.parameters) < 3:
dp.apply_sharding(num_of_instances, instance_id)
else:
dp.apply_sharding(
num_of_instances, instance_id, sharding_group=sharding_group
)
applied = dp
if applied is None:
applied = prev_applied
_helper(sub_graph, applied)
_helper(graph)
return datapipe
def _is_shuffle_datapipe(datapipe: DataPipe) -> bool:
return (
hasattr(datapipe, "set_shuffle")
and hasattr(datapipe, "set_seed")
and inspect.ismethod(datapipe.set_shuffle)
and inspect.ismethod(datapipe.set_seed)
)
def apply_shuffle_settings(
datapipe: DataPipe, shuffle: Optional[bool] = None
) -> DataPipe:
r"""
Traverse the graph of ``DataPipes`` to find and set shuffle attribute.
Apply the method to each `DataPipe` that has APIs of ``set_shuffle``
and ``set_seed``.
Args:
datapipe: DataPipe that needs to set shuffle attribute
shuffle: Shuffle option (default: ``None`` and no-op to the graph)
"""
if shuffle is None:
return datapipe
graph = traverse_dps(datapipe)
all_pipes = get_all_graph_pipes(graph)
shufflers = [pipe for pipe in all_pipes if _is_shuffle_datapipe(pipe)]
if not shufflers and shuffle:
warnings.warn(
"`shuffle=True` was set, but the datapipe does not contain a `Shuffler`. Adding one at the end. "
"Be aware that the default buffer size might not be sufficient for your task."
)
datapipe = datapipe.shuffle()
shufflers = [
datapipe,
]
for shuffler in shufflers:
shuffler.set_shuffle(shuffle)
return datapipe
@deprecated(
"`apply_shuffle_seed` is deprecated since 1.12 and will be removed in the future releases. "
"Please use `apply_random_seed` instead.",
category=FutureWarning,
)
def apply_shuffle_seed(datapipe: DataPipe, rng: Any) -> DataPipe:
return apply_random_seed(datapipe, rng)
def _is_random_datapipe(datapipe: DataPipe) -> bool:
return hasattr(datapipe, "set_seed") and inspect.ismethod(datapipe.set_seed)
def apply_random_seed(datapipe: DataPipe, rng: torch.Generator) -> DataPipe:
r"""
    Traverse the graph of ``DataPipes`` to find each random ``DataPipe`` that has a ``set_seed`` API.
    Then set the random seed of those ``DataPipe`` based on the provided RNG.
Args:
datapipe: DataPipe that needs to set randomness
rng: Random number generator to generate random seeds
"""
graph = traverse_dps(datapipe)
all_pipes = get_all_graph_pipes(graph)
# Using a set to track id of DataPipe to prevent setting randomness per DataPipe more than once.
# And, `id` is used in case of unhashable DataPipe
cache = set()
random_datapipes = []
for pipe in all_pipes:
if id(pipe) in cache:
continue
if _is_random_datapipe(pipe):
random_datapipes.append(pipe)
cache.add(id(pipe))
for pipe in random_datapipes:
random_seed = int(
torch.empty((), dtype=torch.int64).random_(generator=rng).item()
)
pipe.set_seed(random_seed)
return datapipe
```
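A minimal sketch combining `apply_sharding` and `apply_random_seed` on a toy pipeline with a `sharding_filter` (worker counts and seed are illustrative):
```py
import torch
from torch.utils.data.datapipes.iter import IterableWrapper
from torch.utils.data.graph_settings import apply_random_seed, apply_sharding

dp = IterableWrapper(range(8)).shuffle().sharding_filter()
apply_sharding(dp, num_of_instances=2, instance_id=0)     # this instance keeps every 2nd sample
apply_random_seed(dp, torch.Generator().manual_seed(0))   # seeds the Shuffler deterministically
print(len(list(dp)))  # 4
```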
|
===================================================================================================================
SOURCE CODE FILE: sampler.py
LINES: 1
SIZE: 12.87 KB
PATH: scripts\freecad_env\Lib\site-packages\torch\utils\data\sampler.py
ENCODING: utf-8
```py
# mypy: allow-untyped-defs
import itertools
from collections.abc import Iterable, Iterator, Sequence, Sized
from typing import Generic, Optional, TypeVar, Union
import torch
__all__ = [
"BatchSampler",
"RandomSampler",
"Sampler",
"SequentialSampler",
"SubsetRandomSampler",
"WeightedRandomSampler",
]
_T_co = TypeVar("_T_co", covariant=True)
class Sampler(Generic[_T_co]):
r"""Base class for all Samplers.
Every Sampler subclass has to provide an :meth:`__iter__` method, providing a
way to iterate over indices or lists of indices (batches) of dataset elements,
and may provide a :meth:`__len__` method that returns the length of the returned iterators.
Args:
data_source (Dataset): This argument is not used and will be removed in 2.2.0.
            You may still have a custom implementation that utilizes it.
Example:
>>> # xdoctest: +SKIP
        >>> class AscendingSequenceLengthSampler(Sampler[int]):
>>> def __init__(self, data: List[str]) -> None:
>>> self.data = data
>>>
>>> def __len__(self) -> int:
>>> return len(self.data)
>>>
>>> def __iter__(self) -> Iterator[int]:
>>> sizes = torch.tensor([len(x) for x in self.data])
>>> yield from torch.argsort(sizes).tolist()
>>>
        >>> class AscendingSequenceLengthBatchSampler(Sampler[List[int]]):
>>> def __init__(self, data: List[str], batch_size: int) -> None:
>>> self.data = data
>>> self.batch_size = batch_size
>>>
>>> def __len__(self) -> int:
>>> return (len(self.data) + self.batch_size - 1) // self.batch_size
>>>
>>> def __iter__(self) -> Iterator[List[int]]:
>>> sizes = torch.tensor([len(x) for x in self.data])
>>> for batch in torch.chunk(torch.argsort(sizes), len(self)):
>>> yield batch.tolist()
.. note:: The :meth:`__len__` method isn't strictly required by
:class:`~torch.utils.data.DataLoader`, but is expected in any
calculation involving the length of a :class:`~torch.utils.data.DataLoader`.
"""
def __init__(self, data_source: Optional[Sized] = None) -> None:
if data_source is not None:
import warnings
            warnings.warn(
                "`data_source` argument is not used and will be removed in 2.2.0. "
                "You may still have a custom implementation that utilizes it."
)
def __iter__(self) -> Iterator[_T_co]:
raise NotImplementedError
# NOTE [ Lack of Default `__len__` in Python Abstract Base Classes ]
#
# Many times we have an abstract class representing a collection/iterable of
# data, e.g., `torch.utils.data.Sampler`, with its subclasses optionally
# implementing a `__len__` method. In such cases, we must make sure to not
# provide a default implementation, because both straightforward default
# implementations have their issues:
#
# + `return NotImplemented`:
# Calling `len(subclass_instance)` raises:
# TypeError: 'NotImplementedType' object cannot be interpreted as an integer
#
# + `raise NotImplementedError`:
# This prevents triggering some fallback behavior. E.g., the built-in
# `list(X)` tries to call `len(X)` first, and executes a different code
# path if the method is not found or `NotImplemented` is returned, while
# raising a `NotImplementedError` will propagate and make the call fail
# where it could have used `__iter__` to complete the call.
#
# Thus, the only two sensible things to do are
#
# + **not** provide a default `__len__`.
#
# + raise a `TypeError` instead, which is what Python uses when users call
# a method that is not defined on an object.
# (@ssnl verifies that this works on at least Python 3.7.)
class SequentialSampler(Sampler[int]):
r"""Samples elements sequentially, always in the same order.
Args:
data_source (Dataset): dataset to sample from
"""
data_source: Sized
def __init__(self, data_source: Sized) -> None:
self.data_source = data_source
def __iter__(self) -> Iterator[int]:
return iter(range(len(self.data_source)))
def __len__(self) -> int:
return len(self.data_source)
class RandomSampler(Sampler[int]):
r"""Samples elements randomly. If without replacement, then sample from a shuffled dataset.
If with replacement, then user can specify :attr:`num_samples` to draw.
Args:
data_source (Dataset): dataset to sample from
replacement (bool): samples are drawn on-demand with replacement if ``True``, default=``False``
num_samples (int): number of samples to draw, default=`len(dataset)`.
generator (Generator): Generator used in sampling.
"""
data_source: Sized
replacement: bool
def __init__(
self,
data_source: Sized,
replacement: bool = False,
num_samples: Optional[int] = None,
generator=None,
) -> None:
self.data_source = data_source
self.replacement = replacement
self._num_samples = num_samples
self.generator = generator
if not isinstance(self.replacement, bool):
raise TypeError(
f"replacement should be a boolean value, but got replacement={self.replacement}"
)
if not isinstance(self.num_samples, int) or self.num_samples <= 0:
raise ValueError(
f"num_samples should be a positive integer value, but got num_samples={self.num_samples}"
)
@property
def num_samples(self) -> int:
# dataset size might change at runtime
if self._num_samples is None:
return len(self.data_source)
return self._num_samples
def __iter__(self) -> Iterator[int]:
n = len(self.data_source)
if self.generator is None:
seed = int(torch.empty((), dtype=torch.int64).random_().item())
generator = torch.Generator()
generator.manual_seed(seed)
else:
generator = self.generator
if self.replacement:
for _ in range(self.num_samples // 32):
yield from torch.randint(
high=n, size=(32,), dtype=torch.int64, generator=generator
).tolist()
yield from torch.randint(
high=n,
size=(self.num_samples % 32,),
dtype=torch.int64,
generator=generator,
).tolist()
else:
for _ in range(self.num_samples // n):
yield from torch.randperm(n, generator=generator).tolist()
yield from torch.randperm(n, generator=generator).tolist()[
: self.num_samples % n
]
def __len__(self) -> int:
return self.num_samples
class SubsetRandomSampler(Sampler[int]):
r"""Samples elements randomly from a given list of indices, without replacement.
Args:
indices (sequence): a sequence of indices
generator (Generator): Generator used in sampling.
"""
indices: Sequence[int]
def __init__(self, indices: Sequence[int], generator=None) -> None:
self.indices = indices
self.generator = generator
def __iter__(self) -> Iterator[int]:
for i in torch.randperm(len(self.indices), generator=self.generator):
yield self.indices[i]
def __len__(self) -> int:
return len(self.indices)
class WeightedRandomSampler(Sampler[int]):
r"""Samples elements from ``[0,..,len(weights)-1]`` with given probabilities (weights).
Args:
weights (sequence) : a sequence of weights, not necessary summing up to one
num_samples (int): number of samples to draw
replacement (bool): if ``True``, samples are drawn with replacement.
If not, they are drawn without replacement, which means that when a
sample index is drawn for a row, it cannot be drawn again for that row.
generator (Generator): Generator used in sampling.
Example:
>>> # xdoctest: +IGNORE_WANT("non-deterministic")
>>> list(WeightedRandomSampler([0.1, 0.9, 0.4, 0.7, 3.0, 0.6], 5, replacement=True))
[4, 4, 1, 4, 5]
>>> list(WeightedRandomSampler([0.9, 0.4, 0.05, 0.2, 0.3, 0.1], 5, replacement=False))
[0, 1, 4, 3, 2]
"""
weights: torch.Tensor
num_samples: int
replacement: bool
def __init__(
self,
weights: Sequence[float],
num_samples: int,
replacement: bool = True,
generator=None,
) -> None:
if (
not isinstance(num_samples, int)
or isinstance(num_samples, bool)
or num_samples <= 0
):
raise ValueError(
f"num_samples should be a positive integer value, but got num_samples={num_samples}"
)
if not isinstance(replacement, bool):
raise ValueError(
f"replacement should be a boolean value, but got replacement={replacement}"
)
weights_tensor = torch.as_tensor(weights, dtype=torch.double)
if len(weights_tensor.shape) != 1:
raise ValueError(
"weights should be a 1d sequence but given "
f"weights have shape {tuple(weights_tensor.shape)}"
)
self.weights = weights_tensor
self.num_samples = num_samples
self.replacement = replacement
self.generator = generator
def __iter__(self) -> Iterator[int]:
rand_tensor = torch.multinomial(
self.weights, self.num_samples, self.replacement, generator=self.generator
)
yield from iter(rand_tensor.tolist())
def __len__(self) -> int:
return self.num_samples
class BatchSampler(Sampler[list[int]]):
r"""Wraps another sampler to yield a mini-batch of indices.
Args:
sampler (Sampler or Iterable): Base sampler. Can be any iterable object
batch_size (int): Size of mini-batch.
drop_last (bool): If ``True``, the sampler will drop the last batch if
its size would be less than ``batch_size``
Example:
>>> list(BatchSampler(SequentialSampler(range(10)), batch_size=3, drop_last=False))
[[0, 1, 2], [3, 4, 5], [6, 7, 8], [9]]
>>> list(BatchSampler(SequentialSampler(range(10)), batch_size=3, drop_last=True))
[[0, 1, 2], [3, 4, 5], [6, 7, 8]]
"""
def __init__(
self,
sampler: Union[Sampler[int], Iterable[int]],
batch_size: int,
drop_last: bool,
) -> None:
# Since collections.abc.Iterable does not check for `__getitem__`, which
# is one way for an object to be an iterable, we don't do an `isinstance`
# check here.
if (
not isinstance(batch_size, int)
or isinstance(batch_size, bool)
or batch_size <= 0
):
raise ValueError(
f"batch_size should be a positive integer value, but got batch_size={batch_size}"
)
if not isinstance(drop_last, bool):
raise ValueError(
f"drop_last should be a boolean value, but got drop_last={drop_last}"
)
self.sampler = sampler
self.batch_size = batch_size
self.drop_last = drop_last
def __iter__(self) -> Iterator[list[int]]:
# Implemented based on the benchmarking in https://github.com/pytorch/pytorch/pull/76951
sampler_iter = iter(self.sampler)
if self.drop_last:
# Create multiple references to the same iterator
args = [sampler_iter] * self.batch_size
for batch_droplast in zip(*args):
yield [*batch_droplast]
else:
batch = [*itertools.islice(sampler_iter, self.batch_size)]
while batch:
yield batch
batch = [*itertools.islice(sampler_iter, self.batch_size)]
def __len__(self) -> int:
# Can only be called if self.sampler has __len__ implemented
# We cannot enforce this condition, so we turn off typechecking for the
# implementation below.
# Somewhat related: see NOTE [ Lack of Default `__len__` in Python Abstract Base Classes ]
if self.drop_last:
return len(self.sampler) // self.batch_size # type: ignore[arg-type]
else:
return (len(self.sampler) + self.batch_size - 1) // self.batch_size # type: ignore[arg-type]
```
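A minimal sketch showing how a sampler and `BatchSampler` plug into `DataLoader` (toy data; the seed is illustrative):
```py
import torch
from torch.utils.data import BatchSampler, DataLoader, RandomSampler, TensorDataset

ds = TensorDataset(torch.arange(10))
sampler = RandomSampler(ds, generator=torch.Generator().manual_seed(0))
batch_sampler = BatchSampler(sampler, batch_size=4, drop_last=False)
loader = DataLoader(ds, batch_sampler=batch_sampler)
sizes = [len(batch[0]) for batch in loader]
print(sizes)  # [4, 4, 2]
```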
|
====================================================================================================================
SOURCE CODE FILE: deterministic.py
LINES: 1
SIZE: 0.62 KB
PATH: scripts\freecad_env\Lib\site-packages\torch\utils\deterministic.py
ENCODING: utf-8
```py
# mypy: allow-untyped-defs
import sys
import types
import torch
class _Deterministic(types.ModuleType):
@property
def fill_uninitialized_memory(self):
"""
Whether to fill uninitialized memory with a known value when
:meth:`torch.use_deterministic_algorithms()` is set to ``True``.
"""
return torch._C._get_deterministic_fill_uninitialized_memory()
@fill_uninitialized_memory.setter
def fill_uninitialized_memory(self, mode):
return torch._C._set_deterministic_fill_uninitialized_memory(mode)
sys.modules[__name__].__class__ = _Deterministic
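# Editorial usage sketch (illustrative, not part of the original file): once this
# module is imported, the flag behaves like a plain module attribute backed by the
# property above, e.g.
#   >>> import torch.utils.deterministic
#   >>> torch.utils.deterministic.fill_uninitialized_memory = True   # enable filling
#   >>> torch.utils.deterministic.fill_uninitialized_memory          # read it back
#   True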
```
|
=============================================================================================================
SOURCE CODE FILE: dlpack.py
LINES: 1
SIZE: 4.45 KB
PATH: scripts\freecad_env\Lib\site-packages\torch\utils\dlpack.py
ENCODING: utf-8
```py
from typing import Any
import torch
import enum
from torch._C import _from_dlpack
from torch._C import _to_dlpack as to_dlpack
class DLDeviceType(enum.IntEnum):
# Enums as in DLPack specification (aten/src/ATen/dlpack.h)
    kDLCPU = 1
    kDLGPU = 2
    kDLCPUPinned = 3
    kDLOpenCL = 4
    kDLVulkan = 7
    kDLMetal = 8
    kDLVPI = 9
    kDLROCM = 10
    kDLExtDev = 12
    kDLOneAPI = 14
torch._C._add_docstr(to_dlpack, r"""to_dlpack(tensor) -> PyCapsule
Returns an opaque object (a "DLPack capsule") representing the tensor.
.. note::
``to_dlpack`` is a legacy DLPack interface. The capsule it returns
    cannot be used for anything in Python other than as input to
``from_dlpack``. The more idiomatic use of DLPack is to call
``from_dlpack`` directly on the tensor object - this works when that
object has a ``__dlpack__`` method, which PyTorch and most other
libraries indeed have now.
.. warning::
Only call ``from_dlpack`` once per capsule produced with ``to_dlpack``.
Behavior when a capsule is consumed multiple times is undefined.
Args:
tensor: a tensor to be exported
The DLPack capsule shares the tensor's memory.
""")
# TODO: add a typing.Protocol to be able to tell Mypy that only objects with
# __dlpack__ and __dlpack_device__ methods are accepted.
def from_dlpack(ext_tensor: Any) -> 'torch.Tensor':
"""from_dlpack(ext_tensor) -> Tensor
Converts a tensor from an external library into a ``torch.Tensor``.
The returned PyTorch tensor will share the memory with the input tensor
(which may have come from another library). Note that in-place operations
will therefore also affect the data of the input tensor. This may lead to
unexpected issues (e.g., other libraries may have read-only flags or
immutable data structures), so the user should only do this if they know
for sure that this is fine.
Args:
ext_tensor (object with ``__dlpack__`` attribute, or a DLPack capsule):
The tensor or DLPack capsule to convert.
If ``ext_tensor`` is a tensor (or ndarray) object, it must support
the ``__dlpack__`` protocol (i.e., have a ``ext_tensor.__dlpack__``
method). Otherwise ``ext_tensor`` may be a DLPack capsule, which is
an opaque ``PyCapsule`` instance, typically produced by a
``to_dlpack`` function or method.
Examples::
>>> import torch.utils.dlpack
>>> t = torch.arange(4)
# Convert a tensor directly (supported in PyTorch >= 1.10)
>>> t2 = torch.from_dlpack(t)
>>> t2[:2] = -1 # show that memory is shared
>>> t2
tensor([-1, -1, 2, 3])
>>> t
tensor([-1, -1, 2, 3])
# The old-style DLPack usage, with an intermediate capsule object
>>> capsule = torch.utils.dlpack.to_dlpack(t)
>>> capsule
<capsule object "dltensor" at ...>
>>> t3 = torch.from_dlpack(capsule)
>>> t3
tensor([-1, -1, 2, 3])
>>> t3[0] = -9 # now we're sharing memory between 3 tensors
>>> t3
tensor([-9, -1, 2, 3])
>>> t2
tensor([-9, -1, 2, 3])
>>> t
tensor([-9, -1, 2, 3])
"""
if hasattr(ext_tensor, '__dlpack__'):
device = ext_tensor.__dlpack_device__()
# device is either CUDA or ROCm, we need to pass the current
# stream
if device[0] in (DLDeviceType.kDLGPU, DLDeviceType.kDLROCM):
stream = torch.cuda.current_stream(f'cuda:{device[1]}')
# cuda_stream is the pointer to the stream and it is a public
# attribute, but it is not documented
# The array API specify that the default legacy stream must be passed
# with a value of 1 for CUDA
# https://data-apis.org/array-api/latest/API_specification/array_object.html?dlpack-self-stream-none#dlpack-self-stream-none
is_cuda = device[0] == DLDeviceType.kDLGPU
# Since pytorch is not using PTDS by default, lets directly pass
# the legacy stream
stream_ptr = 1 if is_cuda and stream.cuda_stream == 0 else stream.cuda_stream
dlpack = ext_tensor.__dlpack__(stream=stream_ptr)
else:
dlpack = ext_tensor.__dlpack__()
else:
# Old versions just call the converter
dlpack = ext_tensor
return _from_dlpack(dlpack)
```
|
=================================================================================================================
SOURCE CODE FILE: file_baton.py
LINES: 1
SIZE: 1.43 KB
PATH: scripts\freecad_env\Lib\site-packages\torch\utils\file_baton.py
ENCODING: utf-8
```py
# mypy: allow-untyped-defs
import os
import time
class FileBaton:
"""A primitive, file-based synchronization utility."""
def __init__(self, lock_file_path, wait_seconds=0.1):
"""
Create a new :class:`FileBaton`.
Args:
lock_file_path: The path to the file used for locking.
wait_seconds: The seconds to periodically sleep (spin) when
calling ``wait()``.
"""
self.lock_file_path = lock_file_path
self.wait_seconds = wait_seconds
self.fd = None
def try_acquire(self):
"""
Try to atomically create a file under exclusive access.
Returns:
True if the file could be created, else False.
"""
try:
self.fd = os.open(self.lock_file_path, os.O_CREAT | os.O_EXCL)
return True
except FileExistsError:
return False
def wait(self):
"""
        Periodically sleep for a short interval until the baton is released.
The amount of time slept depends on the ``wait_seconds`` parameter
passed to the constructor.
"""
while os.path.exists(self.lock_file_path):
time.sleep(self.wait_seconds)
def release(self):
"""Release the baton and removes its file."""
if self.fd is not None:
os.close(self.fd)
os.remove(self.lock_file_path)
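# Editorial usage sketch (illustrative, not part of the original file; the lock path
# below is hypothetical). One process acquires the baton and does the work while the
# others wait for it to be released:
#   >>> baton = FileBaton('/tmp/build.lock')
#   >>> if baton.try_acquire():
#   ...     try:
#   ...         pass  # exclusive work goes here
#   ...     finally:
#   ...         baton.release()
#   ... else:
#   ...     baton.wait()  # spins until the holder removes the lock file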
```
|
===================================================================================================================
SOURCE CODE FILE: flop_counter.py
LINES: 1
SIZE: 28.87 KB
PATH: scripts\freecad_env\Lib\site-packages\torch\utils\flop_counter.py
ENCODING: utf-8
```py
# mypy: allow-untyped-defs
import torch
from torch.utils._pytree import tree_map, tree_flatten, tree_unflatten
from .module_tracker import ModuleTracker
from typing import Any, Optional, Union, TypeVar, Callable
from collections.abc import Iterator
from typing_extensions import ParamSpec
from collections import defaultdict
from torch.utils._python_dispatch import TorchDispatchMode
from math import prod
from functools import wraps
import warnings
__all__ = ["FlopCounterMode", "register_flop_formula"]
_T = TypeVar("_T")
_P = ParamSpec("_P")
aten = torch.ops.aten
def get_shape(i):
if isinstance(i, torch.Tensor):
return i.shape
return i
flop_registry: dict[Any, Any] = {}
def shape_wrapper(f):
@wraps(f)
def nf(*args, out_val=None, **kwargs):
args, kwargs, out_shape = tree_map(get_shape, (args, kwargs, out_val))
return f(*args, out_shape=out_shape, **kwargs)
return nf
def register_flop_formula(targets, get_raw=False) -> Callable[[Callable[_P, _T]], Callable[_P, _T]]:
def register_fun(flop_formula: Callable[_P, _T]) -> Callable[_P, _T]:
if not get_raw:
flop_formula = shape_wrapper(flop_formula)
def register(target):
if not isinstance(target, torch._ops.OpOverloadPacket):
raise ValueError(
f"register_flop_formula(targets): expected each target to be "
f"OpOverloadPacket (i.e. torch.ops.mylib.foo), got "
f"{target} which is of type {type(target)}")
if target in flop_registry:
raise RuntimeError(f"duplicate registrations for {target}")
flop_registry[target] = flop_formula
# To handle allowing multiple aten_ops at once
torch.utils._pytree.tree_map_(register, targets)
return flop_formula
return register_fun
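# Editorial sketch (illustrative, not part of the original file): registering a flop
# formula for a hypothetical custom op ``torch.ops.mylib.my_matmul``. Because of
# ``shape_wrapper``, the formula receives shapes rather than tensors unless
# ``get_raw=True`` is passed.
#   >>> @register_flop_formula(torch.ops.mylib.my_matmul)
#   ... def my_matmul_flop(a_shape, b_shape, *args, out_shape=None, **kwargs) -> int:
#   ...     m, k = a_shape
#   ...     k2, n = b_shape
#   ...     return m * n * 2 * k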
@register_flop_formula(aten.mm)
def mm_flop(a_shape, b_shape, *args, out_shape=None, **kwargs) -> int:
"""Count flops for matmul."""
# Inputs should be a list of length 2.
# Inputs contains the shapes of two matrices.
m, k = a_shape
k2, n = b_shape
assert k == k2
# NB(chilli): Should be 2 * k - 1 technically for FLOPs.
return m * n * 2 * k
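# Editorial worked example (not part of the original file): a (64, 128) @ (128, 256)
# matmul counts 64 * 256 * 2 * 128 = 4,194,304 FLOPs under the "2 * k per output
# element" convention used above.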
@register_flop_formula(aten.addmm)
def addmm_flop(self_shape, a_shape, b_shape, out_shape=None, **kwargs) -> int:
"""Count flops for addmm."""
return mm_flop(a_shape, b_shape)
@register_flop_formula(aten.bmm)
def bmm_flop(a_shape, b_shape, out_shape=None, **kwargs) -> int:
"""Count flops for the bmm operation."""
# Inputs should be a list of length 2.
# Inputs contains the shapes of two tensor.
b, m, k = a_shape
b2, k2, n = b_shape
assert b == b2
assert k == k2
# NB(chilli): Should be 2 * k - 1 technically for FLOPs.
flop = b * m * n * 2 * k
return flop
@register_flop_formula(aten.baddbmm)
def baddbmm_flop(self_shape, a_shape, b_shape, out_shape=None, **kwargs) -> int:
"""Count flops for the baddbmm operation."""
# Inputs should be a list of length 3.
# Inputs contains the shapes of three tensors.
return bmm_flop(a_shape, b_shape)
@register_flop_formula(aten._scaled_mm)
def _scaled_mm_flop(
a_shape,
b_shape,
scale_a_shape,
scale_b_shape,
bias_shape=None,
scale_result_shape=None,
out_dtype=None,
use_fast_accum=False,
out_shape=None,
**kwargs,
) -> int:
"""Count flops for _scaled_mm."""
return mm_flop(a_shape, b_shape)
def conv_flop_count(
x_shape: list[int],
w_shape: list[int],
out_shape: list[int],
transposed: bool = False,
) -> int:
"""Count flops for convolution.
    Note that only multiplications are counted; computation for the bias is ignored.
    Flops for a transposed convolution are calculated as
    flops = prod(x_shape[2:]) * prod(w_shape) * batch_size * 2.
Args:
x_shape (list(int)): The input shape before convolution.
w_shape (list(int)): The filter shape.
out_shape (list(int)): The output shape after convolution.
transposed (bool): is the convolution transposed
Returns:
int: the number of flops
"""
batch_size = x_shape[0]
conv_shape = (x_shape if transposed else out_shape)[2:]
c_out, c_in, *filter_size = w_shape
"""
General idea here is that for a regular conv, for each point in the output
spatial dimension we convolve the filter with something (hence
`prod(conv_shape) * prod(filter_size)` ops). Then, this gets multiplied by
1. batch_size, 2. the cross product of input and weight channels.
For the transpose, it's not each point in the *output* spatial dimension but
each point in the *input* spatial dimension.
"""
# NB(chilli): I don't think this properly accounts for padding :think:
# NB(chilli): Should be 2 * c_in - 1 technically for FLOPs.
flop = prod(conv_shape) * prod(filter_size) * batch_size * c_out * c_in * 2
return flop
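# Editorial worked example (not part of the original file): for x_shape = (1, 3, 32, 32),
# w_shape = (16, 3, 3, 3), out_shape = (1, 16, 30, 30) (stride 1, no padding, not
# transposed): conv_shape = (30, 30), filter_size = (3, 3), so the count is
# 900 * 9 * 1 * 16 * 3 * 2 = 777,600 FLOPs.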
@register_flop_formula([aten.convolution, aten._convolution])
def conv_flop(x_shape, w_shape, _bias, _stride, _padding, _dilation, transposed, *args, out_shape=None, **kwargs) -> int:
"""Count flops for convolution."""
return conv_flop_count(x_shape, w_shape, out_shape, transposed=transposed)
@register_flop_formula(aten.convolution_backward)
def conv_backward_flop(
grad_out_shape,
x_shape,
w_shape,
_bias,
_stride,
_padding,
_dilation,
transposed,
_output_padding,
_groups,
output_mask,
out_shape) -> int:
def t(shape):
return [shape[1], shape[0]] + list(shape[2:])
flop_count = 0
"""
Let's say we have a regular 1D conv
{A, B, C} [inp]
{i, j} [weight]
=> (conv)
{Ai + Bj, Bi + Cj} [out]
And as a reminder, the transposed conv of the above is
=> {Ai, Aj + Bi, Bj + Ci, Cj} [transposed conv out]
For the backwards of conv, we now have
{D, E} [grad_out]
{A, B, C} [inp]
{i, j} [weight]
# grad_inp as conv_transpose(grad_out, weight)
Let's first compute grad_inp. To do so, we can simply look at all the
multiplications that each element of inp is involved in. For example, A is
only involved in the first element of the output (and thus only depends upon
D in grad_out), and C is only involved in the last element of the output
(and thus only depends upon E in grad_out)
{Di, Dj + Ei, Ej} [grad_inp]
Note that this corresponds to the below conv_transpose. This gives us the
output_mask[0] branch, which is grad_inp.
{D, E} [inp (grad_out)]
{i, j} [weight]
=> (conv_transpose)
{Di, Dj + Ei, Ej} [out (grad_inp)]
I leave the fact that grad_inp for a transposed conv is just conv(grad_out,
weight) as an exercise for the reader.
# grad_weight as conv(inp, grad_out)
To compute grad_weight, we again look at the terms in the output, which as
a reminder is:
=> {Ai + Bj, Bi + Cj} [out]
=> {D, E} [grad_out]
If we manually compute the gradient for the weights, we see it's
{AD + BE, BD + CE} [grad_weight]
This corresponds to the below conv
{A, B, C} [inp]
{D, E} [weight (grad_out)]
=> (conv)
{AD + BE, BD + CE} [out (grad_weight)]
# grad_weight of transposed conv as conv(grad_out, inp)
As a reminder, the terms of the output of a transposed conv are:
=> {Ai, Aj + Bi, Bj + Ci, Cj} [transposed conv out]
=> {D, E, F, G} [grad_out]
Manually computing the gradient for the weights, we see it's
{AD + BE + CF, AE + BF + CG} [grad_weight]
This corresponds to the below conv
{D, E, F, G} [inp (grad_out)]
{A, B, C} [weight (inp)]
=> (conv)
{AD + BE + CF, AE + BF + CG} [out (grad_weight)]
For the full backwards formula, there are also some details involving
transpose of the batch/channel dimensions and groups, but I skip those for
the sake of brevity (and they're pretty similar to matmul backwards)
Check [conv backwards decomposition as conv forwards]
"""
# grad_inp as conv_transpose(grad_out, weight)
if output_mask[0]:
grad_input_shape = get_shape(out_shape[0])
flop_count += conv_flop_count(grad_out_shape, w_shape, grad_input_shape, not transposed)
if output_mask[1]:
grad_weight_shape = get_shape(out_shape[1])
if transposed:
# grad_weight of transposed conv as conv(grad_out, inp)
flop_count += conv_flop_count(t(grad_out_shape), t(x_shape), t(grad_weight_shape), transposed=False)
else:
# grad_weight as conv(inp, grad_out)
flop_count += conv_flop_count(t(x_shape), t(grad_out_shape), t(grad_weight_shape), transposed=False)
return flop_count
def sdpa_flop_count(query_shape, key_shape, value_shape):
"""
Count flops for self-attention.
NB: We can assume that value_shape == key_shape
"""
b, h, s_q, d_q = query_shape
_b2, _h2, s_k, _d2 = key_shape
_b3, _h3, _s3, d_v = value_shape
    assert b == _b2 == _b3 and h == _h2 == _h3 and d_q == _d2 and s_k == _s3
total_flops = 0
# q: [b, h, s_q, d_q] @ k: [b, h, d_q, s_k] -> scores: [b, h, s_q, s_k]
total_flops += bmm_flop((b * h, s_q, d_q), (b * h, d_q, s_k))
# scores: [b, h, s_q, s_k] @ v: [b, h, s_k, d_v] -> out: [b, h, s_q, d_v]
total_flops += bmm_flop((b * h, s_q, s_k), (b * h, s_k, d_v))
return total_flops
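# Editorial worked example (not part of the original file): with b=2, h=8,
# s_q = s_k = 128 and d_q = d_v = 64, each of the two batched matmuls above contributes
# 16 * 128 * 128 * 2 * 64 = 33,554,432 FLOPs, for a total of 67,108,864.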
@register_flop_formula([aten._scaled_dot_product_efficient_attention,
aten._scaled_dot_product_flash_attention,
aten._scaled_dot_product_cudnn_attention])
def sdpa_flop(query_shape, key_shape, value_shape, *args, out_shape=None, **kwargs) -> int:
"""Count flops for self-attention."""
# NB: We aren't accounting for causal attention here
return sdpa_flop_count(query_shape, key_shape, value_shape)
def _offsets_to_lengths(offsets, max_len):
"""
If the offsets tensor is fake, then we don't know the actual lengths.
In that case, we can just assume the worst case; each batch has max length.
"""
from torch._subclasses.fake_tensor import FakeTensor
from torch._subclasses.functional_tensor import FunctionalTensor
if not isinstance(offsets, (FakeTensor, FunctionalTensor)) and offsets.device.type != "meta":
return offsets.diff().tolist()
return [max_len] * (offsets.size(0) - 1)
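# Editorial example (not part of the original file): for a real offsets tensor
# ``torch.tensor([0, 3, 5, 9])`` this returns [3, 2, 4]; for a fake/meta tensor of the
# same size with max_len=9 it falls back to the worst case [9, 9, 9].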
def _unpack_flash_attention_nested_shapes(
*,
query,
key,
value,
grad_out=None,
cum_seq_q,
cum_seq_k,
max_q,
max_k,
) -> Iterator[tuple[tuple[int, ...], tuple[int, ...], tuple[int, ...], Optional[tuple[int, ...]]]]:
"""
Given inputs to a flash_attention_(forward|backward) kernel, this will handle behavior for
NestedTensor inputs by effectively unbinding the NestedTensor and yielding the shapes for
each batch element.
In the case that this isn't a NestedTensor kernel, then it just yields the original shapes.
"""
if cum_seq_q is not None:
# This means we should be dealing with a Nested Jagged Tensor query.
# The inputs will have shape (sum(sequence len), heads, dimension)
# In comparison, non-Nested inputs have shape (batch, heads, sequence len, dimension)
# To deal with this, we convert to a shape of (batch, heads, max_seq_len, dimension)
# So the flops calculation in this case is an overestimate of the actual flops.
assert len(key.shape) == 3
assert len(value.shape) == 3
assert grad_out is None or grad_out.shape == query.shape
_, h_q, d_q = query.shape
_, h_k, d_k = key.shape
_, h_v, d_v = value.shape
assert cum_seq_q is not None
assert cum_seq_k is not None
assert cum_seq_q.shape == cum_seq_k.shape
seq_q_lengths = _offsets_to_lengths(cum_seq_q, max_q)
seq_k_lengths = _offsets_to_lengths(cum_seq_k, max_k)
for (seq_q_len, seq_k_len) in zip(seq_q_lengths, seq_k_lengths):
new_query_shape = (1, h_q, seq_q_len, d_q)
new_key_shape = (1, h_k, seq_k_len, d_k)
new_value_shape = (1, h_v, seq_k_len, d_v)
new_grad_out_shape = new_query_shape if grad_out is not None else None
yield new_query_shape, new_key_shape, new_value_shape, new_grad_out_shape
return
yield query.shape, key.shape, value.shape, grad_out.shape if grad_out is not None else None
def _unpack_efficient_attention_nested_shapes(
*,
query,
key,
value,
grad_out=None,
cu_seqlens_q,
cu_seqlens_k,
max_seqlen_q,
max_seqlen_k,
) -> Iterator[tuple[tuple[int, ...], tuple[int, ...], tuple[int, ...], Optional[tuple[int, ...]]]]:
"""
    Given inputs to an efficient_attention_(forward|backward) kernel, this will handle behavior for
NestedTensor inputs by effectively unbinding the NestedTensor and yielding the shapes for
each batch element.
In the case that this isn't a NestedTensor kernel, then it just yields the original shapes.
"""
if cu_seqlens_q is not None:
# Unlike flash_attention_forward, we get a 4D tensor instead of a 3D tensor for efficient attention.
#
# This means we should be dealing with a Nested Jagged Tensor query.
# The inputs will have shape (sum(sequence len), heads, dimension)
# In comparison, non-Nested inputs have shape (batch, heads, sequence len, dimension)
# To deal with this, we convert to a shape of (batch, heads, max_seq_len, dimension)
# So the flops calculation in this case is an overestimate of the actual flops.
assert len(key.shape) == 4
assert len(value.shape) == 4
assert grad_out is None or grad_out.shape == query.shape
_, _, h_q, d_q = query.shape
_, _, h_k, d_k = key.shape
_, _, h_v, d_v = value.shape
assert cu_seqlens_q is not None
assert cu_seqlens_k is not None
assert cu_seqlens_q.shape == cu_seqlens_k.shape
seqlens_q = _offsets_to_lengths(cu_seqlens_q, max_seqlen_q)
seqlens_k = _offsets_to_lengths(cu_seqlens_k, max_seqlen_k)
for len_q, len_k in zip(seqlens_q, seqlens_k):
new_query_shape = (1, h_q, len_q, d_q)
new_key_shape = (1, h_k, len_k, d_k)
new_value_shape = (1, h_v, len_k, d_v)
new_grad_out_shape = new_query_shape if grad_out is not None else None
yield new_query_shape, new_key_shape, new_value_shape, new_grad_out_shape
return
yield query.shape, key.shape, value.shape, grad_out.shape if grad_out is not None else None
@register_flop_formula(aten._flash_attention_forward, get_raw=True)
def _flash_attention_forward_flop(
query,
key,
value,
cum_seq_q,
cum_seq_k,
max_q,
max_k,
*args,
out_shape=None,
**kwargs
) -> int:
"""Count flops for self-attention."""
# NB: We aren't accounting for causal attention here
# in case this is a nested tensor, we unpack the individual batch elements
# and then sum the flops per batch element
sizes = _unpack_flash_attention_nested_shapes(
query=query,
key=key,
value=value,
cum_seq_q=cum_seq_q,
cum_seq_k=cum_seq_k,
max_q=max_q,
max_k=max_k,
)
return sum(
sdpa_flop_count(query_shape, key_shape, value_shape)
for query_shape, key_shape, value_shape, _ in sizes
)
@register_flop_formula(aten._efficient_attention_forward, get_raw=True)
def _efficient_attention_forward_flop(
query,
key,
value,
bias,
cu_seqlens_q,
cu_seqlens_k,
max_seqlen_q,
max_seqlen_k,
*args,
**kwargs
) -> int:
"""Count flops for self-attention."""
# NB: We aren't accounting for causal attention here
# in case this is a nested tensor, we unpack the individual batch elements
# and then sum the flops per batch element
sizes = _unpack_efficient_attention_nested_shapes(
query=query,
key=key,
value=value,
cu_seqlens_q=cu_seqlens_q,
cu_seqlens_k=cu_seqlens_k,
max_seqlen_q=max_seqlen_q,
max_seqlen_k=max_seqlen_k,
)
return sum(
sdpa_flop_count(query_shape, key_shape, value_shape)
for query_shape, key_shape, value_shape, _ in sizes
)
def sdpa_backward_flop_count(grad_out_shape, query_shape, key_shape, value_shape):
total_flops = 0
b, h, s_q, d_q = query_shape
_b2, _h2, s_k, _d2 = key_shape
_b3, _h3, _s3, d_v = value_shape
_b4, _h4, _s4, _d4 = grad_out_shape
assert b == _b2 == _b3 == _b4 and h == _h2 == _h3 == _h4 and d_q == _d2
assert d_v == _d4 and s_k == _s3 and s_q == _s4
total_flops = 0
# Step 1: We recompute the scores matrix.
# q: [b, h, s_q, d_q] @ k: [b, h, d_q, s_k] -> scores: [b, h, s_q, s_k]
total_flops += bmm_flop((b * h, s_q, d_q), (b * h, d_q, s_k))
# Step 2: We propagate the gradients through the score @ v operation.
# gradOut: [b, h, s_q, d_v] @ v: [b, h, d_v, s_k] -> gradScores: [b, h, s_q, s_k]
total_flops += bmm_flop((b * h, s_q, d_v), (b * h, d_v, s_k))
# scores: [b, h, s_k, s_q] @ gradOut: [b, h, s_q, d_v] -> gradV: [b, h, s_k, d_v]
total_flops += bmm_flop((b * h, s_k, s_q), (b * h, s_q, d_v))
    # Step 3: We propagate the gradients through the q @ k operation
# gradScores: [b, h, s_q, s_k] @ k: [b, h, s_k, d_q] -> gradQ: [b, h, s_q, d_q]
total_flops += bmm_flop((b * h, s_q, s_k), (b * h, s_k, d_q))
# q: [b, h, d_q, s_q] @ gradScores: [b, h, s_q, s_k] -> gradK: [b, h, d_q, s_k]
total_flops += bmm_flop((b * h, d_q, s_q), (b * h, s_q, s_k))
return total_flops
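# Editorial worked example (not part of the original file): the backward is counted as
# five batched matmuls versus the forward's two. Reusing b=2, h=8, s_q = s_k = 128 and
# d_q = d_v = 64, each matmul is 16 * 128 * 128 * 2 * 64 = 33,554,432 FLOPs, giving a
# backward total of 167,772,160.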
@register_flop_formula([aten._scaled_dot_product_efficient_attention_backward,
aten._scaled_dot_product_flash_attention_backward,
aten._scaled_dot_product_cudnn_attention_backward])
def sdpa_backward_flop(grad_out_shape, query_shape, key_shape, value_shape, *args, out_shape=None, **kwargs) -> int:
"""Count flops for self-attention backward."""
return sdpa_backward_flop_count(grad_out_shape, query_shape, key_shape, value_shape)
@register_flop_formula(aten._flash_attention_backward, get_raw=True)
def _flash_attention_backward_flop(
grad_out,
query,
key,
value,
out, # named _out_shape to avoid kwarg collision with out_shape created in wrapper
logsumexp,
cum_seq_q,
cum_seq_k,
max_q,
max_k,
*args,
**kwargs,
) -> int:
# in case this is a nested tensor, we unpack the individual batch elements
# and then sum the flops per batch element
shapes = _unpack_flash_attention_nested_shapes(
query=query,
key=key,
value=value,
grad_out=grad_out,
cum_seq_q=cum_seq_q,
cum_seq_k=cum_seq_k,
max_q=max_q,
max_k=max_k,
)
return sum(
sdpa_backward_flop_count(grad_out_shape, query_shape, key_shape, value_shape)
for query_shape, key_shape, value_shape, grad_out_shape in shapes
)
@register_flop_formula(aten._efficient_attention_backward, get_raw=True)
def _efficient_attention_backward_flop(
grad_out,
query,
key,
value,
bias,
out, # named _out to avoid kwarg collision with out created in wrapper
cu_seqlens_q,
cu_seqlens_k,
max_seqlen_q,
max_seqlen_k,
*args,
**kwargs,
) -> int:
# in case this is a nested tensor, we unpack the individual batch elements
# and then sum the flops per batch element
shapes = _unpack_efficient_attention_nested_shapes(
query=query,
key=key,
value=value,
grad_out=grad_out,
cu_seqlens_q=cu_seqlens_q,
cu_seqlens_k=cu_seqlens_k,
max_seqlen_q=max_seqlen_q,
max_seqlen_k=max_seqlen_k,
)
return sum(
sdpa_backward_flop_count(grad_out_shape, query_shape, key_shape, value_shape)
for query_shape, key_shape, value_shape, grad_out_shape in shapes
)
flop_registry = {
aten.mm: mm_flop,
aten.addmm: addmm_flop,
aten.bmm: bmm_flop,
aten.baddbmm: baddbmm_flop,
aten._scaled_mm: _scaled_mm_flop,
aten.convolution: conv_flop,
aten._convolution: conv_flop,
aten.convolution_backward: conv_backward_flop,
aten._scaled_dot_product_efficient_attention: sdpa_flop,
aten._scaled_dot_product_flash_attention: sdpa_flop,
aten._scaled_dot_product_cudnn_attention: sdpa_flop,
aten._scaled_dot_product_efficient_attention_backward: sdpa_backward_flop,
aten._scaled_dot_product_flash_attention_backward: sdpa_backward_flop,
aten._scaled_dot_product_cudnn_attention_backward: sdpa_backward_flop,
aten._flash_attention_forward: _flash_attention_forward_flop,
aten._efficient_attention_forward: _efficient_attention_forward_flop,
aten._flash_attention_backward: _flash_attention_backward_flop,
aten._efficient_attention_backward: _efficient_attention_backward_flop,
}
def normalize_tuple(x):
if not isinstance(x, tuple):
return (x,)
return x
# Define the suffixes for different orders of magnitude
suffixes = ["", "K", "M", "B", "T"]
# Thanks BingChat!
def get_suffix_str(number):
# Find the index of the appropriate suffix based on the number of digits
# with some additional overflow.
    # i.e. a number just above 1B (e.g. 1,001,000,000) should be displayed as 1001M, not 1.001B
index = max(0, min(len(suffixes) - 1, (len(str(number)) - 2) // 3))
return suffixes[index]
def convert_num_with_suffix(number, suffix):
index = suffixes.index(suffix)
    # Divide the number by 1000^index and format it to three decimal places
value = f"{number / 1000 ** index:.3f}"
# Return the value and the suffix as a string
return value + suffixes[index]
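# Editorial worked example (not part of the original file): get_suffix_str(1_234_567)
# computes (7 - 2) // 3 = 1 -> "K", and convert_num_with_suffix(1_234_567, "K") divides
# by 1000**1 and formats to three decimals, returning "1234.567K".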
def convert_to_percent_str(num, denom):
if denom == 0:
return "0%"
return f"{num / denom:.2%}"
def _pytreeify_preserve_structure(f):
@wraps(f)
def nf(args):
flat_args, spec = tree_flatten(args)
out = f(*flat_args)
return tree_unflatten(out, spec)
return nf
class FlopCounterMode:
"""
``FlopCounterMode`` is a context manager that counts the number of flops within its context.
It does this using a ``TorchDispatchMode``.
It also supports hierarchical output by passing a module (or list of
modules) to FlopCounterMode on construction. If you do not need hierarchical
output, you do not need to use it with a module.
    Example usage
    .. code-block:: python
        mod = ...
        inp = ...
        with FlopCounterMode(mod) as flop_counter:
            mod(inp).sum().backward()
"""
def __init__(
self,
mods: Optional[Union[torch.nn.Module, list[torch.nn.Module]]] = None,
depth: int = 2,
display: bool = True,
custom_mapping: Optional[dict[Any, Any]] = None):
super().__init__()
self.flop_counts: dict[str, dict[Any, int]] = defaultdict(lambda: defaultdict(int))
self.depth = depth
self.display = display
self.mode: Optional[_FlopCounterMode] = None
if custom_mapping is None:
custom_mapping = {}
if mods is not None:
warnings.warn("mods argument is not needed anymore, you can stop passing it", stacklevel=2)
self.flop_registry = {
**flop_registry,
**{k: v if getattr(v, "_get_raw", False) else shape_wrapper(v) for k, v in custom_mapping.items()}
}
self.mod_tracker = ModuleTracker()
def get_total_flops(self) -> int:
return sum(self.flop_counts['Global'].values())
def get_flop_counts(self) -> dict[str, dict[Any, int]]:
"""Return the flop counts as a dictionary of dictionaries.
The outer
dictionary is keyed by module name, and the inner dictionary is keyed by
operation name.
Returns:
Dict[str, Dict[Any, int]]: The flop counts as a dictionary.
"""
return {k: dict(v) for k, v in self.flop_counts.items()}
def get_table(self, depth=None):
if depth is None:
depth = self.depth
if depth is None:
depth = 999999
import tabulate
tabulate.PRESERVE_WHITESPACE = True
header = ["Module", "FLOP", "% Total"]
values = []
global_flops = self.get_total_flops()
global_suffix = get_suffix_str(global_flops)
is_global_subsumed = False
def process_mod(mod_name, depth):
nonlocal is_global_subsumed
total_flops = sum(self.flop_counts[mod_name].values())
is_global_subsumed |= total_flops >= global_flops
padding = " " * depth
values = []
values.append([
padding + mod_name,
convert_num_with_suffix(total_flops, global_suffix),
convert_to_percent_str(total_flops, global_flops)
])
for k, v in self.flop_counts[mod_name].items():
values.append([
padding + " - " + str(k),
convert_num_with_suffix(v, global_suffix),
convert_to_percent_str(v, global_flops)
])
return values
for mod in sorted(self.flop_counts.keys()):
if mod == 'Global':
continue
mod_depth = mod.count(".") + 1
if mod_depth > depth:
continue
cur_values = process_mod(mod, mod_depth - 1)
values.extend(cur_values)
# We do a bit of messing around here to only output the "Global" value
# if there are any FLOPs in there that aren't already fully contained by
# a module.
if 'Global' in self.flop_counts and not is_global_subsumed:
for value in values:
value[0] = " " + value[0]
values = process_mod('Global', 0) + values
if len(values) == 0:
values = [["Global", "0", "0%"]]
return tabulate.tabulate(values, headers=header, colalign=("left", "right", "right"))
# NB: This context manager is NOT reentrant
def __enter__(self):
self.flop_counts.clear()
self.mod_tracker.__enter__()
self.mode = _FlopCounterMode(self)
self.mode.__enter__()
return self
def __exit__(self, *args):
assert self.mode is not None
b = self.mode.__exit__(*args)
self.mode = None # break cycles
self.mod_tracker.__exit__()
if self.display:
print(self.get_table(self.depth))
return b
def _count_flops(self, func_packet, out, args, kwargs):
if func_packet in self.flop_registry:
flop_count_func = self.flop_registry[func_packet]
flop_count = flop_count_func(*args, **kwargs, out_val=out) # type: ignore[operator]
for par in set(self.mod_tracker.parents):
self.flop_counts[par][func_packet] += flop_count
return out
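# Editorial usage sketch (illustrative, not part of the original file; ``mylib.my_matmul``
# and ``my_matmul_flop`` are hypothetical). Counting flops for a small model and reading
# the totals programmatically:
#   >>> import torch.nn as nn
#   >>> mod = nn.Sequential(nn.Linear(16, 32), nn.ReLU(), nn.Linear(32, 4))
#   >>> with FlopCounterMode(display=False) as fc:
#   ...     mod(torch.randn(8, 16)).sum().backward()
#   >>> fc.get_total_flops()        # int
#   >>> fc.get_flop_counts()        # {module name: {op: flops}}
#   >>> # FlopCounterMode(custom_mapping={torch.ops.mylib.my_matmul: my_matmul_flop})
#   >>> # would extend the registry with a user-supplied formula.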
class _FlopCounterMode(TorchDispatchMode):
def __init__(self, counter: FlopCounterMode):
self.counter = counter
def __torch_dispatch__(self, func, types, args=(), kwargs=None):
kwargs = kwargs if kwargs else {}
# Skip ops from non-standard dispatch_sizes_strides_policy such as NJT
if func in {torch.ops.aten.is_contiguous.default,
torch.ops.aten.is_contiguous.memory_format,
torch.ops.aten.is_strides_like_format.default,
torch.ops.aten.is_non_overlapping_and_dense.default,
torch.ops.aten.size.default,
torch.ops.aten.sym_size.default,
torch.ops.aten.stride.default,
torch.ops.aten.sym_stride.default,
torch.ops.aten.storage_offset.default,
torch.ops.aten.sym_storage_offset.default,
torch.ops.aten.numel.default,
torch.ops.aten.sym_numel.default,
torch.ops.aten.dim.default,
torch.ops.prim.layout.default}:
return NotImplemented
# If we don't have func in flop_registry, see if it can decompose
if func not in self.counter.flop_registry and func is not torch.ops.prim.device.default:
with self:
r = func.decompose(*args, **kwargs)
if r is not NotImplemented:
return r
# no further decomposition; execute & count flops
out = func(*args, **kwargs)
return self.counter._count_flops(func._overloadpacket, out, args, kwargs)
```
|
======================================================================================================================
SOURCE CODE FILE: __init__.py
LINES: 1
SIZE: 0.03 KB
PATH: scripts\freecad_env\Lib\site-packages\torch\utils\hipify\__init__.py
ENCODING: utf-8
```py
from .version import __version__
```
|
=======================================================================================================================
SOURCE CODE FILE: constants.py
LINES: 1
SIZE: 1.21 KB
PATH: scripts\freecad_env\Lib\site-packages\torch\utils\hipify\constants.py
ENCODING: utf-8
```py
"""Constants for annotations in the mapping.
The constants defined here are used to annotate the mapping tuples in cuda_to_hip_mappings.py.
They are based on
https://github.com/ROCm/HIPIFY/blob/master/src/Statistics.h
and fall into three categories: 1) type of mapping, 2) API of mapping, 3) unsupported
mapping.
"""
CONV_VERSION = 0
CONV_INIT = 1
CONV_DEVICE = 2
CONV_MEM = 3
CONV_KERN = 4
CONV_COORD_FUNC = 5
CONV_MATH_FUNC = 6
CONV_DEVICE_FUNC = 7
CONV_SPECIAL_FUNC = 8
CONV_STREAM = 9
CONV_EVENT = 10
CONV_OCCUPANCY = 11
CONV_CONTEXT = 12
CONV_PEER = 13
CONV_MODULE = 14
CONV_CACHE = 15
CONV_EXEC = 16
CONV_ERROR = 17
CONV_DEF = 18
CONV_TEX = 19
CONV_GL = 20
CONV_GRAPHICS = 21
CONV_SURFACE = 22
CONV_JIT = 23
CONV_D3D9 = 24
CONV_D3D10 = 25
CONV_D3D11 = 26
CONV_VDPAU = 27
CONV_EGL = 28
CONV_THREAD = 29
CONV_OTHER = 30
CONV_INCLUDE = 31
CONV_INCLUDE_CUDA_MAIN_H = 32
CONV_TYPE = 33
CONV_LITERAL = 34
CONV_NUMERIC_LITERAL = 35
CONV_LAST = 36
API_DRIVER = 37
API_RUNTIME = 38
API_BLAS = 39
API_SPECIAL = 40
API_RAND = 41
API_LAST = 42
API_FFT = 43
API_RTC = 44
API_ROCTX = 45
HIP_UNSUPPORTED = 46
API_PYTORCH = 1337
API_CAFFE2 = 1338
API_C10 = 1339
API_ROCMSMI = 1340
```
|
==================================================================================================================================
SOURCE CODE FILE: cuda_to_hip_mappings.py
LINES: 1
SIZE: 351.83 KB
PATH: scripts\freecad_env\Lib\site-packages\torch\utils\hipify\cuda_to_hip_mappings.py
ENCODING: utf-8
```py
import collections
import os
from .constants import (API_BLAS, API_C10, API_CAFFE2, API_DRIVER, API_FFT,
API_PYTORCH, API_RAND, API_ROCTX, API_RTC, API_RUNTIME,
API_SPECIAL, API_ROCMSMI, CONV_CACHE, CONV_CONTEXT, CONV_D3D9,
CONV_D3D10, CONV_D3D11, CONV_DEF, CONV_DEVICE,
CONV_DEVICE_FUNC, CONV_EGL, CONV_ERROR, CONV_EVENT,
CONV_EXEC, CONV_GL, CONV_GRAPHICS, CONV_INCLUDE,
CONV_INCLUDE_CUDA_MAIN_H, CONV_INIT, CONV_JIT,
CONV_MATH_FUNC, CONV_MEM, CONV_MODULE,
CONV_NUMERIC_LITERAL, CONV_OCCUPANCY, CONV_OTHER,
CONV_PEER, CONV_SPECIAL_FUNC, CONV_STREAM,
CONV_SURFACE, CONV_TEX, CONV_THREAD, CONV_TYPE,
CONV_VDPAU, CONV_VERSION, HIP_UNSUPPORTED)
""" Mapping of CUDA functions, include files, constants, and types to ROCm/HIP equivalents
This closely follows the implementation in hipify-clang
https://github.com/ROCm-Developer-Tools/HIP/blob/master/hipify-clang/src/CUDA2HipMap.cpp
and its structure.
There are different maps for fundamental names, include files, identifiers, sparse, and
PyTorch-specific translations.
Each of the entries in these maps translates a CUDA string to a tuple containing the
ROCm/HIP string, a type and API annotation and - optionally - an annotation if it is not
supported in ROCm/HIP yet.
"""
_IS_FBCODE = os.environ.get("IS_FBCODE", "0") == "1"
# FBCODE compiles against rccl sources instead of an installed rccl package.
# The header location is src/rccl.h versus rccl/rccl.h, respectively.
_RCCL_HEADER = "<rccl.h>" if _IS_FBCODE else "<rccl/rccl.h>"
# List of math functions that should be replaced inside device code only.
MATH_TRANSPILATIONS = collections.OrderedDict(
[
("std::max", ("::max")),
("std::min", ("::min")),
("std::ceil", ("::ceil")),
("std::floor", ("::floor")),
("std::exp", ("::exp")),
("std::log", ("::log")),
("std::pow", ("::pow")),
("std::fabs", ("::fabs")),
("std::fmod", ("::fmod")),
("std::remainder", ("::remainder")),
("std::frexp", ("::frexp")),
]
)
CUDA_TYPE_NAME_MAP = collections.OrderedDict(
[
("CUresult", ("hipError_t", CONV_TYPE, API_DRIVER)),
("cudaError_t", ("hipError_t", CONV_TYPE, API_RUNTIME)),
("cudaError", ("hipError_t", CONV_TYPE, API_RUNTIME)),
(
"CUDA_ARRAY3D_DESCRIPTOR",
("HIP_ARRAY3D_DESCRIPTOR", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED),
),
("CUDA_ARRAY_DESCRIPTOR", ("HIP_ARRAY_DESCRIPTOR", CONV_TYPE, API_DRIVER)),
("CUDA_MEMCPY2D", ("hip_Memcpy2D", CONV_TYPE, API_DRIVER)),
("CUDA_MEMCPY3D", ("HIP_MEMCPY3D", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED)),
(
"CUDA_MEMCPY3D_PEER",
("HIP_MEMCPY3D_PEER", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED),
),
(
"CUDA_POINTER_ATTRIBUTE_P2P_TOKENS",
(
"HIP_POINTER_ATTRIBUTE_P2P_TOKENS",
CONV_TYPE,
API_DRIVER,
HIP_UNSUPPORTED,
),
),
(
"CUDA_RESOURCE_DESC",
("HIP_RESOURCE_DESC", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED),
),
(
"CUDA_RESOURCE_VIEW_DESC",
("HIP_RESOURCE_VIEW_DESC", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED),
),
(
"CUipcEventHandle",
("hipIpcEventHandle", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED),
),
("CUipcMemHandle", ("hipIpcMemHandle", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED)),
("CUaddress_mode", ("hipAddress_mode", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED)),
(
"CUarray_cubemap_face",
("hipArray_cubemap_face", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED),
),
("CUarray_format", ("hipArray_format", CONV_TYPE, API_DRIVER)),
("CUcomputemode", ("hipComputemode", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED)),
("CUmem_advise", ("hipMemAdvise", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED)),
(
"CUmem_range_attribute",
("hipMemRangeAttribute", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED),
),
("CUctx_flags", ("hipCctx_flags", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED)),
("CUdevice", ("hipDevice_t", CONV_TYPE, API_DRIVER)),
("CUdevice_attribute_enum", ("hipDeviceAttribute_t", CONV_TYPE, API_DRIVER)),
("CUdevice_attribute", ("hipDeviceAttribute_t", CONV_TYPE, API_DRIVER)),
("CUpointer_attribute", ("hipPointer_attribute", CONV_TYPE, API_DRIVER)),
("CU_POINTER_ATTRIBUTE_DEVICE_ORDINAL", ("HIP_POINTER_ATTRIBUTE_DEVICE_ORDINAL", CONV_TYPE, API_DRIVER)),
("CU_POINTER_ATTRIBUTE_BUFFER_ID", ("HIP_POINTER_ATTRIBUTE_BUFFER_ID", CONV_TYPE, API_DRIVER)),
("CUdeviceptr", ("hipDeviceptr_t", CONV_TYPE, API_DRIVER)),
("CUarray_st", ("hipArray", CONV_TYPE, API_DRIVER)),
("CUarray", ("hipArray *", CONV_TYPE, API_DRIVER)),
("CUdevprop_st", ("hipDeviceProp_t", CONV_TYPE, API_DRIVER)),
("CUdevprop", ("hipDeviceProp_t", CONV_TYPE, API_DRIVER)),
("CUfunction", ("hipFunction_t", CONV_TYPE, API_DRIVER)),
(
"CUgraphicsResource",
("hipGraphicsResource_t", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED),
),
(
"CUmipmappedArray",
("hipMipmappedArray_t", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED),
),
(
"CUfunction_attribute",
("hipFuncAttribute_t", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED),
),
(
"CUfunction_attribute_enum",
("hipFuncAttribute_t", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED),
),
(
"CUgraphicsMapResourceFlags",
("hipGraphicsMapFlags", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED),
),
(
"CUgraphicsMapResourceFlags_enum",
("hipGraphicsMapFlags", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED),
),
(
"CUgraphicsRegisterFlags",
("hipGraphicsRegisterFlags", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED),
),
(
"CUgraphicsRegisterFlags_enum",
("hipGraphicsRegisterFlags", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED),
),
(
"CUoccupancy_flags",
("hipOccupancyFlags", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED),
),
(
"CUoccupancy_flags_enum",
("hipOccupancyFlags", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED),
),
("CUfunc_cache_enum", ("hipFuncCache", CONV_TYPE, API_DRIVER)),
("CUfunc_cache", ("hipFuncCache", CONV_TYPE, API_DRIVER)),
("CUipcMem_flags", ("hipIpcMemFlags", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED)),
(
"CUipcMem_flags_enum",
("hipIpcMemFlags", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED),
),
("CUjit_cacheMode", ("hipJitCacheMode", CONV_JIT, API_DRIVER, HIP_UNSUPPORTED)),
(
"CUjit_cacheMode_enum",
("hipJitCacheMode", CONV_JIT, API_DRIVER, HIP_UNSUPPORTED),
),
("CUjit_fallback", ("hipJitFallback", CONV_JIT, API_DRIVER, HIP_UNSUPPORTED)),
(
"CUjit_fallback_enum",
("hipJitFallback", CONV_JIT, API_DRIVER, HIP_UNSUPPORTED),
),
("CUjit_option", ("hipJitOption", CONV_JIT, API_DRIVER)),
("CUjit_option_enum", ("hipJitOption", CONV_JIT, API_DRIVER)),
("CUjit_target", ("hipJitTarget", CONV_JIT, API_DRIVER, HIP_UNSUPPORTED)),
("CUjit_target_enum", ("hipJitTarget", CONV_JIT, API_DRIVER, HIP_UNSUPPORTED)),
("CUjitInputType", ("hipJitInputType", CONV_JIT, API_DRIVER, HIP_UNSUPPORTED)),
(
"CUjitInputType_enum",
("hipJitInputType", CONV_JIT, API_DRIVER, HIP_UNSUPPORTED),
),
("CUlimit", ("hipLimit_t", CONV_TYPE, API_DRIVER)),
("CUlimit_enum", ("hipLimit_t", CONV_TYPE, API_DRIVER)),
(
"CUmemAttach_flags",
("hipMemAttachFlags_t", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED),
),
(
"CUmemAttach_flags_enum",
("hipMemAttachFlags_t", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED),
),
("CUmemorytype", ("hipMemType_t", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED)),
("CUmemorytype_enum", ("hipMemType_t", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED)),
("CUresourcetype", ("hipResourceType", CONV_TEX, API_DRIVER, HIP_UNSUPPORTED)),
(
"CUresourcetype_enum",
("hipResourceType", CONV_TEX, API_DRIVER, HIP_UNSUPPORTED),
),
("CUresourceViewFormat", ("hipResourceViewFormat", CONV_TEX, API_DRIVER)),
("CUresourceViewFormat_enum", ("hipResourceViewFormat", CONV_TEX, API_DRIVER)),
("CUsharedconfig", ("hipSharedMemConfig", CONV_TYPE, API_DRIVER)),
("CUsharedconfig_enum", ("hipSharedMemConfig", CONV_TYPE, API_DRIVER)),
("CUcontext", ("hipCtx_t", CONV_TYPE, API_DRIVER)),
("CUmodule", ("hipModule_t", CONV_TYPE, API_DRIVER)),
("CUstream", ("hipStream_t", CONV_TYPE, API_DRIVER)),
("CUstream_st", ("ihipStream_t", CONV_TYPE, API_DRIVER)),
("CUstreamCallback", ("hipStreamCallback_t", CONV_TYPE, API_DRIVER)),
("CUsurfObject", ("hipSurfaceObject", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED)),
(
"CUsurfref",
("hipSurfaceReference_t", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED),
),
("CUtexObject", ("hipTextureObject_t", CONV_TYPE, API_DRIVER)),
("CUtexref", ("textureReference", CONV_TYPE, API_DRIVER)),
("CUstream_flags", ("hipStreamFlags", CONV_TYPE, API_DRIVER)),
(
"CUstreamWaitValue_flags",
("hipStreamWaitValueFlags", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED),
),
(
"CUstreamWriteValue_flags",
("hipStreamWriteValueFlags", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED),
),
(
"CUstreamBatchMemOpType",
("hipStreamBatchMemOpType", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED),
),
(
"CUdevice_P2PAttribute",
("hipDeviceP2PAttribute", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED),
),
("CUevent", ("hipEvent_t", CONV_TYPE, API_DRIVER)),
("CUevent_st", ("ihipEvent_t", CONV_TYPE, API_DRIVER)),
("CUevent_flags", ("hipEventFlags", CONV_EVENT, API_DRIVER, HIP_UNSUPPORTED)),
("CUfilter_mode", ("hipTextureFilterMode", CONV_TEX, API_DRIVER)),
("CUGLDeviceList", ("hipGLDeviceList", CONV_GL, API_DRIVER, HIP_UNSUPPORTED)),
("CUGLmap_flags", ("hipGLMapFlags", CONV_GL, API_DRIVER, HIP_UNSUPPORTED)),
(
"CUd3d9DeviceList",
("hipD3D9DeviceList", CONV_D3D9, API_DRIVER, HIP_UNSUPPORTED),
),
(
"CUd3d9map_flags",
("hipD3D9MapFlags", CONV_D3D9, API_DRIVER, HIP_UNSUPPORTED),
),
(
"CUd3d9register_flags",
("hipD3D9RegisterFlags", CONV_D3D9, API_DRIVER, HIP_UNSUPPORTED),
),
(
"CUd3d10DeviceList",
("hipd3d10DeviceList", CONV_D3D10, API_DRIVER, HIP_UNSUPPORTED),
),
(
"CUd3d10map_flags",
("hipD3D10MapFlags", CONV_D3D10, API_DRIVER, HIP_UNSUPPORTED),
),
(
"CUd3d10register_flags",
("hipD3D10RegisterFlags", CONV_D3D10, API_DRIVER, HIP_UNSUPPORTED),
),
(
"CUd3d11DeviceList",
("hipd3d11DeviceList", CONV_D3D11, API_DRIVER, HIP_UNSUPPORTED),
),
(
"CUeglStreamConnection_st",
("hipEglStreamConnection", CONV_EGL, API_DRIVER, HIP_UNSUPPORTED),
),
(
"CUeglStreamConnection",
("hipEglStreamConnection", CONV_EGL, API_DRIVER, HIP_UNSUPPORTED),
),
(
"libraryPropertyType_t",
("hipLibraryPropertyType_t", CONV_TYPE, API_RUNTIME, HIP_UNSUPPORTED),
),
(
"libraryPropertyType",
("hipLibraryPropertyType_t", CONV_TYPE, API_RUNTIME, HIP_UNSUPPORTED),
),
("cudaStreamCallback_t", ("hipStreamCallback_t", CONV_TYPE, API_RUNTIME)),
("cudaArray", ("hipArray", CONV_MEM, API_RUNTIME)),
("cudaArray_t", ("hipArray_t", CONV_MEM, API_RUNTIME)),
("cudaArray_const_t", ("hipArray_const_t", CONV_MEM, API_RUNTIME)),
("cudaMipmappedArray_t", ("hipMipmappedArray_t", CONV_MEM, API_RUNTIME)),
(
"cudaMipmappedArray_const_t",
("hipMipmappedArray_const_t", CONV_MEM, API_RUNTIME),
),
("cudaArrayDefault", ("hipArrayDefault", CONV_MEM, API_RUNTIME)),
("cudaArrayLayered", ("hipArrayLayered", CONV_MEM, API_RUNTIME)),
(
"cudaArraySurfaceLoadStore",
("hipArraySurfaceLoadStore", CONV_MEM, API_RUNTIME),
),
("cudaArrayCubemap", ("hipArrayCubemap", CONV_MEM, API_RUNTIME)),
("cudaArrayTextureGather", ("hipArrayTextureGather", CONV_MEM, API_RUNTIME)),
("cudaMemoryAdvise", ("hipMemoryAdvise", CONV_TYPE, API_RUNTIME, HIP_UNSUPPORTED)),
(
"cudaMemRangeAttribute",
("hipMemRangeAttribute", CONV_TYPE, API_RUNTIME, HIP_UNSUPPORTED),
),
("cudaMemcpyKind", ("hipMemcpyKind", CONV_MEM, API_RUNTIME)),
("cudaMemoryType", ("hipMemoryType", CONV_MEM, API_RUNTIME)),
("cudaExtent", ("hipExtent", CONV_MEM, API_RUNTIME)),
("cudaPitchedPtr", ("hipPitchedPtr", CONV_MEM, API_RUNTIME)),
("cudaPos", ("hipPos", CONV_MEM, API_RUNTIME)),
("cudaEvent_t", ("hipEvent_t", CONV_TYPE, API_RUNTIME)),
("cudaStream_t", ("hipStream_t", CONV_TYPE, API_RUNTIME)),
("cudaPointerAttributes", ("hipPointerAttribute_t", CONV_TYPE, API_RUNTIME)),
("cudaDeviceAttr", ("hipDeviceAttribute_t", CONV_TYPE, API_RUNTIME)),
("cudaDeviceProp", ("hipDeviceProp_t", CONV_TYPE, API_RUNTIME)),
(
"cudaDeviceP2PAttr",
("hipDeviceP2PAttribute", CONV_TYPE, API_RUNTIME, HIP_UNSUPPORTED),
),
(
"cudaComputeMode",
("hipComputeMode", CONV_TYPE, API_RUNTIME, HIP_UNSUPPORTED),
),
("cudaFuncCache", ("hipFuncCache_t", CONV_CACHE, API_RUNTIME)),
(
"cudaFuncAttributes",
("hipFuncAttributes", CONV_EXEC, API_RUNTIME, HIP_UNSUPPORTED),
),
("cudaSharedMemConfig", ("hipSharedMemConfig", CONV_TYPE, API_RUNTIME)),
("cudaLimit", ("hipLimit_t", CONV_TYPE, API_RUNTIME)),
("cudaOutputMode", ("hipOutputMode", CONV_OTHER, API_RUNTIME, HIP_UNSUPPORTED)),
("cudaTextureReadMode", ("hipTextureReadMode", CONV_TEX, API_RUNTIME)),
("cudaTextureFilterMode", ("hipTextureFilterMode", CONV_TEX, API_RUNTIME)),
("cudaChannelFormatKind", ("hipChannelFormatKind", CONV_TEX, API_RUNTIME)),
("cudaChannelFormatDesc", ("hipChannelFormatDesc", CONV_TEX, API_RUNTIME)),
("cudaResourceDesc", ("hipResourceDesc", CONV_TEX, API_RUNTIME)),
("cudaResourceViewDesc", ("hipResourceViewDesc", CONV_TEX, API_RUNTIME)),
("cudaTextureDesc", ("hipTextureDesc", CONV_TEX, API_RUNTIME)),
(
"surfaceReference",
("hipSurfaceReference", CONV_SURFACE, API_RUNTIME, HIP_UNSUPPORTED),
),
("cudaTextureObject_t", ("hipTextureObject_t", CONV_TEX, API_RUNTIME)),
("cudaResourceType", ("hipResourceType", CONV_TEX, API_RUNTIME)),
("cudaResourceViewFormat", ("hipResourceViewFormat", CONV_TEX, API_RUNTIME)),
("cudaTextureAddressMode", ("hipTextureAddressMode", CONV_TEX, API_RUNTIME)),
(
"cudaSurfaceBoundaryMode",
("hipSurfaceBoundaryMode", CONV_SURFACE, API_RUNTIME, HIP_UNSUPPORTED),
),
(
"cudaSurfaceFormatMode",
("hipSurfaceFormatMode", CONV_SURFACE, API_RUNTIME, HIP_UNSUPPORTED),
),
("cudaTextureType1D", ("hipTextureType1D", CONV_TEX, API_RUNTIME)),
("cudaTextureType2D", ("hipTextureType2D", CONV_TEX, API_RUNTIME)),
("cudaTextureType3D", ("hipTextureType3D", CONV_TEX, API_RUNTIME)),
("cudaTextureTypeCubemap", ("hipTextureTypeCubemap", CONV_TEX, API_RUNTIME)),
(
"cudaTextureType1DLayered",
("hipTextureType1DLayered", CONV_TEX, API_RUNTIME),
),
(
"cudaTextureType2DLayered",
("hipTextureType2DLayered", CONV_TEX, API_RUNTIME),
),
(
"cudaTextureTypeCubemapLayered",
("hipTextureTypeCubemapLayered", CONV_TEX, API_RUNTIME),
),
("cudaIpcEventHandle_t", ("hipIpcEventHandle_t", CONV_TYPE, API_RUNTIME)),
("cudaIpcEventHandle_st", ("hipIpcEventHandle_t", CONV_TYPE, API_RUNTIME)),
("cudaIpcMemHandle_t", ("hipIpcMemHandle_t", CONV_TYPE, API_RUNTIME)),
("cudaIpcMemHandle_st", ("hipIpcMemHandle_t", CONV_TYPE, API_RUNTIME)),
(
"cudaGraphicsCubeFace",
("hipGraphicsCubeFace", CONV_GRAPHICS, API_RUNTIME, HIP_UNSUPPORTED),
),
(
"cudaGraphicsMapFlags",
("hipGraphicsMapFlags", CONV_GRAPHICS, API_RUNTIME, HIP_UNSUPPORTED),
),
(
"cudaGraphicsRegisterFlags",
("hipGraphicsRegisterFlags", CONV_GRAPHICS, API_RUNTIME, HIP_UNSUPPORTED),
),
(
"cudaGLDeviceList",
("hipGLDeviceList", CONV_GL, API_RUNTIME, HIP_UNSUPPORTED),
),
("cudaGLMapFlags", ("hipGLMapFlags", CONV_GL, API_RUNTIME, HIP_UNSUPPORTED)),
(
"cudaD3D9DeviceList",
("hipD3D9DeviceList", CONV_D3D9, API_RUNTIME, HIP_UNSUPPORTED),
),
(
"cudaD3D9MapFlags",
("hipD3D9MapFlags", CONV_D3D9, API_RUNTIME, HIP_UNSUPPORTED),
),
(
"cudaD3D9RegisterFlags",
("hipD3D9RegisterFlags", CONV_D3D9, API_RUNTIME, HIP_UNSUPPORTED),
),
(
"cudaD3D10DeviceList",
("hipd3d10DeviceList", CONV_D3D10, API_RUNTIME, HIP_UNSUPPORTED),
),
(
"cudaD3D10MapFlags",
("hipD3D10MapFlags", CONV_D3D10, API_RUNTIME, HIP_UNSUPPORTED),
),
(
"cudaD3D10RegisterFlags",
("hipD3D10RegisterFlags", CONV_D3D10, API_RUNTIME, HIP_UNSUPPORTED),
),
(
"cudaD3D11DeviceList",
("hipd3d11DeviceList", CONV_D3D11, API_RUNTIME, HIP_UNSUPPORTED),
),
(
"cudaEglStreamConnection",
("hipEglStreamConnection", CONV_EGL, API_RUNTIME, HIP_UNSUPPORTED),
),
("cublasHandle_t", ("hipblasHandle_t", CONV_TYPE, API_BLAS)),
("cublasOperation_t", ("hipblasOperation_t", CONV_TYPE, API_BLAS)),
("cublasStatus_t", ("hipblasStatus_t", CONV_TYPE, API_BLAS)),
("cublasFillMode_t", ("hipblasFillMode_t", CONV_TYPE, API_BLAS)),
("cublasDiagType_t", ("hipblasDiagType_t", CONV_TYPE, API_BLAS)),
("cublasSideMode_t", ("hipblasSideMode_t", CONV_TYPE, API_BLAS)),
("cublasPointerMode_t", ("hipblasPointerMode_t", CONV_TYPE, API_BLAS)),
("cublasGemmAlgo_t", ("hipblasGemmAlgo_t", CONV_TYPE, API_BLAS)),
(
"cublasAtomicsMode_t",
("hipblasAtomicsMode_t", CONV_TYPE, API_BLAS, HIP_UNSUPPORTED),
),
(
"cublasDataType_t",
("hipblasDatatype_t", CONV_TYPE, API_BLAS, HIP_UNSUPPORTED),
),
("curandStatus", ("hiprandStatus_t", CONV_TYPE, API_RAND)),
("curandStatus_t", ("hiprandStatus_t", CONV_TYPE, API_RAND)),
("curandRngType", ("hiprandRngType_t", CONV_TYPE, API_RAND)),
("curandRngType_t", ("hiprandRngType_t", CONV_TYPE, API_RAND)),
("curandGenerator_st", ("hiprandGenerator_st", CONV_TYPE, API_RAND)),
("curandGenerator_t", ("hiprandGenerator_t", CONV_TYPE, API_RAND)),
(
"curandDirectionVectorSet",
("hiprandDirectionVectorSet_t", CONV_TYPE, API_RAND, HIP_UNSUPPORTED),
),
(
"curandDirectionVectorSet_t",
("hiprandDirectionVectorSet_t", CONV_TYPE, API_RAND, HIP_UNSUPPORTED),
),
("curandOrdering", ("hiprandOrdering_t", CONV_TYPE, API_RAND, HIP_UNSUPPORTED)),
(
"curandOrdering_t",
("hiprandOrdering_t", CONV_TYPE, API_RAND, HIP_UNSUPPORTED),
),
(
"curandDistribution_st",
("hiprandDistribution_st", CONV_TYPE, API_RAND, HIP_UNSUPPORTED),
),
(
"curandHistogramM2V_st",
("hiprandDistribution_st", CONV_TYPE, API_RAND, HIP_UNSUPPORTED),
),
(
"curandDistribution_t",
("hiprandDistribution_t", CONV_TYPE, API_RAND, HIP_UNSUPPORTED),
),
(
"curandHistogramM2V_t",
("hiprandDistribution_t", CONV_TYPE, API_RAND, HIP_UNSUPPORTED),
),
(
"curandDistributionShift_st",
("hiprandDistributionShift_st", CONV_TYPE, API_RAND, HIP_UNSUPPORTED),
),
(
"curandDistributionShift_t",
("hiprandDistributionShift_t", CONV_TYPE, API_RAND, HIP_UNSUPPORTED),
),
(
"curandDistributionM2Shift_st",
("hiprandDistributionM2Shift_st", CONV_TYPE, API_RAND, HIP_UNSUPPORTED),
),
(
"curandDistributionM2Shift_t",
("hiprandDistributionM2Shift_t", CONV_TYPE, API_RAND, HIP_UNSUPPORTED),
),
(
"curandHistogramM2_st",
("hiprandHistogramM2_st", CONV_TYPE, API_RAND, HIP_UNSUPPORTED),
),
(
"curandHistogramM2_t",
("hiprandHistogramM2_t", CONV_TYPE, API_RAND, HIP_UNSUPPORTED),
),
(
"curandHistogramM2K_st",
("hiprandHistogramM2K_st", CONV_TYPE, API_RAND, HIP_UNSUPPORTED),
),
(
"curandHistogramM2K_t",
("hiprandHistogramM2K_t", CONV_TYPE, API_RAND, HIP_UNSUPPORTED),
),
(
"curandDiscreteDistribution_st",
("hiprandDiscreteDistribution_st", CONV_TYPE, API_RAND),
),
(
"curandDiscreteDistribution_t",
("hiprandDiscreteDistribution_t", CONV_TYPE, API_RAND),
),
("curandMethod", ("hiprandMethod_t", CONV_TYPE, API_RAND, HIP_UNSUPPORTED)),
("curandMethod_t", ("hiprandMethod_t", CONV_TYPE, API_RAND, HIP_UNSUPPORTED)),
(
"curandDirectionVectors32_t",
("hiprandDirectionVectors32_t", CONV_TYPE, API_RAND),
),
(
"curandDirectionVectors64_t",
("hiprandDirectionVectors64_t", CONV_TYPE, API_RAND, HIP_UNSUPPORTED),
),
("curandStateMtgp32_t", ("hiprandStateMtgp32_t", CONV_TYPE, API_RAND)),
("curandStateMtgp32", ("hiprandStateMtgp32_t", CONV_TYPE, API_RAND)),
(
"curandStateScrambledSobol64_t",
("hiprandStateScrambledSobol64_t", CONV_TYPE, API_RAND, HIP_UNSUPPORTED),
),
(
"curandStateSobol64_t",
("hiprandStateSobol64_t", CONV_TYPE, API_RAND, HIP_UNSUPPORTED),
),
(
"curandStateScrambledSobol32_t",
("hiprandStateScrambledSobol32_t", CONV_TYPE, API_RAND, HIP_UNSUPPORTED),
),
("curandStateSobol32_t", ("hiprandStateSobol32_t", CONV_TYPE, API_RAND)),
("curandStateMRG32k3a_t", ("hiprandStateMRG32k3a_t", CONV_TYPE, API_RAND)),
(
"curandStatePhilox4_32_10_t",
("hiprandStatePhilox4_32_10_t", CONV_TYPE, API_RAND),
),
("curandStateXORWOW_t", ("hiprandStateXORWOW_t", CONV_TYPE, API_RAND)),
("curandState_t", ("hiprandState_t", CONV_TYPE, API_RAND)),
("curandState", ("hiprandState_t", CONV_TYPE, API_RAND)),
("CUuuid", ("hipUUID", CONV_TYPE, API_RUNTIME)),
("cudaGraph_t", ("hipGraph_t", CONV_TYPE, API_RAND)),
("cudaGraphExec_t", ("hipGraphExec_t", CONV_TYPE, API_RAND)),
("__nv_bfloat16", ("__hip_bfloat16", CONV_TYPE, API_RUNTIME)),
("__nv_bfloat162", ("__hip_bfloat162", CONV_TYPE, API_RUNTIME)),
]
)
CUDA_INCLUDE_MAP = collections.OrderedDict(
[
# since pytorch uses "\b{pattern}\b" as the actual re pattern,
# patterns listed here have to begin and end with alnum chars
(
"include <cuda.h",
("include <hip/hip_runtime.h", CONV_INCLUDE_CUDA_MAIN_H, API_DRIVER),
),
(
'include "cuda.h',
('include "hip/hip_runtime.h', CONV_INCLUDE_CUDA_MAIN_H, API_DRIVER),
),
(
"cuda_runtime.h",
("hip/hip_runtime.h", CONV_INCLUDE_CUDA_MAIN_H, API_RUNTIME),
),
("cuda_runtime_api.h", ("hip/hip_runtime_api.h", CONV_INCLUDE, API_RUNTIME)),
("cuda_profiler_api.h", ("hip/hip_runtime_api.h", CONV_INCLUDE, API_RUNTIME)),
(
"channel_descriptor.h",
("hip/channel_descriptor.h", CONV_INCLUDE, API_RUNTIME),
),
("device_functions.h", ("hip/device_functions.h", CONV_INCLUDE, API_RUNTIME)),
("driver_types.h", ("hip/driver_types.h", CONV_INCLUDE, API_RUNTIME)),
("library_types.h", ("hip/library_types.h", CONV_INCLUDE, API_RUNTIME)),
("cuComplex.h", ("hip/hip_complex.h", CONV_INCLUDE, API_RUNTIME)),
("cuda_fp16.h", ("hip/hip_fp16.h", CONV_INCLUDE, API_RUNTIME)),
("cuda_bf16.h", ("hip/hip_bf16.h", CONV_INCLUDE, API_RUNTIME)),
(
"cuda_texture_types.h",
("hip/hip_texture_types.h", CONV_INCLUDE, API_RUNTIME),
),
("cooperative_groups.h", ("hip/hip_cooperative_groups.h", CONV_INCLUDE, API_RUNTIME)),
("vector_types.h", ("hip/hip_vector_types.h", CONV_INCLUDE, API_RUNTIME)),
("cublas.h", ("hipblas/hipblas.h", CONV_INCLUDE_CUDA_MAIN_H, API_BLAS)),
("cublas_v2.h", ("hipblas/hipblas.h", CONV_INCLUDE_CUDA_MAIN_H, API_BLAS)),
("cublasLt.h", ("hipblaslt/hipblaslt.h", CONV_INCLUDE_CUDA_MAIN_H, API_BLAS)),
("curand.h", ("hiprand/hiprand.h", CONV_INCLUDE_CUDA_MAIN_H, API_RAND)),
("curand_kernel.h", ("hiprand/hiprand_kernel.h", CONV_INCLUDE, API_RAND)),
("curand_discrete.h", ("hiprand/hiprand_kernel.h", CONV_INCLUDE, API_RAND)),
("curand_discrete2.h", ("hiprand/hiprand_kernel.h", CONV_INCLUDE, API_RAND)),
("curand_globals.h", ("hiprand/hiprand_kernel.h", CONV_INCLUDE, API_RAND)),
("curand_lognormal.h", ("hiprand/hiprand_kernel.h", CONV_INCLUDE, API_RAND)),
("curand_mrg32k3a.h", ("hiprand/hiprand_kernel.h", CONV_INCLUDE, API_RAND)),
("curand_mtgp32.h", ("hiprand/hiprand_kernel.h", CONV_INCLUDE, API_RAND)),
("curand_mtgp32_host.h", ("hiprand/hiprand_mtgp32_host.h", CONV_INCLUDE, API_RAND)),
("curand_mtgp32_kernel.h", ("hiprand/hiprand_kernel.h", CONV_INCLUDE, API_RAND)),
(
"curand_mtgp32dc_p_11213.h",
("rocrand/rocrand_mtgp32_11213.h", CONV_INCLUDE, API_RAND),
),
("curand_normal.h", ("hiprand/hiprand_kernel.h", CONV_INCLUDE, API_RAND)),
("curand_normal_static.h", ("hiprand/hiprand_kernel.h", CONV_INCLUDE, API_RAND)),
("curand_philox4x32_x.h", ("hiprand/hiprand_kernel.h", CONV_INCLUDE, API_RAND)),
("curand_poisson.h", ("hiprand/hiprand_kernel.h", CONV_INCLUDE, API_RAND)),
("curand_precalc.h", ("hiprand/hiprand_kernel.h", CONV_INCLUDE, API_RAND)),
("curand_uniform.h", ("hiprand/hiprand_kernel.h", CONV_INCLUDE, API_RAND)),
("cusparse.h", ("hipsparse/hipsparse.h", CONV_INCLUDE, API_RAND)),
("cufft.h", ("hipfft/hipfft.h", CONV_INCLUDE, API_BLAS)),
("cufftXt.h", ("hipfft/hipfftXt.h", CONV_INCLUDE, API_BLAS)),
        # PyTorch also has a source file named "nccl.h", so we use the angle-bracket form "<nccl.h>" to differentiate
("<nccl.h>", (_RCCL_HEADER, CONV_INCLUDE, API_RUNTIME)),
("nvrtc.h", ("hip/hiprtc.h", CONV_INCLUDE, API_RTC)),
("thrust/system/cuda", ("thrust/system/hip", CONV_INCLUDE, API_BLAS)),
("cub/util_allocator.cuh", ("hipcub/hipcub.hpp", CONV_INCLUDE, API_BLAS)),
("cub/block/block_reduce.cuh", ("hipcub/hipcub.hpp", CONV_INCLUDE, API_BLAS)),
("cub/block/block_raking_layout.cuh", ("hipcub/hipcub.hpp", CONV_INCLUDE, API_BLAS)),
("cub/cub.cuh", ("hipcub/hipcub.hpp", CONV_INCLUDE, API_BLAS)),
("cub/config.cuh", ("hipcub/hipcub.hpp", CONV_INCLUDE, API_BLAS)),
("cub/util_ptx.cuh", ("hipcub/hipcub.hpp", CONV_INCLUDE, API_BLAS)),
("cub/util_type.cuh", ("hipcub/hipcub.hpp", CONV_INCLUDE, API_BLAS)),
("cub/device/device_run_length_encode.cuh", ("hipcub/hipcub.hpp", CONV_INCLUDE, API_BLAS)),
("cub/block/block_load.cuh", ("hipcub/hipcub.hpp", CONV_INCLUDE, API_BLAS)),
("cub/block/block_store.cuh", ("hipcub/hipcub.hpp", CONV_INCLUDE, API_BLAS)),
("cub/block/block_scan.cuh", ("hipcub/hipcub.hpp", CONV_INCLUDE, API_BLAS)),
("cub/device/device_radix_sort.cuh", ("hipcub/hipcub.hpp", CONV_INCLUDE, API_BLAS)),
("cub/device/device_reduce.cuh", ("hipcub/hipcub.hpp", CONV_INCLUDE, API_BLAS)),
("cub/device/device_scan.cuh", ("hipcub/hipcub.hpp", CONV_INCLUDE, API_BLAS)),
("cub/device/device_select.cuh", ("hipcub/hipcub.hpp", CONV_INCLUDE, API_BLAS)),
("nvtx3/nvtx3.hpp", ("roctracer/roctx.h", CONV_INCLUDE, API_ROCTX)),
("nvToolsExt.h", ("roctracer/roctx.h", CONV_INCLUDE, API_ROCTX)),
("nvml.h", ("rocm_smi/rocm_smi.h", CONV_INCLUDE, API_ROCMSMI)),
]
)
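
# The include table above and the identifier table below share one entry shape: a CUDA
# spelling maps to a tuple of (HIP replacement, conversion group, API group), optionally
# followed by a HIP_UNSUPPORTED marker for names that have no working HIP counterpart.
# A minimal lookup sketch under that assumption (illustrative only; this helper is not
# part of the original tables, and the real rewriting pass lives elsewhere in this package):
def _lookup_hip_replacement(cuda_name, mapping):
    """Return the HIP spelling for ``cuda_name``, or None if unknown or unsupported."""
    entry = mapping.get(cuda_name)
    if entry is None:
        return None  # not a known CUDA name
    if len(entry) > 3:
        return None  # a fourth element is the HIP_UNSUPPORTED marker
    return entry[0]  # the HIP replacement string
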
CUDA_IDENTIFIER_MAP = collections.OrderedDict(
[
("__CUDACC__", ("__HIPCC__", CONV_DEF, API_RUNTIME)),
(
"CUDA_ERROR_INVALID_CONTEXT",
("hipErrorInvalidContext", CONV_TYPE, API_DRIVER),
),
(
"CUDA_ERROR_CONTEXT_ALREADY_CURRENT",
("hipErrorContextAlreadyCurrent", CONV_TYPE, API_DRIVER),
),
(
"CUDA_ERROR_ARRAY_IS_MAPPED",
("hipErrorArrayIsMapped", CONV_TYPE, API_DRIVER),
),
("CUDA_ERROR_ALREADY_MAPPED", ("hipErrorAlreadyMapped", CONV_TYPE, API_DRIVER)),
(
"CUDA_ERROR_ALREADY_ACQUIRED",
("hipErrorAlreadyAcquired", CONV_TYPE, API_DRIVER),
),
("CUDA_ERROR_NOT_MAPPED", ("hipErrorNotMapped", CONV_TYPE, API_DRIVER)),
(
"CUDA_ERROR_NOT_MAPPED_AS_ARRAY",
("hipErrorNotMappedAsArray", CONV_TYPE, API_DRIVER),
),
(
"CUDA_ERROR_NOT_MAPPED_AS_POINTER",
("hipErrorNotMappedAsPointer", CONV_TYPE, API_DRIVER),
),
(
"CUDA_ERROR_CONTEXT_ALREADY_IN_USE",
("hipErrorContextAlreadyInUse", CONV_TYPE, API_DRIVER),
),
("CUDA_ERROR_INVALID_SOURCE", ("hipErrorInvalidSource", CONV_TYPE, API_DRIVER)),
("CUDA_ERROR_FILE_NOT_FOUND", ("hipErrorFileNotFound", CONV_TYPE, API_DRIVER)),
("CUDA_ERROR_NOT_FOUND", ("hipErrorNotFound", CONV_TYPE, API_DRIVER)),
(
"CUDA_ERROR_LAUNCH_INCOMPATIBLE_TEXTURING",
(
"hipErrorLaunchIncompatibleTexturing",
CONV_TYPE,
API_DRIVER,
HIP_UNSUPPORTED,
),
),
(
"CUDA_ERROR_PRIMARY_CONTEXT_ACTIVE",
("hipErrorPrimaryContextActive", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED),
),
(
"CUDA_ERROR_CONTEXT_IS_DESTROYED",
("hipErrorContextIsDestroyed", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED),
),
(
"CUDA_ERROR_NOT_PERMITTED",
("hipErrorNotPermitted", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED),
),
(
"CUDA_ERROR_NOT_SUPPORTED",
("hipErrorNotSupported", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED),
),
(
"cudaErrorMissingConfiguration",
("hipErrorMissingConfiguration", CONV_TYPE, API_RUNTIME, HIP_UNSUPPORTED),
),
(
"cudaErrorPriorLaunchFailure",
("hipErrorPriorLaunchFailure", CONV_TYPE, API_RUNTIME, HIP_UNSUPPORTED),
),
(
"cudaErrorInvalidDeviceFunction",
("hipErrorInvalidDeviceFunction", CONV_TYPE, API_RUNTIME, HIP_UNSUPPORTED),
),
(
"cudaErrorInvalidConfiguration",
("hipErrorInvalidConfiguration", CONV_TYPE, API_RUNTIME, HIP_UNSUPPORTED),
),
(
"cudaErrorInvalidPitchValue",
("hipErrorInvalidPitchValue", CONV_TYPE, API_RUNTIME, HIP_UNSUPPORTED),
),
(
"cudaErrorInvalidSymbol",
("hipErrorInvalidSymbol", CONV_TYPE, API_RUNTIME, HIP_UNSUPPORTED),
),
(
"cudaErrorInvalidHostPointer",
("hipErrorInvalidHostPointer", CONV_TYPE, API_RUNTIME, HIP_UNSUPPORTED),
),
(
"cudaErrorInvalidDevicePointer",
("hipErrorInvalidDevicePointer", CONV_TYPE, API_RUNTIME),
),
(
"cudaErrorInvalidTexture",
("hipErrorInvalidTexture", CONV_TYPE, API_RUNTIME, HIP_UNSUPPORTED),
),
(
"cudaErrorInvalidTextureBinding",
("hipErrorInvalidTextureBinding", CONV_TYPE, API_RUNTIME, HIP_UNSUPPORTED),
),
(
"cudaErrorInvalidChannelDescriptor",
(
"hipErrorInvalidChannelDescriptor",
CONV_TYPE,
API_RUNTIME,
HIP_UNSUPPORTED,
),
),
(
"cudaErrorInvalidMemcpyDirection",
("hipErrorInvalidMemcpyDirection", CONV_TYPE, API_RUNTIME, HIP_UNSUPPORTED),
),
(
"cudaErrorAddressOfConstant",
("hipErrorAddressOfConstant", CONV_TYPE, API_RUNTIME, HIP_UNSUPPORTED),
),
(
"cudaErrorTextureFetchFailed",
("hipErrorTextureFetchFailed", CONV_TYPE, API_RUNTIME, HIP_UNSUPPORTED),
),
(
"cudaErrorTextureNotBound",
("hipErrorTextureNotBound", CONV_TYPE, API_RUNTIME, HIP_UNSUPPORTED),
),
(
"cudaErrorSynchronizationError",
("hipErrorSynchronizationError", CONV_TYPE, API_RUNTIME, HIP_UNSUPPORTED),
),
(
"cudaErrorInvalidFilterSetting",
("hipErrorInvalidFilterSetting", CONV_TYPE, API_RUNTIME, HIP_UNSUPPORTED),
),
(
"cudaErrorInvalidNormSetting",
("hipErrorInvalidNormSetting", CONV_TYPE, API_RUNTIME, HIP_UNSUPPORTED),
),
(
"cudaErrorMixedDeviceExecution",
("hipErrorMixedDeviceExecution", CONV_TYPE, API_RUNTIME, HIP_UNSUPPORTED),
),
(
"cudaErrorNotYetImplemented",
("hipErrorNotYetImplemented", CONV_TYPE, API_RUNTIME, HIP_UNSUPPORTED),
),
(
"cudaErrorMemoryValueTooLarge",
("hipErrorMemoryValueTooLarge", CONV_TYPE, API_RUNTIME, HIP_UNSUPPORTED),
),
(
"cudaErrorInsufficientDriver",
("hipErrorInsufficientDriver", CONV_TYPE, API_RUNTIME, HIP_UNSUPPORTED),
),
(
"cudaErrorSetOnActiveProcess",
("hipErrorSetOnActiveProcess", CONV_TYPE, API_RUNTIME, HIP_UNSUPPORTED),
),
(
"cudaErrorInvalidSurface",
("hipErrorInvalidSurface", CONV_TYPE, API_RUNTIME, HIP_UNSUPPORTED),
),
(
"cudaErrorDuplicateVariableName",
("hipErrorDuplicateVariableName", CONV_TYPE, API_RUNTIME, HIP_UNSUPPORTED),
),
(
"cudaErrorDuplicateTextureName",
("hipErrorDuplicateTextureName", CONV_TYPE, API_RUNTIME, HIP_UNSUPPORTED),
),
(
"cudaErrorDuplicateSurfaceName",
("hipErrorDuplicateSurfaceName", CONV_TYPE, API_RUNTIME, HIP_UNSUPPORTED),
),
(
"cudaErrorDevicesUnavailable",
("hipErrorDevicesUnavailable", CONV_TYPE, API_RUNTIME, HIP_UNSUPPORTED),
),
(
"cudaErrorIncompatibleDriverContext",
(
"hipErrorIncompatibleDriverContext",
CONV_TYPE,
API_RUNTIME,
HIP_UNSUPPORTED,
),
),
(
"cudaErrorDeviceAlreadyInUse",
("hipErrorDeviceAlreadyInUse", CONV_TYPE, API_RUNTIME, HIP_UNSUPPORTED),
),
(
"cudaErrorLaunchMaxDepthExceeded",
("hipErrorLaunchMaxDepthExceeded", CONV_TYPE, API_RUNTIME, HIP_UNSUPPORTED),
),
(
"cudaErrorLaunchFileScopedTex",
("hipErrorLaunchFileScopedTex", CONV_TYPE, API_RUNTIME, HIP_UNSUPPORTED),
),
(
"cudaErrorLaunchFileScopedSurf",
("hipErrorLaunchFileScopedSurf", CONV_TYPE, API_RUNTIME, HIP_UNSUPPORTED),
),
(
"cudaErrorSyncDepthExceeded",
("hipErrorSyncDepthExceeded", CONV_TYPE, API_RUNTIME, HIP_UNSUPPORTED),
),
(
"cudaErrorLaunchPendingCountExceeded",
(
"hipErrorLaunchPendingCountExceeded",
CONV_TYPE,
API_RUNTIME,
HIP_UNSUPPORTED,
),
),
(
"cudaErrorNotPermitted",
("hipErrorNotPermitted", CONV_TYPE, API_RUNTIME, HIP_UNSUPPORTED),
),
(
"cudaErrorNotSupported",
("hipErrorNotSupported", CONV_TYPE, API_RUNTIME, HIP_UNSUPPORTED),
),
(
"cudaErrorStartupFailure",
("hipErrorStartupFailure", CONV_TYPE, API_RUNTIME, HIP_UNSUPPORTED),
),
(
"cudaErrorApiFailureBase",
("hipErrorApiFailureBase", CONV_TYPE, API_RUNTIME, HIP_UNSUPPORTED),
),
("CUDA_SUCCESS", ("hipSuccess", CONV_TYPE, API_DRIVER)),
("cudaSuccess", ("hipSuccess", CONV_TYPE, API_RUNTIME)),
("CUDA_ERROR_INVALID_VALUE", ("hipErrorInvalidValue", CONV_TYPE, API_DRIVER)),
("cudaErrorInvalidValue", ("hipErrorInvalidValue", CONV_TYPE, API_RUNTIME)),
(
"CUDA_ERROR_OUT_OF_MEMORY",
("hipErrorMemoryAllocation", CONV_TYPE, API_DRIVER),
),
(
"cudaErrorMemoryAllocation",
("hipErrorMemoryAllocation", CONV_TYPE, API_RUNTIME),
),
(
"CUDA_ERROR_NOT_INITIALIZED",
("hipErrorNotInitialized", CONV_TYPE, API_DRIVER),
),
(
"cudaErrorInitializationError",
("hipErrorInitializationError", CONV_TYPE, API_RUNTIME),
),
("CUDA_ERROR_DEINITIALIZED", ("hipErrorDeinitialized", CONV_TYPE, API_DRIVER)),
(
"cudaErrorCudartUnloading",
("hipErrorDeinitialized", CONV_TYPE, API_RUNTIME, HIP_UNSUPPORTED),
),
(
"CUDA_ERROR_PROFILER_DISABLED",
("hipErrorProfilerDisabled", CONV_TYPE, API_DRIVER),
),
(
"cudaErrorProfilerDisabled",
("hipErrorProfilerDisabled", CONV_TYPE, API_RUNTIME, HIP_UNSUPPORTED),
),
(
"CUDA_ERROR_PROFILER_NOT_INITIALIZED",
("hipErrorProfilerNotInitialized", CONV_TYPE, API_DRIVER),
),
(
"cudaErrorProfilerNotInitialized",
("hipErrorProfilerNotInitialized", CONV_TYPE, API_RUNTIME, HIP_UNSUPPORTED),
),
(
"CUDA_ERROR_PROFILER_ALREADY_STARTED",
("hipErrorProfilerAlreadyStarted", CONV_TYPE, API_DRIVER),
),
(
"cudaErrorProfilerAlreadyStarted",
("hipErrorProfilerAlreadyStarted", CONV_TYPE, API_RUNTIME, HIP_UNSUPPORTED),
),
(
"CUDA_ERROR_PROFILER_ALREADY_STOPPED",
("hipErrorProfilerAlreadyStopped", CONV_TYPE, API_DRIVER),
),
(
"cudaErrorProfilerAlreadyStopped",
("hipErrorProfilerAlreadyStopped", CONV_TYPE, API_RUNTIME, HIP_UNSUPPORTED),
),
("CUDA_ERROR_NO_DEVICE", ("hipErrorNoDevice", CONV_TYPE, API_DRIVER)),
("cudaErrorNoDevice", ("hipErrorNoDevice", CONV_TYPE, API_RUNTIME)),
("CUDA_ERROR_INVALID_DEVICE", ("hipErrorInvalidDevice", CONV_TYPE, API_DRIVER)),
("cudaErrorInvalidDevice", ("hipErrorInvalidDevice", CONV_TYPE, API_RUNTIME)),
("CUDA_ERROR_INVALID_IMAGE", ("hipErrorInvalidImage", CONV_TYPE, API_DRIVER)),
(
"cudaErrorInvalidKernelImage",
("hipErrorInvalidImage", CONV_TYPE, API_RUNTIME, HIP_UNSUPPORTED),
),
("CUDA_ERROR_MAP_FAILED", ("hipErrorMapFailed", CONV_TYPE, API_DRIVER)),
(
"cudaErrorMapBufferObjectFailed",
("hipErrorMapFailed", CONV_TYPE, API_RUNTIME, HIP_UNSUPPORTED),
),
("CUDA_ERROR_UNMAP_FAILED", ("hipErrorUnmapFailed", CONV_TYPE, API_DRIVER)),
(
"cudaErrorUnmapBufferObjectFailed",
("hipErrorUnmapFailed", CONV_TYPE, API_RUNTIME, HIP_UNSUPPORTED),
),
(
"CUDA_ERROR_NO_BINARY_FOR_GPU",
("hipErrorNoBinaryForGpu", CONV_TYPE, API_DRIVER),
),
(
"cudaErrorNoKernelImageForDevice",
("hipErrorNoBinaryForGpu", CONV_TYPE, API_RUNTIME, HIP_UNSUPPORTED),
),
(
"CUDA_ERROR_ECC_UNCORRECTABLE",
("hipErrorECCNotCorrectable", CONV_TYPE, API_DRIVER),
),
(
"cudaErrorECCUncorrectable",
("hipErrorECCNotCorrectable", CONV_TYPE, API_RUNTIME, HIP_UNSUPPORTED),
),
(
"CUDA_ERROR_UNSUPPORTED_LIMIT",
("hipErrorUnsupportedLimit", CONV_TYPE, API_DRIVER),
),
(
"cudaErrorUnsupportedLimit",
("hipErrorUnsupportedLimit", CONV_TYPE, API_RUNTIME, HIP_UNSUPPORTED),
),
(
"CUDA_ERROR_PEER_ACCESS_UNSUPPORTED",
("hipErrorPeerAccessUnsupported", CONV_TYPE, API_DRIVER),
),
(
"cudaErrorPeerAccessUnsupported",
("hipErrorPeerAccessUnsupported", CONV_TYPE, API_RUNTIME, HIP_UNSUPPORTED),
),
(
"CUDA_ERROR_INVALID_PTX",
("hipErrorInvalidKernelFile", CONV_TYPE, API_DRIVER),
),
(
"cudaErrorInvalidPtx",
("hipErrorInvalidKernelFile", CONV_TYPE, API_RUNTIME, HIP_UNSUPPORTED),
),
(
"CUDA_ERROR_INVALID_GRAPHICS_CONTEXT",
("hipErrorInvalidGraphicsContext", CONV_TYPE, API_DRIVER),
),
(
"cudaErrorInvalidGraphicsContext",
("hipErrorInvalidGraphicsContext", CONV_TYPE, API_RUNTIME, HIP_UNSUPPORTED),
),
(
"CUDA_ERROR_NVLINK_UNCORRECTABLE",
("hipErrorNvlinkUncorrectable", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED),
),
(
"cudaErrorNvlinkUncorrectable",
("hipErrorNvlinkUncorrectable", CONV_TYPE, API_RUNTIME, HIP_UNSUPPORTED),
),
(
"CUDA_ERROR_SHARED_OBJECT_SYMBOL_NOT_FOUND",
("hipErrorSharedObjectSymbolNotFound", CONV_TYPE, API_DRIVER),
),
(
"cudaErrorSharedObjectSymbolNotFound",
(
"hipErrorSharedObjectSymbolNotFound",
CONV_TYPE,
API_RUNTIME,
HIP_UNSUPPORTED,
),
),
(
"CUDA_ERROR_SHARED_OBJECT_INIT_FAILED",
("hipErrorSharedObjectInitFailed", CONV_TYPE, API_DRIVER),
),
(
"cudaErrorSharedObjectInitFailed",
("hipErrorSharedObjectInitFailed", CONV_TYPE, API_RUNTIME, HIP_UNSUPPORTED),
),
(
"CUDA_ERROR_OPERATING_SYSTEM",
("hipErrorOperatingSystem", CONV_TYPE, API_DRIVER),
),
(
"cudaErrorOperatingSystem",
("hipErrorOperatingSystem", CONV_TYPE, API_RUNTIME, HIP_UNSUPPORTED),
),
(
"CUDA_ERROR_INVALID_HANDLE",
("hipErrorInvalidResourceHandle", CONV_TYPE, API_DRIVER),
),
(
"cudaErrorInvalidResourceHandle",
("hipErrorInvalidResourceHandle", CONV_TYPE, API_RUNTIME),
),
("CUDA_ERROR_NOT_READY", ("hipErrorNotReady", CONV_TYPE, API_DRIVER)),
("cudaErrorNotReady", ("hipErrorNotReady", CONV_TYPE, API_RUNTIME)),
(
"CUDA_ERROR_ILLEGAL_ADDRESS",
("hipErrorIllegalAddress", CONV_TYPE, API_DRIVER),
),
(
"cudaErrorIllegalAddress",
("hipErrorIllegalAddress", CONV_TYPE, API_RUNTIME, HIP_UNSUPPORTED),
),
(
"CUDA_ERROR_LAUNCH_OUT_OF_RESOURCES",
("hipErrorLaunchOutOfResources", CONV_TYPE, API_DRIVER),
),
(
"cudaErrorLaunchOutOfResources",
("hipErrorLaunchOutOfResources", CONV_TYPE, API_RUNTIME),
),
("CUDA_ERROR_LAUNCH_TIMEOUT", ("hipErrorLaunchTimeOut", CONV_TYPE, API_DRIVER)),
(
"cudaErrorLaunchTimeout",
("hipErrorLaunchTimeOut", CONV_TYPE, API_RUNTIME, HIP_UNSUPPORTED),
),
(
"CUDA_ERROR_PEER_ACCESS_ALREADY_ENABLED",
("hipErrorPeerAccessAlreadyEnabled", CONV_TYPE, API_DRIVER),
),
(
"cudaErrorPeerAccessAlreadyEnabled",
("hipErrorPeerAccessAlreadyEnabled", CONV_TYPE, API_RUNTIME),
),
(
"CUDA_ERROR_PEER_ACCESS_NOT_ENABLED",
("hipErrorPeerAccessNotEnabled", CONV_TYPE, API_DRIVER),
),
(
"cudaErrorPeerAccessNotEnabled",
("hipErrorPeerAccessNotEnabled", CONV_TYPE, API_RUNTIME),
),
(
"CUDA_ERROR_ASSERT",
("hipErrorAssert", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED),
),
(
"cudaErrorAssert",
("hipErrorAssert", CONV_TYPE, API_RUNTIME, HIP_UNSUPPORTED),
),
(
"CUDA_ERROR_TOO_MANY_PEERS",
("hipErrorTooManyPeers", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED),
),
(
"cudaErrorTooManyPeers",
("hipErrorTooManyPeers", CONV_TYPE, API_RUNTIME, HIP_UNSUPPORTED),
),
(
"CUDA_ERROR_HOST_MEMORY_ALREADY_REGISTERED",
("hipErrorHostMemoryAlreadyRegistered", CONV_TYPE, API_DRIVER),
),
(
"cudaErrorHostMemoryAlreadyRegistered",
("hipErrorHostMemoryAlreadyRegistered", CONV_TYPE, API_RUNTIME),
),
(
"CUDA_ERROR_HOST_MEMORY_NOT_REGISTERED",
("hipErrorHostMemoryNotRegistered", CONV_TYPE, API_DRIVER),
),
(
"cudaErrorHostMemoryNotRegistered",
("hipErrorHostMemoryNotRegistered", CONV_TYPE, API_RUNTIME),
),
(
"CUDA_ERROR_HARDWARE_STACK_ERROR",
("hipErrorHardwareStackError", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED),
),
(
"cudaErrorHardwareStackError",
("hipErrorHardwareStackError", CONV_TYPE, API_RUNTIME, HIP_UNSUPPORTED),
),
(
"CUDA_ERROR_ILLEGAL_INSTRUCTION",
("hipErrorIllegalInstruction", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED),
),
(
"cudaErrorIllegalInstruction",
("hipErrorIllegalInstruction", CONV_TYPE, API_RUNTIME, HIP_UNSUPPORTED),
),
(
"CUDA_ERROR_MISALIGNED_ADDRESS",
("hipErrorMisalignedAddress", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED),
),
(
"cudaErrorMisalignedAddress",
("hipErrorMisalignedAddress", CONV_TYPE, API_RUNTIME, HIP_UNSUPPORTED),
),
(
"CUDA_ERROR_INVALID_ADDRESS_SPACE",
("hipErrorInvalidAddressSpace", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED),
),
(
"cudaErrorInvalidAddressSpace",
("hipErrorInvalidAddressSpace", CONV_TYPE, API_RUNTIME, HIP_UNSUPPORTED),
),
(
"CUDA_ERROR_INVALID_PC",
("hipErrorInvalidPc", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED),
),
(
"cudaErrorInvalidPc",
("hipErrorInvalidPc", CONV_TYPE, API_RUNTIME, HIP_UNSUPPORTED),
),
(
"CUDA_ERROR_LAUNCH_FAILED",
("hipErrorLaunchFailure", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED),
),
(
"cudaErrorLaunchFailure",
("hipErrorLaunchFailure", CONV_TYPE, API_RUNTIME, HIP_UNSUPPORTED),
),
(
"CUDA_ERROR_UNKNOWN",
("hipErrorUnknown", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED),
),
("cudaErrorUnknown", ("hipErrorUnknown", CONV_TYPE, API_RUNTIME)),
(
"CU_TR_ADDRESS_MODE_WRAP",
("HIP_TR_ADDRESS_MODE_WRAP", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED),
),
(
"CU_TR_ADDRESS_MODE_CLAMP",
("HIP_TR_ADDRESS_MODE_CLAMP", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED),
),
(
"CU_TR_ADDRESS_MODE_MIRROR",
("HIP_TR_ADDRESS_MODE_MIRROR", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED),
),
(
"CU_TR_ADDRESS_MODE_BORDER",
("HIP_TR_ADDRESS_MODE_BORDER", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED),
),
(
"CU_CUBEMAP_FACE_POSITIVE_X",
("HIP_CUBEMAP_FACE_POSITIVE_X", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED),
),
(
"CU_CUBEMAP_FACE_NEGATIVE_X",
("HIP_CUBEMAP_FACE_NEGATIVE_X", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED),
),
(
"CU_CUBEMAP_FACE_POSITIVE_Y",
("HIP_CUBEMAP_FACE_POSITIVE_Y", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED),
),
(
"CU_CUBEMAP_FACE_NEGATIVE_Y",
("HIP_CUBEMAP_FACE_NEGATIVE_Y", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED),
),
(
"CU_CUBEMAP_FACE_POSITIVE_Z",
("HIP_CUBEMAP_FACE_POSITIVE_Z", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED),
),
(
"CU_CUBEMAP_FACE_NEGATIVE_Z",
("HIP_CUBEMAP_FACE_NEGATIVE_Z", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED),
),
(
"CU_AD_FORMAT_UNSIGNED_INT8",
("HIP_AD_FORMAT_UNSIGNED_INT8", CONV_TYPE, API_DRIVER),
),
(
"CU_AD_FORMAT_UNSIGNED_INT16",
("HIP_AD_FORMAT_UNSIGNED_INT16", CONV_TYPE, API_DRIVER),
),
(
"CU_AD_FORMAT_UNSIGNED_INT32",
("HIP_AD_FORMAT_UNSIGNED_INT32", CONV_TYPE, API_DRIVER),
),
(
"CU_AD_FORMAT_SIGNED_INT8",
("HIP_AD_FORMAT_SIGNED_INT8", CONV_TYPE, API_DRIVER),
),
(
"CU_AD_FORMAT_SIGNED_INT16",
("HIP_AD_FORMAT_SIGNED_INT16", CONV_TYPE, API_DRIVER),
),
(
"CU_AD_FORMAT_SIGNED_INT32",
("HIP_AD_FORMAT_SIGNED_INT32", CONV_TYPE, API_DRIVER),
),
("CU_AD_FORMAT_HALF", ("HIP_AD_FORMAT_HALF", CONV_TYPE, API_DRIVER)),
("CU_AD_FORMAT_FLOAT", ("HIP_AD_FORMAT_FLOAT", CONV_TYPE, API_DRIVER)),
(
"CU_COMPUTEMODE_DEFAULT",
("hipComputeModeDefault", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED),
),
(
"CU_COMPUTEMODE_EXCLUSIVE",
("hipComputeModeExclusive", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED),
),
(
"CU_COMPUTEMODE_PROHIBITED",
("hipComputeModeProhibited", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED),
),
(
"CU_COMPUTEMODE_EXCLUSIVE_PROCESS",
("hipComputeModeExclusiveProcess", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED),
),
(
"CU_MEM_ADVISE_SET_READ_MOSTLY",
("hipMemAdviseSetReadMostly", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED),
),
(
"CU_MEM_ADVISE_UNSET_READ_MOSTLY",
("hipMemAdviseUnsetReadMostly", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED),
),
(
"CU_MEM_ADVISE_SET_PREFERRED_LOCATION",
(
"hipMemAdviseSetPreferredLocation",
CONV_TYPE,
API_DRIVER,
HIP_UNSUPPORTED,
),
),
(
"CU_MEM_ADVISE_UNSET_PREFERRED_LOCATION",
(
"hipMemAdviseUnsetPreferredLocation",
CONV_TYPE,
API_DRIVER,
HIP_UNSUPPORTED,
),
),
(
"CU_MEM_ADVISE_SET_ACCESSED_BY",
("hipMemAdviseSetAccessedBy", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED),
),
(
"CU_MEM_ADVISE_UNSET_ACCESSED_BY",
("hipMemAdviseUnsetAccessedBy", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED),
),
(
"CU_MEM_RANGE_ATTRIBUTE_READ_MOSTLY",
("hipMemRangeAttributeReadMostly", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED),
),
(
"CU_MEM_RANGE_ATTRIBUTE_PREFERRED_LOCATION",
(
"hipMemRangeAttributePreferredLocation",
CONV_TYPE,
API_DRIVER,
HIP_UNSUPPORTED,
),
),
(
"CU_MEM_RANGE_ATTRIBUTE_ACCESSED_BY",
("hipMemRangeAttributeAccessedBy", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED),
),
(
"CU_MEM_RANGE_ATTRIBUTE_LAST_PREFETCH_LOCATION",
(
"hipMemRangeAttributeLastPrefetchLocation",
CONV_TYPE,
API_DRIVER,
HIP_UNSUPPORTED,
),
),
(
"CU_CTX_SCHED_AUTO",
("HIP_CTX_SCHED_AUTO", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED),
),
(
"CU_CTX_SCHED_SPIN",
("HIP_CTX_SCHED_SPIN", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED),
),
(
"CU_CTX_SCHED_YIELD",
("HIP_CTX_SCHED_YIELD", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED),
),
(
"CU_CTX_SCHED_BLOCKING_SYNC",
("HIP_CTX_SCHED_BLOCKING_SYNC", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED),
),
(
"CU_CTX_BLOCKING_SYNC",
("HIP_CTX_BLOCKING_SYNC", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED),
),
(
"CU_CTX_SCHED_MASK",
("HIP_CTX_SCHED_MASK", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED),
),
(
"CU_CTX_MAP_HOST",
("HIP_CTX_MAP_HOST", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED),
),
(
"CU_CTX_LMEM_RESIZE_TO_MAX",
("HIP_CTX_LMEM_RESIZE_TO_MAX", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED),
),
(
"CU_CTX_FLAGS_MASK",
("HIP_CTX_FLAGS_MASK", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED),
),
(
"CU_LAUNCH_PARAM_BUFFER_POINTER",
("HIP_LAUNCH_PARAM_BUFFER_POINTER", CONV_TYPE, API_DRIVER),
),
(
"CU_LAUNCH_PARAM_BUFFER_SIZE",
("HIP_LAUNCH_PARAM_BUFFER_SIZE", CONV_TYPE, API_DRIVER),
),
("CU_LAUNCH_PARAM_END", ("HIP_LAUNCH_PARAM_END", CONV_TYPE, API_DRIVER)),
(
"CU_IPC_HANDLE_SIZE",
("HIP_IPC_HANDLE_SIZE", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED),
),
(
"CU_MEMHOSTALLOC_DEVICEMAP",
("HIP_MEMHOSTALLOC_DEVICEMAP", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED),
),
(
"CU_MEMHOSTALLOC_PORTABLE",
("HIP_MEMHOSTALLOC_PORTABLE", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED),
),
(
"CU_MEMHOSTALLOC_WRITECOMBINED",
("HIP_MEMHOSTALLOC_WRITECOMBINED", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED),
),
(
"CU_MEMHOSTREGISTER_DEVICEMAP",
("HIP_MEMHOSTREGISTER_DEVICEMAP", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED),
),
(
"CU_MEMHOSTREGISTER_IOMEMORY",
("HIP_MEMHOSTREGISTER_IOMEMORY", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED),
),
(
"CU_MEMHOSTREGISTER_PORTABLE",
("HIP_MEMHOSTREGISTER_PORTABLE", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED),
),
(
"CU_PARAM_TR_DEFAULT",
("HIP_PARAM_TR_DEFAULT", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED),
),
(
"CU_STREAM_LEGACY",
("HIP_STREAM_LEGACY", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED),
),
(
"CU_STREAM_PER_THREAD",
("HIP_STREAM_PER_THREAD", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED),
),
(
"CU_TRSA_OVERRIDE_FORMAT",
("HIP_TRSA_OVERRIDE_FORMAT", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED),
),
(
"CU_TRSF_NORMALIZED_COORDINATES",
("HIP_TRSF_NORMALIZED_COORDINATES", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED),
),
(
"CU_TRSF_READ_AS_INTEGER",
("HIP_TRSF_READ_AS_INTEGER", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED),
),
("CU_TRSF_SRGB", ("HIP_TRSF_SRGB", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED)),
(
"CUDA_ARRAY3D_2DARRAY",
("HIP_ARRAY3D_LAYERED", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED),
),
(
"CUDA_ARRAY3D_CUBEMAP",
("HIP_ARRAY3D_CUBEMAP", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED),
),
(
"CUDA_ARRAY3D_DEPTH_TEXTURE",
("HIP_ARRAY3D_DEPTH_TEXTURE", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED),
),
(
"CUDA_ARRAY3D_LAYERED",
("HIP_ARRAY3D_LAYERED", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED),
),
(
"CUDA_ARRAY3D_SURFACE_LDST",
("HIP_ARRAY3D_SURFACE_LDST", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED),
),
(
"CUDA_ARRAY3D_TEXTURE_GATHER",
("HIP_ARRAY3D_TEXTURE_GATHER", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED),
),
(
"CU_DEVICE_ATTRIBUTE_MAX_THREADS_PER_BLOCK",
(
"hipDeviceAttributeMaxThreadsPerBlock",
CONV_TYPE,
API_DRIVER,
HIP_UNSUPPORTED,
),
),
(
"CU_DEVICE_ATTRIBUTE_MAX_BLOCK_DIM_X",
("hipDeviceAttributeMaxBlockDimX", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED),
),
(
"CU_DEVICE_ATTRIBUTE_MAX_BLOCK_DIM_Y",
("hipDeviceAttributeMaxBlockDimY", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED),
),
(
"CU_DEVICE_ATTRIBUTE_MAX_BLOCK_DIM_Z",
("hipDeviceAttributeMaxBlockDimZ", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED),
),
(
"CU_DEVICE_ATTRIBUTE_MAX_GRID_DIM_X",
("hipDeviceAttributeMaxGridDimX", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED),
),
(
"CU_DEVICE_ATTRIBUTE_MAX_GRID_DIM_Y",
("hipDeviceAttributeMaxGridDimY", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED),
),
(
"CU_DEVICE_ATTRIBUTE_MAX_GRID_DIM_Z",
("hipDeviceAttributeMaxGridDimZ", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED),
),
(
"CU_DEVICE_ATTRIBUTE_MAX_SHARED_MEMORY_PER_BLOCK",
(
"hipDeviceAttributeMaxSharedMemoryPerBlock",
CONV_TYPE,
API_DRIVER,
HIP_UNSUPPORTED,
),
),
(
"CU_DEVICE_ATTRIBUTE_SHARED_MEMORY_PER_BLOCK",
(
"hipDeviceAttributeMaxSharedMemoryPerBlock",
CONV_TYPE,
API_DRIVER,
HIP_UNSUPPORTED,
),
),
(
"CU_DEVICE_ATTRIBUTE_TOTAL_CONSTANT_MEMORY",
(
"hipDeviceAttributeTotalConstantMemory",
CONV_TYPE,
API_DRIVER,
HIP_UNSUPPORTED,
),
),
(
"CU_DEVICE_ATTRIBUTE_WARP_SIZE",
("hipDeviceAttributeWarpSize", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED),
),
(
"CU_DEVICE_ATTRIBUTE_MAX_PITCH",
("hipDeviceAttributeMaxPitch", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED),
),
(
"CU_DEVICE_ATTRIBUTE_MAX_REGISTERS_PER_BLOCK",
(
"hipDeviceAttributeMaxRegistersPerBlock",
CONV_TYPE,
API_DRIVER,
HIP_UNSUPPORTED,
),
),
(
"CU_DEVICE_ATTRIBUTE_REGISTERS_PER_BLOCK",
(
"hipDeviceAttributeMaxRegistersPerBlock",
CONV_TYPE,
API_DRIVER,
HIP_UNSUPPORTED,
),
),
(
"CU_DEVICE_ATTRIBUTE_CLOCK_RATE",
("hipDeviceAttributeClockRate", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED),
),
(
"CU_DEVICE_ATTRIBUTE_TEXTURE_ALIGNMENT",
(
"hipDeviceAttributeTextureAlignment",
CONV_TYPE,
API_DRIVER,
HIP_UNSUPPORTED,
),
),
(
"CU_DEVICE_ATTRIBUTE_GPU_OVERLAP",
(
"hipDeviceAttributeAsyncEngineCount",
CONV_TYPE,
API_DRIVER,
HIP_UNSUPPORTED,
),
),
(
"CU_DEVICE_ATTRIBUTE_MULTIPROCESSOR_COUNT",
(
"hipDeviceAttributeMultiprocessorCount",
CONV_TYPE,
API_DRIVER,
HIP_UNSUPPORTED,
),
),
(
"CU_DEVICE_ATTRIBUTE_KERNEL_EXEC_TIMEOUT",
(
"hipDeviceAttributeKernelExecTimeout",
CONV_TYPE,
API_DRIVER,
HIP_UNSUPPORTED,
),
),
(
"CU_DEVICE_ATTRIBUTE_INTEGRATED",
("hipDeviceAttributeIntegrated", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED),
),
(
"CU_DEVICE_ATTRIBUTE_CAN_MAP_HOST_MEMORY",
(
"hipDeviceAttributeCanMapHostMemory",
CONV_TYPE,
API_DRIVER,
HIP_UNSUPPORTED,
),
),
(
"CU_DEVICE_ATTRIBUTE_COMPUTE_MODE",
("hipDeviceAttributeComputeMode", CONV_TYPE, API_DRIVER),
),
(
"CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE1D_WIDTH",
(
"hipDeviceAttributeMaxTexture1DWidth",
CONV_TYPE,
API_DRIVER,
HIP_UNSUPPORTED,
),
),
(
"CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE2D_WIDTH",
(
"hipDeviceAttributeMaxTexture2DWidth",
CONV_TYPE,
API_DRIVER,
HIP_UNSUPPORTED,
),
),
(
"CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE2D_HEIGHT",
(
"hipDeviceAttributeMaxTexture2DHeight",
CONV_TYPE,
API_DRIVER,
HIP_UNSUPPORTED,
),
),
(
"CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE3D_WIDTH",
(
"hipDeviceAttributeMaxTexture3DWidth",
CONV_TYPE,
API_DRIVER,
HIP_UNSUPPORTED,
),
),
(
"CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE3D_HEIGHT",
(
"hipDeviceAttributeMaxTexture3DHeight",
CONV_TYPE,
API_DRIVER,
HIP_UNSUPPORTED,
),
),
(
"CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE3D_DEPTH",
(
"hipDeviceAttributeMaxTexture3DDepth",
CONV_TYPE,
API_DRIVER,
HIP_UNSUPPORTED,
),
),
(
"CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE2D_LAYERED_WIDTH",
(
"hipDeviceAttributeMaxTexture2DLayeredWidth",
CONV_TYPE,
API_DRIVER,
HIP_UNSUPPORTED,
),
),
(
"CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE2D_LAYERED_HEIGHT",
(
"hipDeviceAttributeMaxTexture2DLayeredHeight",
CONV_TYPE,
API_DRIVER,
HIP_UNSUPPORTED,
),
),
(
"CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE2D_LAYERED_LAYERS",
(
"hipDeviceAttributeMaxTexture2DLayeredLayers",
CONV_TYPE,
API_DRIVER,
HIP_UNSUPPORTED,
),
),
(
"CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE2D_ARRAY_WIDTH",
(
"hipDeviceAttributeMaxTexture2DLayeredWidth",
CONV_TYPE,
API_DRIVER,
HIP_UNSUPPORTED,
),
),
(
"CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE2D_ARRAY_HEIGHT",
(
"hipDeviceAttributeMaxTexture2DLayeredHeight",
CONV_TYPE,
API_DRIVER,
HIP_UNSUPPORTED,
),
),
(
"CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE2D_ARRAY_NUMSLICES",
(
"hipDeviceAttributeMaxTexture2DLayeredLayers",
CONV_TYPE,
API_DRIVER,
HIP_UNSUPPORTED,
),
),
(
"CU_DEVICE_ATTRIBUTE_SURFACE_ALIGNMENT",
(
"hipDeviceAttributeSurfaceAlignment",
CONV_TYPE,
API_DRIVER,
HIP_UNSUPPORTED,
),
),
(
"CU_DEVICE_ATTRIBUTE_CONCURRENT_KERNELS",
("hipDeviceAttributeConcurrentKernels", CONV_TYPE, API_DRIVER),
),
(
"CU_DEVICE_ATTRIBUTE_ECC_ENABLED",
("hipDeviceAttributeEccEnabled", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED),
),
(
"CU_DEVICE_ATTRIBUTE_PCI_BUS_ID",
("hipDeviceAttributePciBusId", CONV_TYPE, API_DRIVER),
),
(
"CU_DEVICE_ATTRIBUTE_PCI_DEVICE_ID",
("hipDeviceAttributePciDeviceId", CONV_TYPE, API_DRIVER),
),
(
"CU_DEVICE_ATTRIBUTE_TCC_DRIVER",
("hipDeviceAttributeTccDriver", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED),
),
(
"CU_DEVICE_ATTRIBUTE_MEMORY_CLOCK_RATE",
(
"hipDeviceAttributeMemoryClockRate",
CONV_TYPE,
API_DRIVER,
HIP_UNSUPPORTED,
),
),
(
"CU_DEVICE_ATTRIBUTE_GLOBAL_MEMORY_BUS_WIDTH",
("hipDeviceAttributeMemoryBusWidth", CONV_TYPE, API_DRIVER),
),
(
"CU_DEVICE_ATTRIBUTE_L2_CACHE_SIZE",
("hipDeviceAttributeL2CacheSize", CONV_TYPE, API_DRIVER),
),
(
"CU_DEVICE_ATTRIBUTE_MAX_THREADS_PER_MULTIPROCESSOR",
("hipDeviceAttributeMaxThreadsPerMultiProcessor", CONV_TYPE, API_DRIVER),
),
(
"CU_DEVICE_ATTRIBUTE_ASYNC_ENGINE_COUNT",
(
"hipDeviceAttributeAsyncEngineCount",
CONV_TYPE,
API_DRIVER,
HIP_UNSUPPORTED,
),
),
(
"CU_DEVICE_ATTRIBUTE_UNIFIED_ADDRESSING",
(
"hipDeviceAttributeUnifiedAddressing",
CONV_TYPE,
API_DRIVER,
HIP_UNSUPPORTED,
),
),
(
"CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE1D_LAYERED_WIDTH",
(
"hipDeviceAttributeMaxTexture1DLayeredWidth",
CONV_TYPE,
API_DRIVER,
HIP_UNSUPPORTED,
),
),
(
"CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE1D_LAYERED_LAYERS",
(
"hipDeviceAttributeMaxTexture1DLayeredLayers",
CONV_TYPE,
API_DRIVER,
HIP_UNSUPPORTED,
),
),
(
"CU_DEVICE_ATTRIBUTE_CAN_TEX2D_GATHER",
(
"hipDeviceAttributeCanTex2DGather",
CONV_TYPE,
API_DRIVER,
HIP_UNSUPPORTED,
),
),
(
"CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE2D_GATHER_WIDTH",
(
"hipDeviceAttributeMaxTexture2DGatherWidth",
CONV_TYPE,
API_DRIVER,
HIP_UNSUPPORTED,
),
),
(
"CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE2D_GATHER_HEIGHT",
(
"hipDeviceAttributeMaxTexture2DGatherHeight",
CONV_TYPE,
API_DRIVER,
HIP_UNSUPPORTED,
),
),
(
"CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE3D_WIDTH_ALTERNATE",
(
"hipDeviceAttributeMaxTexture3DWidthAlternate",
CONV_TYPE,
API_DRIVER,
HIP_UNSUPPORTED,
),
),
(
"CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE3D_HEIGHT_ALTERNATE",
(
"hipDeviceAttributeMaxTexture3DHeightAlternate",
CONV_TYPE,
API_DRIVER,
HIP_UNSUPPORTED,
),
),
(
"CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE3D_DEPTH_ALTERNATE",
(
"hipDeviceAttributeMaxTexture3DDepthAlternate",
CONV_TYPE,
API_DRIVER,
HIP_UNSUPPORTED,
),
),
(
"CU_DEVICE_ATTRIBUTE_PCI_DOMAIN_ID",
("hipDeviceAttributePciDomainId", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED),
),
(
"CU_DEVICE_ATTRIBUTE_TEXTURE_PITCH_ALIGNMENT",
(
"hipDeviceAttributeTexturePitchAlignment",
CONV_TYPE,
API_DRIVER,
HIP_UNSUPPORTED,
),
),
(
"CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURECUBEMAP_WIDTH",
(
"hipDeviceAttributeMaxTextureCubemapWidth",
CONV_TYPE,
API_DRIVER,
HIP_UNSUPPORTED,
),
),
(
"CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURECUBEMAP_LAYERED_WIDTH",
(
"hipDeviceAttributeMaxTextureCubemapLayeredWidth",
CONV_TYPE,
API_DRIVER,
HIP_UNSUPPORTED,
),
),
(
"CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURECUBEMAP_LAYERED_LAYERS",
(
"hipDeviceAttributeMaxTextureCubemapLayeredLayers",
CONV_TYPE,
API_DRIVER,
HIP_UNSUPPORTED,
),
),
(
"CU_DEVICE_ATTRIBUTE_MAXIMUM_SURFACE1D_WIDTH",
(
"hipDeviceAttributeMaxSurface1DWidth",
CONV_TYPE,
API_DRIVER,
HIP_UNSUPPORTED,
),
),
(
"CU_DEVICE_ATTRIBUTE_MAXIMUM_SURFACE2D_WIDTH",
(
"hipDeviceAttributeMaxSurface2DWidth",
CONV_TYPE,
API_DRIVER,
HIP_UNSUPPORTED,
),
),
(
"CU_DEVICE_ATTRIBUTE_MAXIMUM_SURFACE2D_HEIGHT",
(
"hipDeviceAttributeMaxSurface2DHeight",
CONV_TYPE,
API_DRIVER,
HIP_UNSUPPORTED,
),
),
(
"CU_DEVICE_ATTRIBUTE_MAXIMUM_SURFACE3D_WIDTH",
(
"hipDeviceAttributeMaxSurface3DWidth",
CONV_TYPE,
API_DRIVER,
HIP_UNSUPPORTED,
),
),
(
"CU_DEVICE_ATTRIBUTE_MAXIMUM_SURFACE3D_HEIGHT",
(
"hipDeviceAttributeMaxSurface3DHeight",
CONV_TYPE,
API_DRIVER,
HIP_UNSUPPORTED,
),
),
(
"CU_DEVICE_ATTRIBUTE_MAXIMUM_SURFACE3D_DEPTH",
(
"hipDeviceAttributeMaxSurface3DDepth",
CONV_TYPE,
API_DRIVER,
HIP_UNSUPPORTED,
),
),
(
"CU_DEVICE_ATTRIBUTE_MAXIMUM_SURFACE1D_LAYERED_WIDTH",
(
"hipDeviceAttributeMaxSurface1DLayeredWidth",
CONV_TYPE,
API_DRIVER,
HIP_UNSUPPORTED,
),
),
(
"CU_DEVICE_ATTRIBUTE_MAXIMUM_SURFACE1D_LAYERED_LAYERS",
(
"hipDeviceAttributeMaxSurface1DLayeredLayers",
CONV_TYPE,
API_DRIVER,
HIP_UNSUPPORTED,
),
),
(
"CU_DEVICE_ATTRIBUTE_MAXIMUM_SURFACE2D_LAYERED_WIDTH",
(
"hipDeviceAttributeMaxSurface2DLayeredWidth",
CONV_TYPE,
API_DRIVER,
HIP_UNSUPPORTED,
),
),
(
"CU_DEVICE_ATTRIBUTE_MAXIMUM_SURFACE2D_LAYERED_HEIGHT",
(
"hipDeviceAttributeMaxSurface2DLayeredHeight",
CONV_TYPE,
API_DRIVER,
HIP_UNSUPPORTED,
),
),
(
"CU_DEVICE_ATTRIBUTE_MAXIMUM_SURFACE2D_LAYERED_LAYERS",
(
"hipDeviceAttributeMaxSurface2DLayeredLayers",
CONV_TYPE,
API_DRIVER,
HIP_UNSUPPORTED,
),
),
(
"CU_DEVICE_ATTRIBUTE_MAXIMUM_SURFACECUBEMAP_WIDTH",
(
"hipDeviceAttributeMaxSurfaceCubemapWidth",
CONV_TYPE,
API_DRIVER,
HIP_UNSUPPORTED,
),
),
(
"CU_DEVICE_ATTRIBUTE_MAXIMUM_SURFACECUBEMAP_LAYERED_WIDTH",
(
"hipDeviceAttributeMaxSurfaceCubemapLayeredWidth",
CONV_TYPE,
API_DRIVER,
HIP_UNSUPPORTED,
),
),
(
"CU_DEVICE_ATTRIBUTE_MAXIMUM_SURFACECUBEMAP_LAYERED_LAYERS",
(
"hipDeviceAttributeMaxSurfaceCubemapLayeredLayers",
CONV_TYPE,
API_DRIVER,
HIP_UNSUPPORTED,
),
),
(
"CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE1D_LINEAR_WIDTH",
(
"hipDeviceAttributeMaxTexture1DLinearWidth",
CONV_TYPE,
API_DRIVER,
HIP_UNSUPPORTED,
),
),
(
"CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE2D_LINEAR_WIDTH",
(
"hipDeviceAttributeMaxTexture2DLinearWidth",
CONV_TYPE,
API_DRIVER,
HIP_UNSUPPORTED,
),
),
(
"CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE2D_LINEAR_HEIGHT",
(
"hipDeviceAttributeMaxTexture2DLinearHeight",
CONV_TYPE,
API_DRIVER,
HIP_UNSUPPORTED,
),
),
(
"CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE2D_LINEAR_PITCH",
(
"hipDeviceAttributeMaxTexture2DLinearPitch",
CONV_TYPE,
API_DRIVER,
HIP_UNSUPPORTED,
),
),
(
"CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE2D_MIPMAPPED_WIDTH",
(
"hipDeviceAttributeMaxTexture2DMipmappedWidth",
CONV_TYPE,
API_DRIVER,
HIP_UNSUPPORTED,
),
),
(
"CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE2D_MIPMAPPED_HEIGHT",
(
"hipDeviceAttributeMaxTexture2DMipmappedHeight",
CONV_TYPE,
API_DRIVER,
HIP_UNSUPPORTED,
),
),
(
"CU_DEVICE_ATTRIBUTE_COMPUTE_CAPABILITY_MAJOR",
("hipDeviceAttributeComputeCapabilityMajor", CONV_TYPE, API_DRIVER),
),
(
"CU_DEVICE_ATTRIBUTE_COMPUTE_CAPABILITY_MINOR",
("hipDeviceAttributeComputeCapabilityMinor", CONV_TYPE, API_DRIVER),
),
(
"CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE1D_MIPMAPPED_WIDTH",
(
"hipDeviceAttributeMaxTexture1DMipmappedWidth",
CONV_TYPE,
API_DRIVER,
HIP_UNSUPPORTED,
),
),
(
"CU_DEVICE_ATTRIBUTE_STREAM_PRIORITIES_SUPPORTED",
(
"hipDeviceAttributeStreamPrioritiesSupported",
CONV_TYPE,
API_DRIVER,
HIP_UNSUPPORTED,
),
),
(
"CU_DEVICE_ATTRIBUTE_GLOBAL_L1_CACHE_SUPPORTED",
(
"hipDeviceAttributeGlobalL1CacheSupported",
CONV_TYPE,
API_DRIVER,
HIP_UNSUPPORTED,
),
),
(
"CU_DEVICE_ATTRIBUTE_LOCAL_L1_CACHE_SUPPORTED",
(
"hipDeviceAttributeLocalL1CacheSupported",
CONV_TYPE,
API_DRIVER,
HIP_UNSUPPORTED,
),
),
(
"CU_DEVICE_ATTRIBUTE_MAX_SHARED_MEMORY_PER_MULTIPROCESSOR",
(
"hipDeviceAttributeMaxSharedMemoryPerMultiprocessor",
CONV_TYPE,
API_DRIVER,
),
),
(
"CU_DEVICE_ATTRIBUTE_MAX_REGISTERS_PER_MULTIPROCESSOR",
(
"hipDeviceAttributeMaxRegistersPerMultiprocessor",
CONV_TYPE,
API_DRIVER,
HIP_UNSUPPORTED,
),
),
(
"CU_DEVICE_ATTRIBUTE_MANAGED_MEMORY",
("hipDeviceAttributeManagedMemory", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED),
),
(
"CU_DEVICE_ATTRIBUTE_MULTI_GPU_BOARD",
("hipDeviceAttributeIsMultiGpuBoard", CONV_TYPE, API_DRIVER),
),
(
"CU_DEVICE_ATTRIBUTE_MULTI_GPU_BOARD_GROUP_ID",
(
"hipDeviceAttributeMultiGpuBoardGroupId",
CONV_TYPE,
API_DRIVER,
HIP_UNSUPPORTED,
),
),
(
"CU_DEVICE_ATTRIBUTE_HOST_NATIVE_ATOMIC_SUPPORTED",
(
"hipDeviceAttributeHostNativeAtomicSupported",
CONV_TYPE,
API_DRIVER,
HIP_UNSUPPORTED,
),
),
(
"CU_DEVICE_ATTRIBUTE_SINGLE_TO_DOUBLE_PRECISION_PERF_RATIO",
(
"hipDeviceAttributeSingleToDoublePrecisionPerfRatio",
CONV_TYPE,
API_DRIVER,
HIP_UNSUPPORTED,
),
),
(
"CU_DEVICE_ATTRIBUTE_PAGEABLE_MEMORY_ACCESS",
(
"hipDeviceAttributePageableMemoryAccess",
CONV_TYPE,
API_DRIVER,
HIP_UNSUPPORTED,
),
),
(
"CU_DEVICE_ATTRIBUTE_CONCURRENT_MANAGED_ACCESS",
(
"hipDeviceAttributeConcurrentManagedAccess",
CONV_TYPE,
API_DRIVER,
HIP_UNSUPPORTED,
),
),
(
"CU_DEVICE_ATTRIBUTE_COMPUTE_PREEMPTION_SUPPORTED",
(
"hipDeviceAttributeComputePreemptionSupported",
CONV_TYPE,
API_DRIVER,
HIP_UNSUPPORTED,
),
),
(
"CU_DEVICE_ATTRIBUTE_CAN_USE_HOST_POINTER_FOR_REGISTERED_MEM",
(
"hipDeviceAttributeCanUseHostPointerForRegisteredMem",
CONV_TYPE,
API_DRIVER,
HIP_UNSUPPORTED,
),
),
(
"CU_DEVICE_ATTRIBUTE_MAX",
("hipDeviceAttributeMax", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED),
),
(
"CU_POINTER_ATTRIBUTE_CONTEXT",
("hipPointerAttributeContext", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED),
),
(
"CU_POINTER_ATTRIBUTE_MEMORY_TYPE",
("hipPointerAttributeMemoryType", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED),
),
(
"CU_POINTER_ATTRIBUTE_DEVICE_POINTER",
(
"hipPointerAttributeDevicePointer",
CONV_TYPE,
API_DRIVER,
HIP_UNSUPPORTED,
),
),
(
"CU_POINTER_ATTRIBUTE_HOST_POINTER",
("hipPointerAttributeHostPointer", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED),
),
(
"CU_POINTER_ATTRIBUTE_P2P_TOKENS",
("hipPointerAttributeP2pTokens", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED),
),
(
"CU_POINTER_ATTRIBUTE_SYNC_MEMOPS",
("hipPointerAttributeSyncMemops", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED),
),
(
"CU_POINTER_ATTRIBUTE_BUFFER_ID",
("hipPointerAttributeBufferId", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED),
),
(
"CU_POINTER_ATTRIBUTE_IS_MANAGED",
("hipPointerAttributeIsManaged", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED),
),
(
"CU_FUNC_ATTRIBUTE_MAX_THREADS_PER_BLOCK",
(
"hipFuncAttributeMaxThreadsPerBlocks",
CONV_TYPE,
API_DRIVER,
HIP_UNSUPPORTED,
),
),
(
"CU_FUNC_ATTRIBUTE_SHARED_SIZE_BYTES",
("hipFuncAttributeSharedSizeBytes", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED),
),
(
"CU_FUNC_ATTRIBUTE_MAX_DYNAMIC_SHARED_SIZE_BYTES",
("hipFuncAttributeMaxDynamicSharedMemorySize", CONV_TYPE, API_RUNTIME),
),
(
"CU_FUNC_ATTRIBUTE_CONST_SIZE_BYTES",
("hipFuncAttributeConstSizeBytes", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED),
),
(
"CU_FUNC_ATTRIBUTE_LOCAL_SIZE_BYTES",
("hipFuncAttributeLocalSizeBytes", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED),
),
(
"CU_FUNC_ATTRIBUTE_NUM_REGS",
("hipFuncAttributeNumRegs", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED),
),
(
"CU_FUNC_ATTRIBUTE_PTX_VERSION",
("hipFuncAttributePtxVersion", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED),
),
(
"CU_FUNC_ATTRIBUTE_BINARY_VERSION",
("hipFuncAttributeBinaryVersion", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED),
),
(
"CU_FUNC_ATTRIBUTE_CACHE_MODE_CA",
("hipFuncAttributeCacheModeCA", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED),
),
(
"CU_FUNC_ATTRIBUTE_MAX",
("hipFuncAttributeMax", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED),
),
(
"CU_GRAPHICS_MAP_RESOURCE_FLAGS_NONE",
("hipGraphicsMapFlagsNone", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED),
),
(
"CU_GRAPHICS_MAP_RESOURCE_FLAGS_READ_ONLY",
("hipGraphicsMapFlagsReadOnly", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED),
),
(
"CU_GRAPHICS_MAP_RESOURCE_FLAGS_WRITE_DISCARD",
("hipGraphicsMapFlagsWriteDiscard", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED),
),
(
"CU_GRAPHICS_REGISTER_FLAGS_NONE",
("hipGraphicsRegisterFlagsNone", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED),
),
(
"CU_GRAPHICS_REGISTER_FLAGS_READ_ONLY",
(
"hipGraphicsRegisterFlagsReadOnly",
CONV_TYPE,
API_DRIVER,
HIP_UNSUPPORTED,
),
),
(
"CU_GRAPHICS_REGISTER_FLAGS_WRITE_DISCARD",
(
"hipGraphicsRegisterFlagsWriteDiscard",
CONV_TYPE,
API_DRIVER,
HIP_UNSUPPORTED,
),
),
(
"CU_GRAPHICS_REGISTER_FLAGS_SURFACE_LDST",
(
"hipGraphicsRegisterFlagsSurfaceLoadStore",
CONV_TYPE,
API_DRIVER,
HIP_UNSUPPORTED,
),
),
(
"CU_GRAPHICS_REGISTER_FLAGS_TEXTURE_GATHER",
(
"hipGraphicsRegisterFlagsTextureGather",
CONV_TYPE,
API_DRIVER,
HIP_UNSUPPORTED,
),
),
(
"CU_OCCUPANCY_DEFAULT",
("hipOccupancyDefault", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED),
),
(
"CU_OCCUPANCY_DISABLE_CACHING_OVERRIDE",
(
"hipOccupancyDisableCachingOverride",
CONV_TYPE,
API_DRIVER,
HIP_UNSUPPORTED,
),
),
(
"CU_FUNC_CACHE_PREFER_NONE",
("hipFuncCachePreferNone", CONV_CACHE, API_DRIVER),
),
(
"CU_FUNC_CACHE_PREFER_SHARED",
("hipFuncCachePreferShared", CONV_CACHE, API_DRIVER),
),
("CU_FUNC_CACHE_PREFER_L1", ("hipFuncCachePreferL1", CONV_CACHE, API_DRIVER)),
(
"CU_FUNC_CACHE_PREFER_EQUAL",
("hipFuncCachePreferEqual", CONV_CACHE, API_DRIVER),
),
(
"CU_IPC_MEM_LAZY_ENABLE_PEER_ACCESS",
("hipIpcMemLazyEnablePeerAccess", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED),
),
("CUDA_IPC_HANDLE_SIZE", ("HIP_IPC_HANDLE_SIZE", CONV_TYPE, API_DRIVER)),
(
"CU_JIT_CACHE_OPTION_NONE",
("hipJitCacheModeOptionNone", CONV_JIT, API_DRIVER, HIP_UNSUPPORTED),
),
(
"CU_JIT_CACHE_OPTION_CG",
("hipJitCacheModeOptionCG", CONV_JIT, API_DRIVER, HIP_UNSUPPORTED),
),
(
"CU_JIT_CACHE_OPTION_CA",
("hipJitCacheModeOptionCA", CONV_JIT, API_DRIVER, HIP_UNSUPPORTED),
),
(
"CU_PREFER_PTX",
("hipJitFallbackPreferPtx", CONV_JIT, API_DRIVER, HIP_UNSUPPORTED),
),
(
"CU_PREFER_BINARY",
("hipJitFallbackPreferBinary", CONV_JIT, API_DRIVER, HIP_UNSUPPORTED),
),
("CU_JIT_MAX_REGISTERS", ("hipJitOptionMaxRegisters", CONV_JIT, API_DRIVER)),
(
"CU_JIT_THREADS_PER_BLOCK",
("hipJitOptionThreadsPerBlock", CONV_JIT, API_DRIVER),
),
("CU_JIT_WALL_TIME", ("hipJitOptionWallTime", CONV_JIT, API_DRIVER)),
("CU_JIT_INFO_LOG_BUFFER", ("hipJitOptionInfoLogBuffer", CONV_JIT, API_DRIVER)),
(
"CU_JIT_INFO_LOG_BUFFER_SIZE_BYTES",
("hipJitOptionInfoLogBufferSizeBytes", CONV_JIT, API_DRIVER),
),
(
"CU_JIT_ERROR_LOG_BUFFER",
("hipJitOptionErrorLogBuffer", CONV_JIT, API_DRIVER),
),
(
"CU_JIT_ERROR_LOG_BUFFER_SIZE_BYTES",
("hipJitOptionErrorLogBufferSizeBytes", CONV_JIT, API_DRIVER),
),
(
"CU_JIT_OPTIMIZATION_LEVEL",
("hipJitOptionOptimizationLevel", CONV_JIT, API_DRIVER),
),
(
"CU_JIT_TARGET_FROM_CUCONTEXT",
("hipJitOptionTargetFromContext", CONV_JIT, API_DRIVER),
),
("CU_JIT_TARGET", ("hipJitOptionTarget", CONV_JIT, API_DRIVER)),
(
"CU_JIT_FALLBACK_STRATEGY",
("hipJitOptionFallbackStrategy", CONV_JIT, API_DRIVER),
),
(
"CU_JIT_GENERATE_DEBUG_INFO",
("hipJitOptionGenerateDebugInfo", CONV_JIT, API_DRIVER),
),
("CU_JIT_LOG_VERBOSE", ("hipJitOptionLogVerbose", CONV_JIT, API_DRIVER)),
(
"CU_JIT_GENERATE_LINE_INFO",
("hipJitOptionGenerateLineInfo", CONV_JIT, API_DRIVER),
),
("CU_JIT_CACHE_MODE", ("hipJitOptionCacheMode", CONV_JIT, API_DRIVER)),
("CU_JIT_NEW_SM3X_OPT", ("hipJitOptionSm3xOpt", CONV_JIT, API_DRIVER)),
("CU_JIT_FAST_COMPILE", ("hipJitOptionFastCompile", CONV_JIT, API_DRIVER)),
("CU_JIT_NUM_OPTIONS", ("hipJitOptionNumOptions", CONV_JIT, API_DRIVER)),
(
"CU_TARGET_COMPUTE_10",
("hipJitTargetCompute10", CONV_JIT, API_DRIVER, HIP_UNSUPPORTED),
),
(
"CU_TARGET_COMPUTE_11",
("hipJitTargetCompute11", CONV_JIT, API_DRIVER, HIP_UNSUPPORTED),
),
(
"CU_TARGET_COMPUTE_12",
("hipJitTargetCompute12", CONV_JIT, API_DRIVER, HIP_UNSUPPORTED),
),
(
"CU_TARGET_COMPUTE_13",
("hipJitTargetCompute13", CONV_JIT, API_DRIVER, HIP_UNSUPPORTED),
),
(
"CU_TARGET_COMPUTE_20",
("hipJitTargetCompute20", CONV_JIT, API_DRIVER, HIP_UNSUPPORTED),
),
(
"CU_TARGET_COMPUTE_21",
("hipJitTargetCompute21", CONV_JIT, API_DRIVER, HIP_UNSUPPORTED),
),
(
"CU_TARGET_COMPUTE_30",
("hipJitTargetCompute30", CONV_JIT, API_DRIVER, HIP_UNSUPPORTED),
),
(
"CU_TARGET_COMPUTE_32",
("hipJitTargetCompute32", CONV_JIT, API_DRIVER, HIP_UNSUPPORTED),
),
(
"CU_TARGET_COMPUTE_35",
("hipJitTargetCompute35", CONV_JIT, API_DRIVER, HIP_UNSUPPORTED),
),
(
"CU_TARGET_COMPUTE_37",
("hipJitTargetCompute37", CONV_JIT, API_DRIVER, HIP_UNSUPPORTED),
),
(
"CU_TARGET_COMPUTE_50",
("hipJitTargetCompute50", CONV_JIT, API_DRIVER, HIP_UNSUPPORTED),
),
(
"CU_TARGET_COMPUTE_52",
("hipJitTargetCompute52", CONV_JIT, API_DRIVER, HIP_UNSUPPORTED),
),
(
"CU_TARGET_COMPUTE_53",
("hipJitTargetCompute53", CONV_JIT, API_DRIVER, HIP_UNSUPPORTED),
),
(
"CU_TARGET_COMPUTE_60",
("hipJitTargetCompute60", CONV_JIT, API_DRIVER, HIP_UNSUPPORTED),
),
(
"CU_TARGET_COMPUTE_61",
("hipJitTargetCompute61", CONV_JIT, API_DRIVER, HIP_UNSUPPORTED),
),
(
"CU_TARGET_COMPUTE_62",
("hipJitTargetCompute62", CONV_JIT, API_DRIVER, HIP_UNSUPPORTED),
),
(
"CU_JIT_INPUT_CUBIN",
("hipJitInputTypeBin", CONV_JIT, API_DRIVER, HIP_UNSUPPORTED),
),
(
"CU_JIT_INPUT_PTX",
("hipJitInputTypePtx", CONV_JIT, API_DRIVER, HIP_UNSUPPORTED),
),
(
"CU_JIT_INPUT_FATBINARY",
("hipJitInputTypeFatBinary", CONV_JIT, API_DRIVER, HIP_UNSUPPORTED),
),
(
"CU_JIT_INPUT_OBJECT",
("hipJitInputTypeObject", CONV_JIT, API_DRIVER, HIP_UNSUPPORTED),
),
(
"CU_JIT_INPUT_LIBRARY",
("hipJitInputTypeLibrary", CONV_JIT, API_DRIVER, HIP_UNSUPPORTED),
),
(
"CU_JIT_NUM_INPUT_TYPES",
("hipJitInputTypeNumInputTypes", CONV_JIT, API_DRIVER, HIP_UNSUPPORTED),
),
(
"CU_LIMIT_STACK_SIZE",
("hipLimitStackSize", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED),
),
(
"CU_LIMIT_PRINTF_FIFO_SIZE",
("hipLimitPrintfFifoSize", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED),
),
(
"CU_LIMIT_MALLOC_HEAP_SIZE",
("hipLimitMallocHeapSize", CONV_TYPE, API_DRIVER),
),
(
"CU_LIMIT_DEV_RUNTIME_SYNC_DEPTH",
("hipLimitDevRuntimeSyncDepth", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED),
),
(
"CU_LIMIT_DEV_RUNTIME_PENDING_LAUNCH_COUNT",
(
"hipLimitDevRuntimePendingLaunchCount",
CONV_TYPE,
API_DRIVER,
HIP_UNSUPPORTED,
),
),
(
"CU_MEM_ATTACH_GLOBAL",
("hipMemAttachGlobal", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED),
),
(
"CU_MEM_ATTACH_HOST",
("hipMemAttachHost", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED),
),
(
"CU_MEM_ATTACH_SINGLE",
("hipMemAttachSingle", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED),
),
(
"CU_MEMORYTYPE_HOST",
("hipMemTypeHost", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED),
),
(
"CU_MEMORYTYPE_DEVICE",
("hipMemTypeDevice", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED),
),
(
"CU_MEMORYTYPE_ARRAY",
("hipMemTypeArray", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED),
),
(
"CU_MEMORYTYPE_UNIFIED",
("hipMemTypeUnified", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED),
),
(
"CU_RESOURCE_TYPE_ARRAY",
("hipResourceTypeArray", CONV_TEX, API_DRIVER, HIP_UNSUPPORTED),
),
(
"CU_RESOURCE_TYPE_MIPMAPPED_ARRAY",
("hipResourceTypeMipmappedArray", CONV_TEX, API_DRIVER, HIP_UNSUPPORTED),
),
(
"CU_RESOURCE_TYPE_LINEAR",
("hipResourceTypeLinear", CONV_TEX, API_DRIVER, HIP_UNSUPPORTED),
),
(
"CU_RESOURCE_TYPE_PITCH2D",
("hipResourceTypePitch2D", CONV_TEX, API_DRIVER, HIP_UNSUPPORTED),
),
("CU_RES_VIEW_FORMAT_NONE", ("hipResViewFormatNone", CONV_TEX, API_DRIVER)),
(
"CU_RES_VIEW_FORMAT_UINT_1X8",
("hipResViewFormatUnsignedChar1", CONV_TEX, API_DRIVER),
),
(
"CU_RES_VIEW_FORMAT_UINT_2X8",
("hipResViewFormatUnsignedChar2", CONV_TEX, API_DRIVER),
),
(
"CU_RES_VIEW_FORMAT_UINT_4X8",
("hipResViewFormatUnsignedChar4", CONV_TEX, API_DRIVER),
),
(
"CU_RES_VIEW_FORMAT_SINT_1X8",
("hipResViewFormatSignedChar1", CONV_TEX, API_DRIVER),
),
(
"CU_RES_VIEW_FORMAT_SINT_2X8",
("hipResViewFormatSignedChar2", CONV_TEX, API_DRIVER),
),
(
"CU_RES_VIEW_FORMAT_SINT_4X8",
("hipResViewFormatSignedChar4", CONV_TEX, API_DRIVER),
),
(
"CU_RES_VIEW_FORMAT_UINT_1X16",
("hipResViewFormatUnsignedShort1", CONV_TEX, API_DRIVER),
),
(
"CU_RES_VIEW_FORMAT_UINT_2X16",
("hipResViewFormatUnsignedShort2", CONV_TEX, API_DRIVER),
),
(
"CU_RES_VIEW_FORMAT_UINT_4X16",
("hipResViewFormatUnsignedShort4", CONV_TEX, API_DRIVER),
),
(
"CU_RES_VIEW_FORMAT_SINT_1X16",
("hipResViewFormatSignedShort1", CONV_TEX, API_DRIVER),
),
(
"CU_RES_VIEW_FORMAT_SINT_2X16",
("hipResViewFormatSignedShort2", CONV_TEX, API_DRIVER),
),
(
"CU_RES_VIEW_FORMAT_SINT_4X16",
("hipResViewFormatSignedShort4", CONV_TEX, API_DRIVER),
),
(
"CU_RES_VIEW_FORMAT_UINT_1X32",
("hipResViewFormatUnsignedInt1", CONV_TEX, API_DRIVER),
),
(
"CU_RES_VIEW_FORMAT_UINT_2X32",
("hipResViewFormatUnsignedInt2", CONV_TEX, API_DRIVER),
),
(
"CU_RES_VIEW_FORMAT_UINT_4X32",
("hipResViewFormatUnsignedInt4", CONV_TEX, API_DRIVER),
),
(
"CU_RES_VIEW_FORMAT_SINT_1X32",
("hipResViewFormatSignedInt1", CONV_TEX, API_DRIVER),
),
(
"CU_RES_VIEW_FORMAT_SINT_2X32",
("hipResViewFormatSignedInt2", CONV_TEX, API_DRIVER),
),
(
"CU_RES_VIEW_FORMAT_SINT_4X32",
("hipResViewFormatSignedInt4", CONV_TEX, API_DRIVER),
),
(
"CU_RES_VIEW_FORMAT_FLOAT_1X16",
("hipResViewFormatHalf1", CONV_TEX, API_DRIVER),
),
(
"CU_RES_VIEW_FORMAT_FLOAT_2X16",
("hipResViewFormatHalf2", CONV_TEX, API_DRIVER),
),
(
"CU_RES_VIEW_FORMAT_FLOAT_4X16",
("hipResViewFormatHalf4", CONV_TEX, API_DRIVER),
),
(
"CU_RES_VIEW_FORMAT_FLOAT_1X32",
("hipResViewFormatFloat1", CONV_TEX, API_DRIVER),
),
(
"CU_RES_VIEW_FORMAT_FLOAT_2X32",
("hipResViewFormatFloat2", CONV_TEX, API_DRIVER),
),
(
"CU_RES_VIEW_FORMAT_FLOAT_4X32",
("hipResViewFormatFloat4", CONV_TEX, API_DRIVER),
),
(
"CU_RES_VIEW_FORMAT_UNSIGNED_BC1",
("hipResViewFormatUnsignedBlockCompressed1", CONV_TEX, API_DRIVER),
),
(
"CU_RES_VIEW_FORMAT_UNSIGNED_BC2",
("hipResViewFormatUnsignedBlockCompressed2", CONV_TEX, API_DRIVER),
),
(
"CU_RES_VIEW_FORMAT_UNSIGNED_BC3",
("hipResViewFormatUnsignedBlockCompressed3", CONV_TEX, API_DRIVER),
),
(
"CU_RES_VIEW_FORMAT_UNSIGNED_BC4",
("hipResViewFormatUnsignedBlockCompressed4", CONV_TEX, API_DRIVER),
),
(
"CU_RES_VIEW_FORMAT_SIGNED_BC4",
("hipResViewFormatSignedBlockCompressed4", CONV_TEX, API_DRIVER),
),
(
"CU_RES_VIEW_FORMAT_UNSIGNED_BC5",
("hipResViewFormatUnsignedBlockCompressed5", CONV_TEX, API_DRIVER),
),
(
"CU_RES_VIEW_FORMAT_SIGNED_BC5",
("hipResViewFormatSignedBlockCompressed5", CONV_TEX, API_DRIVER),
),
(
"CU_RES_VIEW_FORMAT_UNSIGNED_BC6H",
("hipResViewFormatUnsignedBlockCompressed6H", CONV_TEX, API_DRIVER),
),
(
"CU_RES_VIEW_FORMAT_SIGNED_BC6H",
("hipResViewFormatSignedBlockCompressed6H", CONV_TEX, API_DRIVER),
),
(
"CU_RES_VIEW_FORMAT_UNSIGNED_BC7",
("hipResViewFormatUnsignedBlockCompressed7", CONV_TEX, API_DRIVER),
),
(
"CU_SHARED_MEM_CONFIG_DEFAULT_BANK_SIZE",
("hipSharedMemBankSizeDefault", CONV_TYPE, API_DRIVER),
),
(
"CU_SHARED_MEM_CONFIG_FOUR_BYTE_BANK_SIZE",
("hipSharedMemBankSizeFourByte", CONV_TYPE, API_DRIVER),
),
(
"CU_SHARED_MEM_CONFIG_EIGHT_BYTE_BANK_SIZE",
("hipSharedMemBankSizeEightByte", CONV_TYPE, API_DRIVER),
),
("CU_STREAM_DEFAULT", ("hipStreamDefault", CONV_TYPE, API_DRIVER)),
("CU_STREAM_NON_BLOCKING", ("hipStreamNonBlocking", CONV_TYPE, API_DRIVER)),
(
"CU_STREAM_WAIT_VALUE_GEQ",
("hipStreamWaitValueGeq", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED),
),
(
"CU_STREAM_WAIT_VALUE_EQ",
("hipStreamWaitValueEq", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED),
),
(
"CU_STREAM_WAIT_VALUE_AND",
("hipStreamWaitValueAnd", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED),
),
(
"CU_STREAM_WAIT_VALUE_FLUSH",
("hipStreamWaitValueFlush", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED),
),
(
"CU_STREAM_WRITE_VALUE_DEFAULT",
("hipStreamWriteValueDefault", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED),
),
(
"CU_STREAM_WRITE_VALUE_NO_MEMORY_BARRIER",
(
"hipStreamWriteValueNoMemoryBarrier",
CONV_TYPE,
API_DRIVER,
HIP_UNSUPPORTED,
),
),
(
"CU_STREAM_MEM_OP_WAIT_VALUE_32",
("hipStreamBatchMemOpWaitValue32", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED),
),
(
"CU_STREAM_MEM_OP_WRITE_VALUE_32",
("hipStreamBatchMemOpWriteValue32", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED),
),
(
"CU_STREAM_MEM_OP_FLUSH_REMOTE_WRITES",
(
"hipStreamBatchMemOpFlushRemoteWrites",
CONV_TYPE,
API_DRIVER,
HIP_UNSUPPORTED,
),
),
(
"cuGetErrorName",
("hipGetErrorName", CONV_ERROR, API_DRIVER, HIP_UNSUPPORTED),
),
(
"cuGetErrorString",
("hipDrvGetErrorString", CONV_ERROR, API_DRIVER, HIP_UNSUPPORTED),
),
("cuInit", ("hipInit", CONV_INIT, API_DRIVER)),
("cuDriverGetVersion", ("hipDriverGetVersion", CONV_VERSION, API_DRIVER)),
("cuCtxCreate", ("hipCtxCreate", CONV_CONTEXT, API_DRIVER)),
("cuCtxCreate_v2", ("hipCtxCreate", CONV_CONTEXT, API_DRIVER)),
("cuCtxDestroy", ("hipCtxDestroy", CONV_CONTEXT, API_DRIVER)),
("cuCtxDestroy_v2", ("hipCtxDestroy", CONV_CONTEXT, API_DRIVER)),
("cuCtxGetApiVersion", ("hipCtxGetApiVersion", CONV_CONTEXT, API_DRIVER)),
("cuCtxGetCacheConfig", ("hipCtxGetCacheConfig", CONV_CONTEXT, API_DRIVER)),
("cuCtxGetCurrent", ("hipCtxGetCurrent", CONV_CONTEXT, API_DRIVER)),
("cuCtxGetDevice", ("hipCtxGetDevice", CONV_CONTEXT, API_DRIVER)),
("cuCtxGetFlags", ("hipCtxGetFlags", CONV_CONTEXT, API_DRIVER)),
("cuDeviceGetUuid", ("hipDeviceGetUuid", CONV_CONTEXT, API_DRIVER)),
(
"cuCtxGetLimit",
("hipCtxGetLimit", CONV_CONTEXT, API_DRIVER, HIP_UNSUPPORTED),
),
(
"cuCtxGetSharedMemConfig",
("hipCtxGetSharedMemConfig", CONV_CONTEXT, API_DRIVER),
),
(
"cuCtxGetStreamPriorityRange",
("hipCtxGetStreamPriorityRange", CONV_CONTEXT, API_DRIVER, HIP_UNSUPPORTED),
),
("cuCtxPopCurrent_v2", ("hipCtxPopCurrent", CONV_CONTEXT, API_DRIVER)),
("cuCtxPushCurrent_v2", ("hipCtxPushCurrent", CONV_CONTEXT, API_DRIVER)),
("cuCtxSetCacheConfig", ("hipCtxSetCacheConfig", CONV_CONTEXT, API_DRIVER)),
("cuCtxSetCurrent", ("hipCtxSetCurrent", CONV_CONTEXT, API_DRIVER)),
(
"cuCtxSetLimit",
("hipCtxSetLimit", CONV_CONTEXT, API_DRIVER, HIP_UNSUPPORTED),
),
(
"cuCtxSetSharedMemConfig",
("hipCtxSetSharedMemConfig", CONV_CONTEXT, API_DRIVER),
),
("cuCtxSynchronize", ("hipCtxSynchronize", CONV_CONTEXT, API_DRIVER)),
("cuCtxAttach", ("hipCtxAttach", CONV_CONTEXT, API_DRIVER, HIP_UNSUPPORTED)),
("cuCtxDetach", ("hipCtxDetach", CONV_CONTEXT, API_DRIVER, HIP_UNSUPPORTED)),
("cuCtxEnablePeerAccess", ("hipCtxEnablePeerAccess", CONV_PEER, API_DRIVER)),
("cuCtxDisablePeerAccess", ("hipCtxDisablePeerAccess", CONV_PEER, API_DRIVER)),
("cuDeviceCanAccessPeer", ("hipDeviceCanAccessPeer", CONV_PEER, API_DRIVER)),
(
"cuDeviceGetP2PAttribute",
("hipDeviceGetP2PAttribute", CONV_PEER, API_DRIVER, HIP_UNSUPPORTED),
),
(
"cuDevicePrimaryCtxGetState",
("hipDevicePrimaryCtxGetState", CONV_CONTEXT, API_DRIVER),
),
(
"cuDevicePrimaryCtxRelease",
("hipDevicePrimaryCtxRelease", CONV_CONTEXT, API_DRIVER),
),
(
"cuDevicePrimaryCtxReset",
("hipDevicePrimaryCtxReset", CONV_CONTEXT, API_DRIVER),
),
(
"cuDevicePrimaryCtxRetain",
("hipDevicePrimaryCtxRetain", CONV_CONTEXT, API_DRIVER),
),
(
"cuDevicePrimaryCtxSetFlags",
("hipDevicePrimaryCtxSetFlags", CONV_CONTEXT, API_DRIVER),
),
("cuDeviceGet", ("hipDeviceGet", CONV_DEVICE, API_DRIVER)),
("cuDeviceGetName", ("hipDeviceGetName", CONV_DEVICE, API_DRIVER)),
("cuDeviceGetCount", ("hipGetDeviceCount", CONV_DEVICE, API_DRIVER)),
("cuDeviceGetAttribute", ("hipDeviceGetAttribute", CONV_DEVICE, API_DRIVER)),
("cuDeviceGetPCIBusId", ("hipDeviceGetPCIBusId", CONV_DEVICE, API_DRIVER)),
("cuDeviceGetByPCIBusId", ("hipDeviceGetByPCIBusId", CONV_DEVICE, API_DRIVER)),
("cuDeviceTotalMem_v2", ("hipDeviceTotalMem", CONV_DEVICE, API_DRIVER)),
(
"cuDeviceComputeCapability",
("hipDeviceComputeCapability", CONV_DEVICE, API_DRIVER),
),
("cuDeviceGetProperties", ("hipGetDeviceProperties", CONV_DEVICE, API_DRIVER)),
("cuLinkAddData", ("hipLinkAddData", CONV_MODULE, API_DRIVER, HIP_UNSUPPORTED)),
("cuLinkAddFile", ("hipLinkAddFile", CONV_MODULE, API_DRIVER, HIP_UNSUPPORTED)),
(
"cuLinkComplete",
("hipLinkComplete", CONV_MODULE, API_DRIVER, HIP_UNSUPPORTED),
),
("cuLinkCreate", ("hipLinkCreate", CONV_MODULE, API_DRIVER, HIP_UNSUPPORTED)),
("cuLinkDestroy", ("hipLinkDestroy", CONV_MODULE, API_DRIVER, HIP_UNSUPPORTED)),
("cuModuleGetFunction", ("hipModuleGetFunction", CONV_MODULE, API_DRIVER)),
("cuModuleGetGlobal_v2", ("hipModuleGetGlobal", CONV_MODULE, API_DRIVER)),
(
"cuModuleGetSurfRef",
("hipModuleGetSurfRef", CONV_MODULE, API_DRIVER, HIP_UNSUPPORTED),
),
("cuModuleGetTexRef", ("hipModuleGetTexRef", CONV_MODULE, API_DRIVER)),
("cuModuleLoad", ("hipModuleLoad", CONV_MODULE, API_DRIVER)),
("cuModuleLoadData", ("hipModuleLoadData", CONV_MODULE, API_DRIVER)),
("cuModuleLoadDataEx", ("hipModuleLoadDataEx", CONV_MODULE, API_DRIVER)),
(
"cuModuleLoadFatBinary",
("hipModuleLoadFatBinary", CONV_MODULE, API_DRIVER, HIP_UNSUPPORTED),
),
("cuModuleUnload", ("hipModuleUnload", CONV_MODULE, API_DRIVER)),
(
"CU_DEVICE_P2P_ATTRIBUTE_PERFORMANCE_RANK",
(
"hipDeviceP2PAttributePerformanceRank",
CONV_TYPE,
API_DRIVER,
HIP_UNSUPPORTED,
),
),
(
"CU_DEVICE_P2P_ATTRIBUTE_ACCESS_SUPPORTED",
(
"hipDeviceP2PAttributeAccessSupported",
CONV_TYPE,
API_DRIVER,
HIP_UNSUPPORTED,
),
),
(
"CU_DEVICE_P2P_ATTRIBUTE_NATIVE_ATOMIC_SUPPORTED",
(
"hipDeviceP2PAttributeNativeAtomicSupported",
CONV_TYPE,
API_DRIVER,
HIP_UNSUPPORTED,
),
),
("CU_EVENT_DEFAULT", ("hipEventDefault", CONV_EVENT, API_DRIVER)),
("CU_EVENT_BLOCKING_SYNC", ("hipEventBlockingSync", CONV_EVENT, API_DRIVER)),
("CU_EVENT_DISABLE_TIMING", ("hipEventDisableTiming", CONV_EVENT, API_DRIVER)),
("CU_EVENT_INTERPROCESS", ("hipEventInterprocess", CONV_EVENT, API_DRIVER)),
("cuEventCreate", ("hipEventCreate", CONV_EVENT, API_DRIVER)),
("cuEventDestroy", ("hipEventDestroy", CONV_EVENT, API_DRIVER)),
("cuEventDestroy_v2", ("hipEventDestroy", CONV_EVENT, API_DRIVER)),
("cuEventElapsedTime", ("hipEventElapsedTime", CONV_EVENT, API_DRIVER)),
("cuEventQuery", ("hipEventQuery", CONV_EVENT, API_DRIVER)),
("cuEventRecord", ("hipEventRecord", CONV_EVENT, API_DRIVER)),
("cuEventSynchronize", ("hipEventSynchronize", CONV_EVENT, API_DRIVER)),
("cuFuncSetAttribute", ("hipFuncSetAttribute", CONV_EVENT, API_DRIVER)),
(
"cuFuncGetAttribute",
("hipFuncGetAttribute", CONV_MODULE, API_DRIVER, HIP_UNSUPPORTED),
),
("cuFuncSetCacheConfig", ("hipFuncSetCacheConfig", CONV_MODULE, API_DRIVER)),
(
"cuFuncSetSharedMemConfig",
("hipFuncSetSharedMemConfig", CONV_MODULE, API_DRIVER, HIP_UNSUPPORTED),
),
("cuLaunchKernel", ("hipModuleLaunchKernel", CONV_MODULE, API_DRIVER)),
(
"cuFuncSetBlockShape",
("hipFuncSetBlockShape", CONV_MODULE, API_DRIVER, HIP_UNSUPPORTED),
),
(
"cuFuncSetSharedSize",
("hipFuncSetSharedSize", CONV_MODULE, API_DRIVER, HIP_UNSUPPORTED),
),
("cuLaunch", ("hipLaunch", CONV_MODULE, API_DRIVER, HIP_UNSUPPORTED)),
("cuLaunchGrid", ("hipLaunchGrid", CONV_MODULE, API_DRIVER, HIP_UNSUPPORTED)),
(
"cuLaunchGridAsync",
("hipLaunchGridAsync", CONV_MODULE, API_DRIVER, HIP_UNSUPPORTED),
),
("cuParamSetf", ("hipParamSetf", CONV_MODULE, API_DRIVER, HIP_UNSUPPORTED)),
("cuParamSeti", ("hipParamSeti", CONV_MODULE, API_DRIVER, HIP_UNSUPPORTED)),
(
"cuParamSetSize",
("hipParamSetSize", CONV_MODULE, API_DRIVER, HIP_UNSUPPORTED),
),
("cuParamSetv", ("hipParamSetv", CONV_MODULE, API_DRIVER, HIP_UNSUPPORTED)),
(
"cuOccupancyMaxActiveBlocksPerMultiprocessor",
(
"hipModuleOccupancyMaxActiveBlocksPerMultiprocessor",
CONV_OCCUPANCY,
API_DRIVER,
),
),
(
"cuOccupancyMaxActiveBlocksPerMultiprocessorWithFlags",
(
"hipModuleOccupancyMaxActiveBlocksPerMultiprocessorWithFlags",
CONV_OCCUPANCY,
API_DRIVER,
HIP_UNSUPPORTED,
),
),
(
"cuOccupancyMaxPotentialBlockSize",
("hipModuleOccupancyMaxPotentialBlockSize", CONV_OCCUPANCY, API_DRIVER),
),
(
"cuOccupancyMaxPotentialBlockSizeWithFlags",
(
"hipModuleOccupancyMaxPotentialBlockSizeWithFlags",
CONV_OCCUPANCY,
API_DRIVER,
HIP_UNSUPPORTED,
),
),
("cuStreamAddCallback", ("hipStreamAddCallback", CONV_STREAM, API_DRIVER)),
(
"cuStreamAttachMemAsync",
("hipStreamAttachMemAsync", CONV_STREAM, API_DRIVER, HIP_UNSUPPORTED),
),
(
"cuStreamCreate",
("hipStreamCreate__", CONV_STREAM, API_DRIVER, HIP_UNSUPPORTED),
),
(
"cuStreamCreateWithPriority",
("hipStreamCreateWithPriority", CONV_STREAM, API_DRIVER, HIP_UNSUPPORTED),
),
("cuStreamDestroy", ("hipStreamDestroy", CONV_STREAM, API_DRIVER)),
("cuStreamDestroy_v2", ("hipStreamDestroy", CONV_STREAM, API_DRIVER)),
("cuStreamGetFlags", ("hipStreamGetFlags", CONV_STREAM, API_DRIVER)),
(
"cuStreamGetPriority",
("hipStreamGetPriority", CONV_STREAM, API_DRIVER, HIP_UNSUPPORTED),
),
("cuStreamQuery", ("hipStreamQuery", CONV_STREAM, API_DRIVER)),
("cuStreamSynchronize", ("hipStreamSynchronize", CONV_STREAM, API_DRIVER)),
("cuStreamWaitEvent", ("hipStreamWaitEvent", CONV_STREAM, API_DRIVER)),
(
"cuStreamWaitValue32",
("hipStreamWaitValue32", CONV_STREAM, API_DRIVER, HIP_UNSUPPORTED),
),
(
"cuStreamWriteValue32",
("hipStreamWriteValue32", CONV_STREAM, API_DRIVER, HIP_UNSUPPORTED),
),
(
"cuStreamBatchMemOp",
("hipStreamBatchMemOp", CONV_STREAM, API_DRIVER, HIP_UNSUPPORTED),
),
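    # Driver API: memory management (arrays, IPC handles, allocation, memcpy/memset, unified-memory hints).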
("cuArray3DCreate", ("hipArray3DCreate", CONV_MEM, API_DRIVER)),
(
"cuArray3DGetDescriptor",
("hipArray3DGetDescriptor", CONV_MEM, API_DRIVER, HIP_UNSUPPORTED),
),
("cuArrayCreate", ("hipArrayCreate", CONV_MEM, API_DRIVER, HIP_UNSUPPORTED)),
("cuArrayDestroy", ("hipArrayDestroy", CONV_MEM, API_DRIVER, HIP_UNSUPPORTED)),
(
"cuArrayGetDescriptor",
("hipArrayGetDescriptor", CONV_MEM, API_DRIVER, HIP_UNSUPPORTED),
),
(
"cuIpcCloseMemHandle",
("hipIpcCloseMemHandle", CONV_MEM, API_DRIVER, HIP_UNSUPPORTED),
),
(
"cuIpcGetEventHandle",
("hipIpcGetEventHandle", CONV_MEM, API_DRIVER, HIP_UNSUPPORTED),
),
(
"cuIpcGetMemHandle",
("hipIpcGetMemHandle", CONV_MEM, API_DRIVER, HIP_UNSUPPORTED),
),
(
"cuIpcOpenEventHandle",
("hipIpcOpenEventHandle", CONV_MEM, API_DRIVER, HIP_UNSUPPORTED),
),
(
"cuIpcOpenMemHandle",
("hipIpcOpenMemHandle", CONV_MEM, API_DRIVER, HIP_UNSUPPORTED),
),
("cuMemAlloc_v2", ("hipMalloc", CONV_MEM, API_DRIVER)),
("cuMemAllocHost", ("hipMemAllocHost", CONV_MEM, API_DRIVER, HIP_UNSUPPORTED)),
(
"cuMemAllocManaged",
("hipMemAllocManaged", CONV_MEM, API_DRIVER, HIP_UNSUPPORTED),
),
(
"cuMemAllocPitch",
("hipMemAllocPitch__", CONV_MEM, API_DRIVER, HIP_UNSUPPORTED),
),
("cuMemcpy", ("hipMemcpy__", CONV_MEM, API_DRIVER, HIP_UNSUPPORTED)),
("cuMemcpy2D", ("hipMemcpy2D__", CONV_MEM, API_DRIVER, HIP_UNSUPPORTED)),
(
"cuMemcpy2DAsync",
("hipMemcpy2DAsync__", CONV_MEM, API_DRIVER, HIP_UNSUPPORTED),
),
(
"cuMemcpy2DUnaligned",
("hipMemcpy2DUnaligned", CONV_MEM, API_DRIVER, HIP_UNSUPPORTED),
),
("cuMemcpy3D", ("hipMemcpy3D__", CONV_MEM, API_DRIVER, HIP_UNSUPPORTED)),
(
"cuMemcpy3DAsync",
("hipMemcpy3DAsync__", CONV_MEM, API_DRIVER, HIP_UNSUPPORTED),
),
(
"cuMemcpy3DPeer",
("hipMemcpy3DPeer__", CONV_MEM, API_DRIVER, HIP_UNSUPPORTED),
),
(
"cuMemcpy3DPeerAsync",
("hipMemcpy3DPeerAsync__", CONV_MEM, API_DRIVER, HIP_UNSUPPORTED),
),
("cuMemcpyAsync", ("hipMemcpyAsync__", CONV_MEM, API_DRIVER, HIP_UNSUPPORTED)),
("cuMemcpyAtoA", ("hipMemcpyAtoA", CONV_MEM, API_DRIVER, HIP_UNSUPPORTED)),
("cuMemcpyAtoD", ("hipMemcpyAtoD", CONV_MEM, API_DRIVER, HIP_UNSUPPORTED)),
("cuMemcpyAtoH", ("hipMemcpyAtoH", CONV_MEM, API_DRIVER, HIP_UNSUPPORTED)),
(
"cuMemcpyAtoHAsync",
("hipMemcpyAtoHAsync", CONV_MEM, API_DRIVER, HIP_UNSUPPORTED),
),
("cuMemcpyDtoA", ("hipMemcpyDtoA", CONV_MEM, API_DRIVER, HIP_UNSUPPORTED)),
("cuMemcpyDtoD_v2", ("hipMemcpyDtoD", CONV_MEM, API_DRIVER)),
("cuMemcpyDtoDAsync_v2", ("hipMemcpyDtoDAsync", CONV_MEM, API_DRIVER)),
("cuMemcpyDtoH_v2", ("hipMemcpyDtoH", CONV_MEM, API_DRIVER)),
("cuMemcpyDtoHAsync_v2", ("hipMemcpyDtoHAsync", CONV_MEM, API_DRIVER)),
("cuMemcpyHtoA", ("hipMemcpyHtoA", CONV_MEM, API_DRIVER, HIP_UNSUPPORTED)),
(
"cuMemcpyHtoAAsync",
("hipMemcpyHtoAAsync", CONV_MEM, API_DRIVER, HIP_UNSUPPORTED),
),
("cuMemcpyHtoD_v2", ("hipMemcpyHtoD", CONV_MEM, API_DRIVER)),
("cuMemcpyHtoDAsync_v2", ("hipMemcpyHtoDAsync", CONV_MEM, API_DRIVER)),
(
"cuMemcpyPeerAsync",
("hipMemcpyPeerAsync__", CONV_MEM, API_DRIVER, HIP_UNSUPPORTED),
),
("cuMemcpyPeer", ("hipMemcpyPeer__", CONV_MEM, API_DRIVER, HIP_UNSUPPORTED)),
("cuMemFree", ("hipFree", CONV_MEM, API_DRIVER)),
("cuMemFree_v2", ("hipFree", CONV_MEM, API_DRIVER)),
("cuMemFreeHost", ("hipHostFree", CONV_MEM, API_DRIVER)),
(
"cuMemGetAddressRange",
("hipMemGetAddressRange", CONV_MEM, API_DRIVER, HIP_UNSUPPORTED),
),
("cuMemGetInfo_v2", ("hipMemGetInfo", CONV_MEM, API_DRIVER)),
("cuMemHostAlloc", ("hipHostMalloc", CONV_MEM, API_DRIVER)),
(
"cuMemHostGetDevicePointer",
("hipMemHostGetDevicePointer", CONV_MEM, API_DRIVER, HIP_UNSUPPORTED),
),
(
"cuMemHostGetFlags",
("hipMemHostGetFlags", CONV_MEM, API_DRIVER, HIP_UNSUPPORTED),
),
("cuMemHostRegister_v2", ("hipHostRegister", CONV_MEM, API_DRIVER)),
("cuMemHostUnregister", ("hipHostUnregister", CONV_MEM, API_DRIVER)),
("cuMemsetD16_v2", ("hipMemsetD16", CONV_MEM, API_DRIVER, HIP_UNSUPPORTED)),
(
"cuMemsetD16Async",
("hipMemsetD16Async", CONV_MEM, API_DRIVER, HIP_UNSUPPORTED),
),
("cuMemsetD2D16_v2", ("hipMemsetD2D16", CONV_MEM, API_DRIVER, HIP_UNSUPPORTED)),
(
"cuMemsetD2D16Async",
("hipMemsetD2D16Async", CONV_MEM, API_DRIVER, HIP_UNSUPPORTED),
),
("cuMemsetD2D32_v2", ("hipMemsetD2D32", CONV_MEM, API_DRIVER, HIP_UNSUPPORTED)),
(
"cuMemsetD2D32Async",
("hipMemsetD2D32Async", CONV_MEM, API_DRIVER, HIP_UNSUPPORTED),
),
("cuMemsetD2D8_v2", ("hipMemsetD2D8", CONV_MEM, API_DRIVER, HIP_UNSUPPORTED)),
(
"cuMemsetD2D8Async",
("hipMemsetD2D8Async", CONV_MEM, API_DRIVER, HIP_UNSUPPORTED),
),
("cuMemsetD32_v2", ("hipMemset", CONV_MEM, API_DRIVER)),
("cuMemsetD32Async", ("hipMemsetAsync", CONV_MEM, API_DRIVER)),
("cuMemsetD8_v2", ("hipMemsetD8", CONV_MEM, API_DRIVER, HIP_UNSUPPORTED)),
(
"cuMemsetD8Async",
("hipMemsetD8Async", CONV_MEM, API_DRIVER, HIP_UNSUPPORTED),
),
(
"cuMipmappedArrayCreate",
("hipMipmappedArrayCreate", CONV_MEM, API_DRIVER, HIP_UNSUPPORTED),
),
(
"cuMipmappedArrayDestroy",
("hipMipmappedArrayDestroy", CONV_MEM, API_DRIVER, HIP_UNSUPPORTED),
),
(
"cuMipmappedArrayGetLevel",
("hipMipmappedArrayGetLevel", CONV_MEM, API_DRIVER, HIP_UNSUPPORTED),
),
(
"cuMemPrefetchAsync",
("hipMemPrefetchAsync__", CONV_MEM, API_DRIVER, HIP_UNSUPPORTED),
),
("cuMemAdvise", ("hipMemAdvise", CONV_MEM, API_DRIVER, HIP_UNSUPPORTED)),
(
"cuMemRangeGetAttribute",
("hipMemRangeGetAttribute", CONV_MEM, API_DRIVER, HIP_UNSUPPORTED),
),
(
"cuMemRangeGetAttributes",
("hipMemRangeGetAttributes", CONV_MEM, API_DRIVER, HIP_UNSUPPORTED),
),
(
"cuPointerGetAttribute",
("hipPointerGetAttribute", CONV_MEM, API_DRIVER, HIP_UNSUPPORTED),
),
(
"cuMemGetAddressRange_v2",
("hipMemGetAddressRange", CONV_MEM, API_DRIVER),
),
(
"cuPointerGetAttributes",
("hipPointerGetAttributes", CONV_MEM, API_DRIVER, HIP_UNSUPPORTED),
),
(
"cuPointerSetAttribute",
("hipPointerSetAttribute", CONV_MEM, API_DRIVER, HIP_UNSUPPORTED),
),
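    # Driver API: texture references and texture/surface objects.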
("CU_TR_FILTER_MODE_POINT", ("hipFilterModePoint", CONV_TEX, API_DRIVER)),
(
"CU_TR_FILTER_MODE_LINEAR",
("hipFilterModeLinear", CONV_TEX, API_DRIVER, HIP_UNSUPPORTED),
),
(
"cuTexRefGetAddress",
("hipTexRefGetAddress", CONV_TEX, API_DRIVER, HIP_UNSUPPORTED),
),
(
"cuTexRefGetAddressMode",
("hipTexRefGetAddressMode", CONV_TEX, API_DRIVER, HIP_UNSUPPORTED),
),
(
"cuTexRefGetArray",
("hipTexRefGetArray", CONV_TEX, API_DRIVER, HIP_UNSUPPORTED),
),
(
"cuTexRefGetBorderColor",
("hipTexRefGetBorderColor", CONV_TEX, API_DRIVER, HIP_UNSUPPORTED),
),
(
"cuTexRefGetFilterMode",
("hipTexRefGetFilterMode", CONV_TEX, API_DRIVER, HIP_UNSUPPORTED),
),
(
"cuTexRefGetFlags",
("hipTexRefGetFlags", CONV_TEX, API_DRIVER, HIP_UNSUPPORTED),
),
(
"cuTexRefGetFormat",
("hipTexRefGetFormat", CONV_TEX, API_DRIVER, HIP_UNSUPPORTED),
),
(
"cuTexRefGetMaxAnisotropy",
("hipTexRefGetMaxAnisotropy", CONV_TEX, API_DRIVER, HIP_UNSUPPORTED),
),
(
"cuTexRefGetMipmapFilterMode",
("hipTexRefGetMipmapFilterMode", CONV_TEX, API_DRIVER, HIP_UNSUPPORTED),
),
(
"cuTexRefGetMipmapLevelBias",
("hipTexRefGetMipmapLevelBias", CONV_TEX, API_DRIVER, HIP_UNSUPPORTED),
),
(
"cuTexRefGetMipmapLevelClamp",
("hipTexRefGetMipmapLevelClamp", CONV_TEX, API_DRIVER, HIP_UNSUPPORTED),
),
(
"cuTexRefGetMipmappedArray",
("hipTexRefGetMipmappedArray", CONV_TEX, API_DRIVER, HIP_UNSUPPORTED),
),
(
"cuTexRefSetAddress",
("hipTexRefSetAddress", CONV_TEX, API_DRIVER, HIP_UNSUPPORTED),
),
(
"cuTexRefSetAddress2D",
("hipTexRefSetAddress2D", CONV_TEX, API_DRIVER, HIP_UNSUPPORTED),
),
("cuTexRefSetAddressMode", ("hipTexRefSetAddressMode", CONV_TEX, API_DRIVER)),
("cuTexRefSetArray", ("hipTexRefSetArray", CONV_TEX, API_DRIVER)),
(
"cuTexRefSetBorderColor",
("hipTexRefSetBorderColor", CONV_TEX, API_DRIVER, HIP_UNSUPPORTED),
),
("cuTexRefSetFilterMode", ("hipTexRefSetFilterMode", CONV_TEX, API_DRIVER)),
("cuTexRefSetFlags", ("hipTexRefSetFlags", CONV_TEX, API_DRIVER)),
("cuTexRefSetFormat", ("hipTexRefSetFormat", CONV_TEX, API_DRIVER)),
(
"cuTexRefSetMaxAnisotropy",
("hipTexRefSetMaxAnisotropy", CONV_TEX, API_DRIVER, HIP_UNSUPPORTED),
),
(
"cuTexRefSetMipmapFilterMode",
("hipTexRefSetMipmapFilterMode", CONV_TEX, API_DRIVER, HIP_UNSUPPORTED),
),
(
"cuTexRefSetMipmapLevelBias",
("hipTexRefSetMipmapLevelBias", CONV_TEX, API_DRIVER, HIP_UNSUPPORTED),
),
(
"cuTexRefSetMipmapLevelClamp",
("hipTexRefSetMipmapLevelClamp", CONV_TEX, API_DRIVER, HIP_UNSUPPORTED),
),
(
"cuTexRefSetMipmappedArray",
("hipTexRefSetMipmappedArray", CONV_TEX, API_DRIVER, HIP_UNSUPPORTED),
),
("cuTexRefCreate", ("hipTexRefCreate", CONV_TEX, API_DRIVER, HIP_UNSUPPORTED)),
(
"cuTexRefDestroy",
("hipTexRefDestroy", CONV_TEX, API_DRIVER, HIP_UNSUPPORTED),
),
(
"cuSurfRefGetArray",
("hipSurfRefGetArray", CONV_SURFACE, API_DRIVER, HIP_UNSUPPORTED),
),
(
"cuSurfRefSetArray",
("hipSurfRefSetArray", CONV_SURFACE, API_DRIVER, HIP_UNSUPPORTED),
),
(
"cuTexObjectCreate",
("hipTexObjectCreate", CONV_TEX, API_DRIVER, HIP_UNSUPPORTED),
),
(
"cuTexObjectDestroy",
("hipTexObjectDestroy", CONV_TEX, API_DRIVER, HIP_UNSUPPORTED),
),
(
"cuTexObjectGetResourceDesc",
("hipTexObjectGetResourceDesc", CONV_TEX, API_DRIVER, HIP_UNSUPPORTED),
),
(
"cuTexObjectGetResourceViewDesc",
("hipTexObjectGetResourceViewDesc", CONV_TEX, API_DRIVER, HIP_UNSUPPORTED),
),
(
"cuTexObjectGetTextureDesc",
("hipTexObjectGetTextureDesc", CONV_TEX, API_DRIVER, HIP_UNSUPPORTED),
),
(
"cuSurfObjectCreate",
("hipSurfObjectCreate", CONV_TEX, API_DRIVER, HIP_UNSUPPORTED),
),
(
"cuSurfObjectDestroy",
("hipSurfObjectDestroy", CONV_TEX, API_DRIVER, HIP_UNSUPPORTED),
),
(
"cuSurfObjectGetResourceDesc",
("hipSurfObjectGetResourceDesc", CONV_TEX, API_DRIVER, HIP_UNSUPPORTED),
),
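    # Driver API: graphics interop and profiler control.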
(
"cuGraphicsMapResources",
("hipGraphicsMapResources", CONV_GRAPHICS, API_DRIVER, HIP_UNSUPPORTED),
),
(
"cuGraphicsResourceGetMappedMipmappedArray",
(
"hipGraphicsResourceGetMappedMipmappedArray",
CONV_GRAPHICS,
API_DRIVER,
HIP_UNSUPPORTED,
),
),
(
"cuGraphicsResourceGetMappedPointer",
(
"hipGraphicsResourceGetMappedPointer",
CONV_GRAPHICS,
API_DRIVER,
HIP_UNSUPPORTED,
),
),
(
"cuGraphicsResourceSetMapFlags",
(
"hipGraphicsResourceSetMapFlags",
CONV_GRAPHICS,
API_DRIVER,
HIP_UNSUPPORTED,
),
),
(
"cuGraphicsSubResourceGetMappedArray",
(
"hipGraphicsSubResourceGetMappedArray",
CONV_GRAPHICS,
API_DRIVER,
HIP_UNSUPPORTED,
),
),
(
"cuGraphicsUnmapResources",
("hipGraphicsUnmapResources", CONV_GRAPHICS, API_DRIVER, HIP_UNSUPPORTED),
),
(
"cuGraphicsUnregisterResource",
(
"hipGraphicsUnregisterResource",
CONV_GRAPHICS,
API_DRIVER,
HIP_UNSUPPORTED,
),
),
(
"cuProfilerInitialize",
("hipProfilerInitialize", CONV_OTHER, API_DRIVER, HIP_UNSUPPORTED),
),
("cuProfilerStart", ("hipProfilerStart", CONV_OTHER, API_DRIVER)),
("cuProfilerStop", ("hipProfilerStop", CONV_OTHER, API_DRIVER)),
(
"CU_GL_DEVICE_LIST_ALL",
("HIP_GL_DEVICE_LIST_ALL", CONV_GL, API_DRIVER, HIP_UNSUPPORTED),
),
(
"CU_GL_DEVICE_LIST_CURRENT_FRAME",
("HIP_GL_DEVICE_LIST_CURRENT_FRAME", CONV_GL, API_DRIVER, HIP_UNSUPPORTED),
),
(
"CU_GL_DEVICE_LIST_NEXT_FRAME",
("HIP_GL_DEVICE_LIST_NEXT_FRAME", CONV_GL, API_DRIVER, HIP_UNSUPPORTED),
),
("cuGLGetDevices", ("hipGLGetDevices", CONV_GL, API_DRIVER, HIP_UNSUPPORTED)),
(
"cuGraphicsGLRegisterBuffer",
("hipGraphicsGLRegisterBuffer", CONV_GL, API_DRIVER, HIP_UNSUPPORTED),
),
(
"cuGraphicsGLRegisterImage",
("hipGraphicsGLRegisterImage", CONV_GL, API_DRIVER, HIP_UNSUPPORTED),
),
("cuWGLGetDevice", ("hipWGLGetDevice", CONV_GL, API_DRIVER, HIP_UNSUPPORTED)),
(
"CU_GL_MAP_RESOURCE_FLAGS_NONE",
("HIP_GL_MAP_RESOURCE_FLAGS_NONE", CONV_GL, API_DRIVER, HIP_UNSUPPORTED),
),
(
"CU_GL_MAP_RESOURCE_FLAGS_READ_ONLY",
(
"HIP_GL_MAP_RESOURCE_FLAGS_READ_ONLY",
CONV_GL,
API_DRIVER,
HIP_UNSUPPORTED,
),
),
(
"CU_GL_MAP_RESOURCE_FLAGS_WRITE_DISCARD",
(
"HIP_GL_MAP_RESOURCE_FLAGS_WRITE_DISCARD",
CONV_GL,
API_DRIVER,
HIP_UNSUPPORTED,
),
),
("cuGLCtxCreate", ("hipGLCtxCreate", CONV_GL, API_DRIVER, HIP_UNSUPPORTED)),
("cuGLInit", ("hipGLInit", CONV_GL, API_DRIVER, HIP_UNSUPPORTED)),
(
"cuGLMapBufferObject",
("hipGLMapBufferObject", CONV_GL, API_DRIVER, HIP_UNSUPPORTED),
),
(
"cuGLMapBufferObjectAsync",
("hipGLMapBufferObjectAsync", CONV_GL, API_DRIVER, HIP_UNSUPPORTED),
),
(
"cuGLRegisterBufferObject",
("hipGLRegisterBufferObject", CONV_GL, API_DRIVER, HIP_UNSUPPORTED),
),
(
"cuGLSetBufferObjectMapFlags",
("hipGLSetBufferObjectMapFlags", CONV_GL, API_DRIVER, HIP_UNSUPPORTED),
),
(
"cuGLUnmapBufferObject",
("hipGLUnmapBufferObject", CONV_GL, API_DRIVER, HIP_UNSUPPORTED),
),
(
"cuGLUnmapBufferObjectAsync",
("hipGLUnmapBufferObjectAsync", CONV_GL, API_DRIVER, HIP_UNSUPPORTED),
),
(
"cuGLUnregisterBufferObject",
("hipGLUnregisterBufferObject", CONV_GL, API_DRIVER, HIP_UNSUPPORTED),
),
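    # Driver API: Direct3D 9/10/11 interop (flagged HIP_UNSUPPORTED below).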
(
"CU_D3D9_DEVICE_LIST_ALL",
("HIP_D3D9_DEVICE_LIST_ALL", CONV_D3D9, API_DRIVER, HIP_UNSUPPORTED),
),
(
"CU_D3D9_DEVICE_LIST_CURRENT_FRAME",
(
"HIP_D3D9_DEVICE_LIST_CURRENT_FRAME",
CONV_D3D9,
API_DRIVER,
HIP_UNSUPPORTED,
),
),
(
"CU_D3D9_DEVICE_LIST_NEXT_FRAME",
("HIP_D3D9_DEVICE_LIST_NEXT_FRAME", CONV_D3D9, API_DRIVER, HIP_UNSUPPORTED),
),
(
"cuD3D9CtxCreate",
("hipD3D9CtxCreate", CONV_D3D9, API_DRIVER, HIP_UNSUPPORTED),
),
(
"cuD3D9CtxCreateOnDevice",
("hipD3D9CtxCreateOnDevice", CONV_D3D9, API_DRIVER, HIP_UNSUPPORTED),
),
(
"cuD3D9GetDevice",
("hipD3D9GetDevice", CONV_D3D9, API_DRIVER, HIP_UNSUPPORTED),
),
(
"cuD3D9GetDevices",
("hipD3D9GetDevices", CONV_D3D9, API_DRIVER, HIP_UNSUPPORTED),
),
(
"cuD3D9GetDirect3DDevice",
("hipD3D9GetDirect3DDevice", CONV_D3D9, API_DRIVER, HIP_UNSUPPORTED),
),
(
"cuGraphicsD3D9RegisterResource",
("hipGraphicsD3D9RegisterResource", CONV_D3D9, API_DRIVER, HIP_UNSUPPORTED),
),
(
"CU_D3D9_MAPRESOURCE_FLAGS_NONE",
("HIP_D3D9_MAPRESOURCE_FLAGS_NONE", CONV_D3D9, API_DRIVER, HIP_UNSUPPORTED),
),
(
"CU_D3D9_MAPRESOURCE_FLAGS_READONLY",
(
"HIP_D3D9_MAPRESOURCE_FLAGS_READONLY",
CONV_D3D9,
API_DRIVER,
HIP_UNSUPPORTED,
),
),
(
"CU_D3D9_MAPRESOURCE_FLAGS_WRITEDISCARD",
(
"HIP_D3D9_MAPRESOURCE_FLAGS_WRITEDISCARD",
CONV_D3D9,
API_DRIVER,
HIP_UNSUPPORTED,
),
),
(
"CU_D3D9_REGISTER_FLAGS_NONE",
("HIP_D3D9_REGISTER_FLAGS_NONE", CONV_D3D9, API_DRIVER, HIP_UNSUPPORTED),
),
(
"CU_D3D9_REGISTER_FLAGS_ARRAY",
("HIP_D3D9_REGISTER_FLAGS_ARRAY", CONV_D3D9, API_DRIVER, HIP_UNSUPPORTED),
),
(
"cuD3D9MapResources",
("hipD3D9MapResources", CONV_D3D9, API_DRIVER, HIP_UNSUPPORTED),
),
(
"cuD3D9RegisterResource",
("hipD3D9RegisterResource", CONV_D3D9, API_DRIVER, HIP_UNSUPPORTED),
),
(
"cuD3D9ResourceGetMappedArray",
("hipD3D9ResourceGetMappedArray", CONV_D3D9, API_DRIVER, HIP_UNSUPPORTED),
),
(
"cuD3D9ResourceGetMappedPitch",
("hipD3D9ResourceGetMappedPitch", CONV_D3D9, API_DRIVER, HIP_UNSUPPORTED),
),
(
"cuD3D9ResourceGetMappedPointer",
("hipD3D9ResourceGetMappedPointer", CONV_D3D9, API_DRIVER, HIP_UNSUPPORTED),
),
(
"cuD3D9ResourceGetMappedSize",
("hipD3D9ResourceGetMappedSize", CONV_D3D9, API_DRIVER, HIP_UNSUPPORTED),
),
(
"cuD3D9ResourceGetSurfaceDimensions",
(
"hipD3D9ResourceGetSurfaceDimensions",
CONV_D3D9,
API_DRIVER,
HIP_UNSUPPORTED,
),
),
(
"cuD3D9ResourceSetMapFlags",
("hipD3D9ResourceSetMapFlags", CONV_D3D9, API_DRIVER, HIP_UNSUPPORTED),
),
(
"cuD3D9UnmapResources",
("hipD3D9UnmapResources", CONV_D3D9, API_DRIVER, HIP_UNSUPPORTED),
),
(
"cuD3D9UnregisterResource",
("hipD3D9UnregisterResource", CONV_D3D9, API_DRIVER, HIP_UNSUPPORTED),
),
(
"CU_D3D10_DEVICE_LIST_ALL",
("HIP_D3D10_DEVICE_LIST_ALL", CONV_D3D10, API_DRIVER, HIP_UNSUPPORTED),
),
(
"CU_D3D10_DEVICE_LIST_CURRENT_FRAME",
(
"HIP_D3D10_DEVICE_LIST_CURRENT_FRAME",
CONV_D3D10,
API_DRIVER,
HIP_UNSUPPORTED,
),
),
(
"CU_D3D10_DEVICE_LIST_NEXT_FRAME",
(
"HIP_D3D10_DEVICE_LIST_NEXT_FRAME",
CONV_D3D10,
API_DRIVER,
HIP_UNSUPPORTED,
),
),
(
"cuD3D10GetDevice",
("hipD3D10GetDevice", CONV_D3D10, API_DRIVER, HIP_UNSUPPORTED),
),
(
"cuD3D10GetDevices",
("hipD3D10GetDevices", CONV_D3D10, API_DRIVER, HIP_UNSUPPORTED),
),
(
"cuGraphicsD3D10RegisterResource",
(
"hipGraphicsD3D10RegisterResource",
CONV_D3D10,
API_DRIVER,
HIP_UNSUPPORTED,
),
),
(
"CU_D3D10_MAPRESOURCE_FLAGS_NONE",
(
"HIP_D3D10_MAPRESOURCE_FLAGS_NONE",
CONV_D3D10,
API_DRIVER,
HIP_UNSUPPORTED,
),
),
(
"CU_D3D10_MAPRESOURCE_FLAGS_READONLY",
(
"HIP_D3D10_MAPRESOURCE_FLAGS_READONLY",
CONV_D3D10,
API_DRIVER,
HIP_UNSUPPORTED,
),
),
(
"CU_D3D10_MAPRESOURCE_FLAGS_WRITEDISCARD",
(
"HIP_D3D10_MAPRESOURCE_FLAGS_WRITEDISCARD",
CONV_D3D10,
API_DRIVER,
HIP_UNSUPPORTED,
),
),
(
"CU_D3D10_REGISTER_FLAGS_NONE",
("HIP_D3D10_REGISTER_FLAGS_NONE", CONV_D3D10, API_DRIVER, HIP_UNSUPPORTED),
),
(
"CU_D3D10_REGISTER_FLAGS_ARRAY",
("HIP_D3D10_REGISTER_FLAGS_ARRAY", CONV_D3D10, API_DRIVER, HIP_UNSUPPORTED),
),
(
"cuD3D10CtxCreate",
("hipD3D10CtxCreate", CONV_D3D10, API_DRIVER, HIP_UNSUPPORTED),
),
(
"cuD3D10CtxCreateOnDevice",
("hipD3D10CtxCreateOnDevice", CONV_D3D10, API_DRIVER, HIP_UNSUPPORTED),
),
(
"cuD3D10GetDirect3DDevice",
("hipD3D10GetDirect3DDevice", CONV_D3D10, API_DRIVER, HIP_UNSUPPORTED),
),
(
"cuD3D10MapResources",
("hipD3D10MapResources", CONV_D3D10, API_DRIVER, HIP_UNSUPPORTED),
),
(
"cuD3D10RegisterResource",
("hipD3D10RegisterResource", CONV_D3D10, API_DRIVER, HIP_UNSUPPORTED),
),
(
"cuD3D10ResourceGetMappedArray",
("hipD3D10ResourceGetMappedArray", CONV_D3D10, API_DRIVER, HIP_UNSUPPORTED),
),
(
"cuD3D10ResourceGetMappedPitch",
("hipD3D10ResourceGetMappedPitch", CONV_D3D10, API_DRIVER, HIP_UNSUPPORTED),
),
(
"cuD3D10ResourceGetMappedPointer",
(
"hipD3D10ResourceGetMappedPointer",
CONV_D3D10,
API_DRIVER,
HIP_UNSUPPORTED,
),
),
(
"cuD3D10ResourceGetMappedSize",
("hipD3D10ResourceGetMappedSize", CONV_D3D10, API_DRIVER, HIP_UNSUPPORTED),
),
(
"cuD3D10ResourceGetSurfaceDimensions",
(
"hipD3D10ResourceGetSurfaceDimensions",
CONV_D3D10,
API_DRIVER,
HIP_UNSUPPORTED,
),
),
(
"cuD310ResourceSetMapFlags",
("hipD3D10ResourceSetMapFlags", CONV_D3D10, API_DRIVER, HIP_UNSUPPORTED),
),
(
"cuD3D10UnmapResources",
("hipD3D10UnmapResources", CONV_D3D10, API_DRIVER, HIP_UNSUPPORTED),
),
(
"cuD3D10UnregisterResource",
("hipD3D10UnregisterResource", CONV_D3D10, API_DRIVER, HIP_UNSUPPORTED),
),
(
"CU_D3D11_DEVICE_LIST_ALL",
("HIP_D3D11_DEVICE_LIST_ALL", CONV_D3D11, API_DRIVER, HIP_UNSUPPORTED),
),
(
"CU_D3D11_DEVICE_LIST_CURRENT_FRAME",
(
"HIP_D3D11_DEVICE_LIST_CURRENT_FRAME",
CONV_D3D11,
API_DRIVER,
HIP_UNSUPPORTED,
),
),
(
"CU_D3D11_DEVICE_LIST_NEXT_FRAME",
(
"HIP_D3D11_DEVICE_LIST_NEXT_FRAME",
CONV_D3D11,
API_DRIVER,
HIP_UNSUPPORTED,
),
),
(
"cuD3D11GetDevice",
("hipD3D11GetDevice", CONV_D3D11, API_DRIVER, HIP_UNSUPPORTED),
),
(
"cuD3D11GetDevices",
("hipD3D11GetDevices", CONV_D3D11, API_DRIVER, HIP_UNSUPPORTED),
),
(
"cuGraphicsD3D11RegisterResource",
(
"hipGraphicsD3D11RegisterResource",
CONV_D3D11,
API_DRIVER,
HIP_UNSUPPORTED,
),
),
(
"cuD3D11CtxCreate",
("hipD3D11CtxCreate", CONV_D3D11, API_DRIVER, HIP_UNSUPPORTED),
),
(
"cuD3D11CtxCreateOnDevice",
("hipD3D11CtxCreateOnDevice", CONV_D3D11, API_DRIVER, HIP_UNSUPPORTED),
),
(
"cuD3D11GetDirect3DDevice",
("hipD3D11GetDirect3DDevice", CONV_D3D11, API_DRIVER, HIP_UNSUPPORTED),
),
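    # Driver API: VDPAU and EGL stream interop (flagged HIP_UNSUPPORTED below).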
(
"cuGraphicsVDPAURegisterOutputSurface",
(
"hipGraphicsVDPAURegisterOutputSurface",
CONV_VDPAU,
API_DRIVER,
HIP_UNSUPPORTED,
),
),
(
"cuGraphicsVDPAURegisterVideoSurface",
(
"hipGraphicsVDPAURegisterVideoSurface",
CONV_VDPAU,
API_DRIVER,
HIP_UNSUPPORTED,
),
),
(
"cuVDPAUGetDevice",
("hipVDPAUGetDevice", CONV_VDPAU, API_DRIVER, HIP_UNSUPPORTED),
),
(
"cuVDPAUCtxCreate",
("hipVDPAUCtxCreate", CONV_VDPAU, API_DRIVER, HIP_UNSUPPORTED),
),
(
"cuEGLStreamConsumerAcquireFrame",
("hipEGLStreamConsumerAcquireFrame", CONV_EGL, API_DRIVER, HIP_UNSUPPORTED),
),
(
"cuEGLStreamConsumerConnect",
("hipEGLStreamConsumerConnect", CONV_EGL, API_DRIVER, HIP_UNSUPPORTED),
),
(
"cuEGLStreamConsumerConnectWithFlags",
(
"hipEGLStreamConsumerConnectWithFlags",
CONV_EGL,
API_DRIVER,
HIP_UNSUPPORTED,
),
),
(
"cuEGLStreamConsumerDisconnect",
("hipEGLStreamConsumerDisconnect", CONV_EGL, API_DRIVER, HIP_UNSUPPORTED),
),
(
"cuEGLStreamConsumerReleaseFrame",
("hipEGLStreamConsumerReleaseFrame", CONV_EGL, API_DRIVER, HIP_UNSUPPORTED),
),
(
"cuEGLStreamProducerConnect",
("hipEGLStreamProducerConnect", CONV_EGL, API_DRIVER, HIP_UNSUPPORTED),
),
(
"cuEGLStreamProducerDisconnect",
("hipEGLStreamProducerDisconnect", CONV_EGL, API_DRIVER, HIP_UNSUPPORTED),
),
(
"cuEGLStreamProducerPresentFrame",
("hipEGLStreamProducerPresentFrame", CONV_EGL, API_DRIVER, HIP_UNSUPPORTED),
),
(
"cuEGLStreamProducerReturnFrame",
("hipEGLStreamProducerReturnFrame", CONV_EGL, API_DRIVER, HIP_UNSUPPORTED),
),
(
"cuGraphicsEGLRegisterImage",
("hipGraphicsEGLRegisterImage", CONV_EGL, API_DRIVER, HIP_UNSUPPORTED),
),
(
"cuGraphicsResourceGetMappedEglFrame",
(
"hipGraphicsResourceGetMappedEglFrame",
CONV_EGL,
API_DRIVER,
HIP_UNSUPPORTED,
),
),
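    # Runtime API: cudaDataType enum values map to hipDataType equivalents.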
("cudaDataType_t", ("hipDataType", CONV_TYPE, API_RUNTIME)),
("cudaDataType", ("hipDataType", CONV_TYPE, API_RUNTIME)),
("CUDA_R_32F", ("HIP_R_32F", CONV_TYPE, API_RUNTIME)),
("CUDA_R_64F", ("HIP_R_64F", CONV_TYPE, API_RUNTIME)),
("CUDA_R_16F", ("HIP_R_16F", CONV_TYPE, API_RUNTIME)),
("CUDA_R_8I", ("HIP_R_8I", CONV_TYPE, API_RUNTIME)),
("CUDA_C_32F", ("HIP_C_32F", CONV_TYPE, API_RUNTIME)),
("CUDA_C_64F", ("HIP_C_64F", CONV_TYPE, API_RUNTIME)),
("CUDA_C_16F", ("HIP_C_16F", CONV_TYPE, API_RUNTIME)),
("CUDA_C_8I", ("HIP_C_8I", CONV_TYPE, API_RUNTIME)),
("CUDA_R_8U", ("HIP_R_8U", CONV_TYPE, API_RUNTIME)),
("CUDA_C_8U", ("HIP_C_8U", CONV_TYPE, API_RUNTIME)),
("CUDA_R_32I", ("HIP_R_32I", CONV_TYPE, API_RUNTIME)),
("CUDA_C_32I", ("HIP_C_32I", CONV_TYPE, API_RUNTIME)),
("CUDA_R_32U", ("HIP_R_32U", CONV_TYPE, API_RUNTIME)),
("CUDA_C_32U", ("HIP_C_32U", CONV_TYPE, API_RUNTIME)),
("CUDA_R_16BF", ("HIP_R_16BF", CONV_TYPE, API_RUNTIME)),
("CUDA_C_16BF", ("HIP_C_16BF", CONV_TYPE, API_RUNTIME)),
("CUDA_R_4I", ("HIP_R_4I", CONV_TYPE, API_RUNTIME)),
("CUDA_C_4I", ("HIP_C_4I", CONV_TYPE, API_RUNTIME)),
("CUDA_R_4U", ("HIP_R_4U", CONV_TYPE, API_RUNTIME)),
("CUDA_C_4U", ("HIP_C_4U", CONV_TYPE, API_RUNTIME)),
("CUDA_R_16I", ("HIP_R_16I", CONV_TYPE, API_RUNTIME)),
("CUDA_C_16I", ("HIP_C_16I", CONV_TYPE, API_RUNTIME)),
("CUDA_R_16U", ("HIP_R_16U", CONV_TYPE, API_RUNTIME)),
("CUDA_C_16U", ("HIP_C_16U", CONV_TYPE, API_RUNTIME)),
("CUDA_R_64I", ("HIP_R_64I", CONV_TYPE, API_RUNTIME)),
("CUDA_C_64I", ("HIP_C_64I", CONV_TYPE, API_RUNTIME)),
("CUDA_R_64U", ("HIP_R_64U", CONV_TYPE, API_RUNTIME)),
("CUDA_C_64U", ("HIP_C_64U", CONV_TYPE, API_RUNTIME)),
("CUDA_R_8F_E4M3", ("HIP_R_8F_E4M3", CONV_TYPE, API_RUNTIME)),
("CUDA_R_8F_E5M2", ("HIP_R_8F_E5M2", CONV_TYPE, API_RUNTIME)),
(
"MAJOR_VERSION",
("hipLibraryMajorVersion", CONV_TYPE, API_RUNTIME, HIP_UNSUPPORTED),
),
(
"MINOR_VERSION",
("hipLibraryMinorVersion", CONV_TYPE, API_RUNTIME, HIP_UNSUPPORTED),
),
(
"PATCH_LEVEL",
("hipLibraryPatchVersion", CONV_TYPE, API_RUNTIME, HIP_UNSUPPORTED),
),
(
"cudaMemAttachGlobal",
("hipMemAttachGlobal", CONV_TYPE, API_RUNTIME, HIP_UNSUPPORTED),
),
(
"cudaMemAttachHost",
("hipMemAttachHost", CONV_TYPE, API_RUNTIME, HIP_UNSUPPORTED),
),
(
"cudaMemAttachSingle",
("hipMemAttachSingle", CONV_TYPE, API_RUNTIME, HIP_UNSUPPORTED),
),
(
"cudaOccupancyDefault",
("hipOccupancyDefault", CONV_TYPE, API_RUNTIME, HIP_UNSUPPORTED),
),
(
"cudaOccupancyDisableCachingOverride",
(
"hipOccupancyDisableCachingOverride",
CONV_TYPE,
API_RUNTIME,
HIP_UNSUPPORTED,
),
),
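    # Runtime API: error handling and memory management (memcpy/memset variants, allocation, managed-memory advice).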
("cudaGetLastError", ("hipGetLastError", CONV_ERROR, API_RUNTIME)),
("cudaPeekAtLastError", ("hipPeekAtLastError", CONV_ERROR, API_RUNTIME)),
("cudaGetErrorName", ("hipGetErrorName", CONV_ERROR, API_RUNTIME)),
("cudaGetErrorString", ("hipGetErrorString", CONV_ERROR, API_RUNTIME)),
("cudaMemcpy3DParms", ("hipMemcpy3DParms", CONV_MEM, API_RUNTIME)),
(
"cudaMemcpy3DPeerParms",
("hipMemcpy3DPeerParms", CONV_MEM, API_RUNTIME, HIP_UNSUPPORTED),
),
("cudaMemcpy", ("hipMemcpy", CONV_MEM, API_RUNTIME)),
("cudaMemcpyToArray", ("hipMemcpyToArray", CONV_MEM, API_RUNTIME)),
("cudaMemcpyToSymbol", ("hipMemcpyToSymbol", CONV_MEM, API_RUNTIME)),
("cudaMemcpyToSymbolAsync", ("hipMemcpyToSymbolAsync", CONV_MEM, API_RUNTIME)),
("cudaMemcpyAsync", ("hipMemcpyAsync", CONV_MEM, API_RUNTIME)),
("cudaMemcpy2D", ("hipMemcpy2D", CONV_MEM, API_RUNTIME)),
("cudaMemcpy2DAsync", ("hipMemcpy2DAsync", CONV_MEM, API_RUNTIME)),
("cudaMemcpy2DToArray", ("hipMemcpy2DToArray", CONV_MEM, API_RUNTIME)),
(
"cudaMemcpy2DArrayToArray",
("hipMemcpy2DArrayToArray", CONV_MEM, API_RUNTIME, HIP_UNSUPPORTED),
),
(
"cudaMemcpy2DFromArray",
("hipMemcpy2DFromArray", CONV_MEM, API_RUNTIME, HIP_UNSUPPORTED),
),
(
"cudaMemcpy2DFromArrayAsync",
("hipMemcpy2DFromArrayAsync", CONV_MEM, API_RUNTIME, HIP_UNSUPPORTED),
),
(
"cudaMemcpy2DToArrayAsync",
("hipMemcpy2DToArrayAsync", CONV_MEM, API_RUNTIME, HIP_UNSUPPORTED),
),
("cudaMemcpy3D", ("hipMemcpy3D", CONV_MEM, API_RUNTIME)),
(
"cudaMemcpy3DAsync",
("hipMemcpy3DAsync", CONV_MEM, API_RUNTIME, HIP_UNSUPPORTED),
),
(
"cudaMemcpy3DPeer",
("hipMemcpy3DPeer", CONV_MEM, API_RUNTIME, HIP_UNSUPPORTED),
),
(
"cudaMemcpy3DPeerAsync",
("hipMemcpy3DPeerAsync", CONV_MEM, API_RUNTIME, HIP_UNSUPPORTED),
),
(
"cudaMemcpyArrayToArray",
("hipMemcpyArrayToArray", CONV_MEM, API_RUNTIME, HIP_UNSUPPORTED),
),
(
"cudaMemcpyFromArrayAsync",
("hipMemcpyFromArrayAsync", CONV_MEM, API_RUNTIME, HIP_UNSUPPORTED),
),
("cudaMemcpyFromSymbol", ("hipMemcpyFromSymbol", CONV_MEM, API_RUNTIME)),
(
"cudaMemcpyFromSymbolAsync",
("hipMemcpyFromSymbolAsync", CONV_MEM, API_RUNTIME),
),
("cudaMemAdvise", ("hipMemAdvise", CONV_MEM, API_RUNTIME, HIP_UNSUPPORTED)),
(
"cudaMemRangeGetAttribute",
("hipMemRangeGetAttribute", CONV_MEM, API_RUNTIME, HIP_UNSUPPORTED),
),
(
"cudaMemRangeGetAttributes",
("hipMemRangeGetAttributes", CONV_MEM, API_RUNTIME, HIP_UNSUPPORTED),
),
(
"cudaMemAdviseSetReadMostly",
("hipMemAdviseSetReadMostly", CONV_TYPE, API_RUNTIME, HIP_UNSUPPORTED),
),
(
"cudaMemAdviseUnsetReadMostly",
("hipMemAdviseUnsetReadMostly", CONV_TYPE, API_RUNTIME, HIP_UNSUPPORTED),
),
(
"cudaMemAdviseSetPreferredLocation",
(
"hipMemAdviseSetPreferredLocation",
CONV_TYPE,
API_RUNTIME,
HIP_UNSUPPORTED,
),
),
(
"cudaMemAdviseUnsetPreferredLocation",
(
"hipMemAdviseUnsetPreferredLocation",
CONV_TYPE,
API_RUNTIME,
HIP_UNSUPPORTED,
),
),
(
"cudaMemAdviseSetAccessedBy",
("hipMemAdviseSetAccessedBy", CONV_TYPE, API_RUNTIME, HIP_UNSUPPORTED),
),
(
"cudaMemAdviseUnsetAccessedBy",
("hipMemAdviseUnsetAccessedBy", CONV_TYPE, API_RUNTIME, HIP_UNSUPPORTED),
),
(
"cudaMemRangeAttributeReadMostly",
("hipMemRangeAttributeReadMostly", CONV_TYPE, API_RUNTIME, HIP_UNSUPPORTED),
),
(
"cudaMemRangeAttributePreferredLocation",
(
"hipMemRangeAttributePreferredLocation",
CONV_TYPE,
API_RUNTIME,
HIP_UNSUPPORTED,
),
),
(
"cudaMemRangeAttributeAccessedBy",
("hipMemRangeAttributeAccessedBy", CONV_TYPE, API_RUNTIME, HIP_UNSUPPORTED),
),
(
"cudaMemRangeAttributeLastPrefetchLocation",
(
"hipMemRangeAttributeLastPrefetchLocation",
CONV_TYPE,
API_RUNTIME,
HIP_UNSUPPORTED,
),
),
("cudaMemcpyHostToHost", ("hipMemcpyHostToHost", CONV_MEM, API_RUNTIME)),
("cudaMemcpyHostToDevice", ("hipMemcpyHostToDevice", CONV_MEM, API_RUNTIME)),
("cudaMemcpyDeviceToHost", ("hipMemcpyDeviceToHost", CONV_MEM, API_RUNTIME)),
(
"cudaMemcpyDeviceToDevice",
("hipMemcpyDeviceToDevice", CONV_MEM, API_RUNTIME),
),
("cudaMemcpyDefault", ("hipMemcpyDefault", CONV_MEM, API_RUNTIME)),
("cudaMemset", ("hipMemset", CONV_MEM, API_RUNTIME)),
("cudaMemsetAsync", ("hipMemsetAsync", CONV_MEM, API_RUNTIME)),
("cudaMemset2D", ("hipMemset2D", CONV_MEM, API_RUNTIME)),
(
"cudaMemset2DAsync",
("hipMemset2DAsync", CONV_MEM, API_RUNTIME, HIP_UNSUPPORTED),
),
("cudaMemset3D", ("hipMemset3D", CONV_MEM, API_RUNTIME, HIP_UNSUPPORTED)),
(
"cudaMemset3DAsync",
("hipMemset3DAsync", CONV_MEM, API_RUNTIME, HIP_UNSUPPORTED),
),
("cudaMemGetInfo", ("hipMemGetInfo", CONV_MEM, API_RUNTIME)),
(
"cudaArrayGetInfo",
("hipArrayGetInfo", CONV_MEM, API_RUNTIME, HIP_UNSUPPORTED),
),
(
"cudaFreeMipmappedArray",
("hipFreeMipmappedArray", CONV_MEM, API_RUNTIME, HIP_UNSUPPORTED),
),
(
"cudaGetMipmappedArrayLevel",
("hipGetMipmappedArrayLevel", CONV_MEM, API_RUNTIME, HIP_UNSUPPORTED),
),
(
"cudaGetSymbolAddress",
("hipGetSymbolAddress", CONV_MEM, API_RUNTIME, HIP_UNSUPPORTED),
),
(
"cudaGetSymbolSize",
("hipGetSymbolSize", CONV_MEM, API_RUNTIME, HIP_UNSUPPORTED),
),
(
"cudaMemPrefetchAsync",
("hipMemPrefetchAsync", CONV_MEM, API_RUNTIME, HIP_UNSUPPORTED),
),
("cudaMallocHost", ("hipHostMalloc", CONV_MEM, API_RUNTIME)),
("cudaMallocArray", ("hipMallocArray", CONV_MEM, API_RUNTIME)),
("cudaMalloc", ("hipMalloc", CONV_MEM, API_RUNTIME)),
("cudaMalloc3D", ("hipMalloc3D", CONV_MEM, API_RUNTIME)),
("cudaMalloc3DArray", ("hipMalloc3DArray", CONV_MEM, API_RUNTIME)),
(
"cudaMallocManaged",
("hipMallocManaged", CONV_MEM, API_RUNTIME, HIP_UNSUPPORTED),
),
(
"cudaMallocMipmappedArray",
("hipMallocMipmappedArray", CONV_MEM, API_RUNTIME, HIP_UNSUPPORTED),
),
("cudaMallocPitch", ("hipMallocPitch", CONV_MEM, API_RUNTIME)),
("cudaFreeHost", ("hipHostFree", CONV_MEM, API_RUNTIME)),
("cudaFreeArray", ("hipFreeArray", CONV_MEM, API_RUNTIME)),
("cudaFree", ("hipFree", CONV_MEM, API_RUNTIME)),
("cudaHostRegister", ("hipHostRegister", CONV_MEM, API_RUNTIME)),
("cudaHostUnregister", ("hipHostUnregister", CONV_MEM, API_RUNTIME)),
("cudaHostAlloc", ("hipHostMalloc", CONV_MEM, API_RUNTIME)),
("cudaMemoryTypeHost", ("hipMemoryTypeHost", CONV_MEM, API_RUNTIME)),
("cudaMemoryTypeDevice", ("hipMemoryTypeDevice", CONV_MEM, API_RUNTIME)),
("make_cudaExtent", ("make_hipExtent", CONV_MEM, API_RUNTIME)),
("make_cudaPitchedPtr", ("make_hipPitchedPtr", CONV_MEM, API_RUNTIME)),
("make_cudaPos", ("make_hipPos", CONV_MEM, API_RUNTIME)),
("cudaHostAllocDefault", ("hipHostMallocDefault", CONV_MEM, API_RUNTIME)),
("cudaHostAllocPortable", ("hipHostMallocPortable", CONV_MEM, API_RUNTIME)),
("cudaHostAllocMapped", ("hipHostMallocMapped", CONV_MEM, API_RUNTIME)),
(
"cudaHostAllocWriteCombined",
("hipHostMallocWriteCombined", CONV_MEM, API_RUNTIME),
),
("cudaHostGetFlags", ("hipHostGetFlags", CONV_MEM, API_RUNTIME)),
("cudaHostRegisterDefault", ("hipHostRegisterDefault", CONV_MEM, API_RUNTIME)),
(
"cudaHostRegisterPortable",
("hipHostRegisterPortable", CONV_MEM, API_RUNTIME),
),
("cudaHostRegisterMapped", ("hipHostRegisterMapped", CONV_MEM, API_RUNTIME)),
(
"cudaHostRegisterIoMemory",
("hipHostRegisterIoMemory", CONV_MEM, API_RUNTIME),
),
# ("warpSize", ("hipWarpSize", CONV_SPECIAL_FUNC, API_RUNTIME), (HIP actually uses warpSize...)),
("cudaEventCreate", ("hipEventCreate", CONV_EVENT, API_RUNTIME)),
(
"cudaEventCreateWithFlags",
("hipEventCreateWithFlags", CONV_EVENT, API_RUNTIME),
),
("cudaEventDestroy", ("hipEventDestroy", CONV_EVENT, API_RUNTIME)),
("cudaEventRecord", ("hipEventRecord", CONV_EVENT, API_RUNTIME)),
("cudaEventElapsedTime", ("hipEventElapsedTime", CONV_EVENT, API_RUNTIME)),
("cudaEventSynchronize", ("hipEventSynchronize", CONV_EVENT, API_RUNTIME)),
("cudaEventQuery", ("hipEventQuery", CONV_EVENT, API_RUNTIME)),
("cudaEventDefault", ("hipEventDefault", CONV_EVENT, API_RUNTIME)),
("cudaEventBlockingSync", ("hipEventBlockingSync", CONV_EVENT, API_RUNTIME)),
("cudaEventDisableTiming", ("hipEventDisableTiming", CONV_EVENT, API_RUNTIME)),
("cudaEventInterprocess", ("hipEventInterprocess", CONV_EVENT, API_RUNTIME)),
("cudaStreamCreate", ("hipStreamCreate", CONV_STREAM, API_RUNTIME)),
(
"cudaStreamCreateWithFlags",
("hipStreamCreateWithFlags", CONV_STREAM, API_RUNTIME),
),
(
"cudaStreamCreateWithPriority",
("hipStreamCreateWithPriority", CONV_STREAM, API_RUNTIME, HIP_UNSUPPORTED),
),
("cudaStreamDestroy", ("hipStreamDestroy", CONV_STREAM, API_RUNTIME)),
("cudaStreamWaitEvent", ("hipStreamWaitEvent", CONV_STREAM, API_RUNTIME)),
("cudaStreamSynchronize", ("hipStreamSynchronize", CONV_STREAM, API_RUNTIME)),
("cudaStreamGetFlags", ("hipStreamGetFlags", CONV_STREAM, API_RUNTIME)),
("cudaStreamQuery", ("hipStreamQuery", CONV_STREAM, API_RUNTIME)),
("cudaStreamAddCallback", ("hipStreamAddCallback", CONV_STREAM, API_RUNTIME)),
(
"cudaStreamAttachMemAsync",
("hipStreamAttachMemAsync", CONV_STREAM, API_RUNTIME, HIP_UNSUPPORTED),
),
(
"cudaStreamGetPriority",
("hipStreamGetPriority", CONV_STREAM, API_RUNTIME, HIP_UNSUPPORTED),
),
("cudaCpuDeviceId", ("hipCpuDeviceId", CONV_TYPE, API_RUNTIME)),
("cudaStreamDefault", ("hipStreamDefault", CONV_TYPE, API_RUNTIME)),
("cudaStreamNonBlocking", ("hipStreamNonBlocking", CONV_TYPE, API_RUNTIME)),
("cudaStreamGetCaptureInfo", ("hipStreamGetCaptureInfo", CONV_TYPE, API_RUNTIME)),
("cudaStreamGetCaptureInfo_v2", ("hipStreamGetCaptureInfo_v2", CONV_TYPE, API_RUNTIME)),
("cudaStreamCaptureStatus", ("hipStreamCaptureStatus", CONV_TYPE, API_RUNTIME)),
("cudaStreamCaptureStatusActive", ("hipStreamCaptureStatusActive", CONV_TYPE, API_RUNTIME)),
("cudaStreamCaptureMode", ("hipStreamCaptureMode", CONV_TYPE, API_RUNTIME)),
("cudaStreamCaptureModeGlobal", ("hipStreamCaptureModeGlobal", CONV_TYPE, API_RUNTIME)),
("cudaStreamCaptureModeRelaxed", ("hipStreamCaptureModeRelaxed", CONV_TYPE, API_RUNTIME)),
("cudaStreamCaptureModeThreadLocal", ("hipStreamCaptureModeThreadLocal", CONV_TYPE, API_RUNTIME)),
("cudaStreamBeginCapture", ("hipStreamBeginCapture", CONV_TYPE, API_RUNTIME)),
("cudaStreamEndCapture", ("hipStreamEndCapture", CONV_TYPE, API_RUNTIME)),
("cudaGraphInstantiate", ("hipGraphInstantiate", CONV_TYPE, API_RUNTIME)),
("cudaGraphInstantiateWithFlags", ("hipGraphInstantiateWithFlags", CONV_TYPE, API_RUNTIME)),
("cudaGraphInstantiateFlagAutoFreeOnLaunch", ("hipGraphInstantiateFlagAutoFreeOnLaunch", CONV_TYPE, API_RUNTIME)),
("cudaGraphDestroy", ("hipGraphDestroy", CONV_TYPE, API_RUNTIME)),
("cudaGraphExecDestroy", ("hipGraphExecDestroy", CONV_TYPE, API_RUNTIME)),
("cudaGraphLaunch", ("hipGraphLaunch", CONV_TYPE, API_RUNTIME)),
("cudaGraphGetNodes", ("hipGraphGetNodes", CONV_TYPE, API_RUNTIME)),
("cudaGraphDebugDotPrint", ("hipGraphDebugDotPrint", CONV_TYPE, API_RUNTIME)),
("cudaGraphDebugDotFlagsVerbose", ("hipGraphDebugDotFlagsVerbose", CONV_NUMERIC_LITERAL, API_RUNTIME)),
("cudaGraphRetainUserObject", ("hipGraphRetainUserObject", CONV_TYPE, API_RUNTIME)),
("cudaGraphUserObjectMove", ("hipGraphUserObjectMove", CONV_TYPE, API_RUNTIME)),
("cudaUserObject_t", ("hipUserObject_t", CONV_TYPE, API_RUNTIME)),
("cudaUserObjectCreate", ("hipUserObjectCreate", CONV_TYPE, API_RUNTIME)),
("cudaUserObjectNoDestructorSync", ("hipUserObjectNoDestructorSync", CONV_TYPE, API_RUNTIME)),
("cudaThreadExchangeStreamCaptureMode", ("hipThreadExchangeStreamCaptureMode", CONV_TYPE, API_RUNTIME)),
("cudaStreamIsCapturing", ("hipStreamIsCapturing", CONV_TYPE, API_RUNTIME)),
("cudaDeviceSynchronize", ("hipDeviceSynchronize", CONV_DEVICE, API_RUNTIME)),
("cudaDeviceReset", ("hipDeviceReset", CONV_DEVICE, API_RUNTIME)),
("cudaSetDevice", ("hipSetDevice", CONV_DEVICE, API_RUNTIME)),
("cudaGetDevice", ("hipGetDevice", CONV_DEVICE, API_RUNTIME)),
("cudaGetDeviceCount", ("hipGetDeviceCount", CONV_DEVICE, API_RUNTIME)),
("cudaChooseDevice", ("hipChooseDevice", CONV_DEVICE, API_RUNTIME)),
("cudaThreadExit", ("hipDeviceReset", CONV_THREAD, API_RUNTIME)),
(
"cudaThreadGetCacheConfig",
("hipDeviceGetCacheConfig", CONV_THREAD, API_RUNTIME),
),
(
"cudaThreadGetLimit",
("hipThreadGetLimit", CONV_THREAD, API_RUNTIME, HIP_UNSUPPORTED),
),
(
"cudaThreadSetCacheConfig",
("hipDeviceSetCacheConfig", CONV_THREAD, API_RUNTIME),
),
(
"cudaThreadSetLimit",
("hipThreadSetLimit", CONV_THREAD, API_RUNTIME, HIP_UNSUPPORTED),
),
("cudaThreadSynchronize", ("hipDeviceSynchronize", CONV_THREAD, API_RUNTIME)),
("cudaDeviceGetAttribute", ("hipDeviceGetAttribute", CONV_DEVICE, API_RUNTIME)),
(
"cudaDevAttrMaxThreadsPerBlock",
("hipDeviceAttributeMaxThreadsPerBlock", CONV_TYPE, API_RUNTIME),
),
(
"cudaDevAttrMaxBlockDimX",
("hipDeviceAttributeMaxBlockDimX", CONV_TYPE, API_RUNTIME),
),
(
"cudaDevAttrMaxBlockDimY",
("hipDeviceAttributeMaxBlockDimY", CONV_TYPE, API_RUNTIME),
),
(
"cudaDevAttrMaxBlockDimZ",
("hipDeviceAttributeMaxBlockDimZ", CONV_TYPE, API_RUNTIME),
),
(
"cudaDevAttrMaxGridDimX",
("hipDeviceAttributeMaxGridDimX", CONV_TYPE, API_RUNTIME),
),
(
"cudaDevAttrMaxGridDimY",
("hipDeviceAttributeMaxGridDimY", CONV_TYPE, API_RUNTIME),
),
(
"cudaDevAttrMaxGridDimZ",
("hipDeviceAttributeMaxGridDimZ", CONV_TYPE, API_RUNTIME),
),
(
"cudaDevAttrMaxSharedMemoryPerBlock",
("hipDeviceAttributeMaxSharedMemoryPerBlock", CONV_TYPE, API_RUNTIME),
),
(
"cudaDevAttrMaxSharedMemoryPerBlockOptin",
("hipDeviceAttributeMaxSharedMemoryPerBlock", CONV_TYPE, API_RUNTIME),
),
(
"cudaDevAttrTotalConstantMemory",
("hipDeviceAttributeTotalConstantMemory", CONV_TYPE, API_RUNTIME),
),
("cudaDevAttrWarpSize", ("hipDeviceAttributeWarpSize", CONV_TYPE, API_RUNTIME)),
(
"cudaDevAttrMaxPitch",
("hipDeviceAttributeMaxPitch", CONV_TYPE, API_RUNTIME, HIP_UNSUPPORTED),
),
(
"cudaDevAttrMaxRegistersPerBlock",
("hipDeviceAttributeMaxRegistersPerBlock", CONV_TYPE, API_RUNTIME),
),
(
"cudaDevAttrClockRate",
("hipDeviceAttributeClockRate", CONV_TYPE, API_RUNTIME),
),
(
"cudaDevAttrTextureAlignment",
(
"hipDeviceAttributeTextureAlignment",
CONV_TYPE,
API_RUNTIME,
HIP_UNSUPPORTED,
),
),
(
"cudaDevAttrGpuOverlap",
("hipDeviceAttributeGpuOverlap", CONV_TYPE, API_RUNTIME, HIP_UNSUPPORTED),
),
(
"cudaDevAttrMultiProcessorCount",
("hipDeviceAttributeMultiprocessorCount", CONV_TYPE, API_RUNTIME),
),
(
"cudaDevAttrKernelExecTimeout",
(
"hipDeviceAttributeKernelExecTimeout",
CONV_TYPE,
API_RUNTIME,
HIP_UNSUPPORTED,
),
),
(
"cudaDevAttrIntegrated",
("hipDeviceAttributeIntegrated", CONV_TYPE, API_RUNTIME, HIP_UNSUPPORTED),
),
(
"cudaDevAttrCanMapHostMemory",
(
"hipDeviceAttributeCanMapHostMemory",
CONV_TYPE,
API_RUNTIME,
HIP_UNSUPPORTED,
),
),
(
"cudaDevAttrComputeMode",
("hipDeviceAttributeComputeMode", CONV_TYPE, API_RUNTIME),
),
(
"cudaDevAttrMaxTexture1DWidth",
(
"hipDeviceAttributeMaxTexture1DWidth",
CONV_TYPE,
API_RUNTIME,
HIP_UNSUPPORTED,
),
),
(
"cudaDevAttrMaxTexture2DWidth",
(
"hipDeviceAttributeMaxTexture2DWidth",
CONV_TYPE,
API_RUNTIME,
HIP_UNSUPPORTED,
),
),
(
"cudaDevAttrMaxTexture2DHeight",
(
"hipDeviceAttributeMaxTexture2DHeight",
CONV_TYPE,
API_RUNTIME,
HIP_UNSUPPORTED,
),
),
(
"cudaDevAttrMaxTexture3DWidth",
(
"hipDeviceAttributeMaxTexture3DWidth",
CONV_TYPE,
API_RUNTIME,
HIP_UNSUPPORTED,
),
),
(
"cudaDevAttrMaxTexture3DHeight",
(
"hipDeviceAttributeMaxTexture3DHeight",
CONV_TYPE,
API_RUNTIME,
HIP_UNSUPPORTED,
),
),
(
"cudaDevAttrMaxTexture3DDepth",
(
"hipDeviceAttributeMaxTexture3DDepth",
CONV_TYPE,
API_RUNTIME,
HIP_UNSUPPORTED,
),
),
(
"cudaDevAttrMaxTexture2DLayeredWidth",
(
"hipDeviceAttributeMaxTexture2DLayeredWidth",
CONV_TYPE,
API_RUNTIME,
HIP_UNSUPPORTED,
),
),
(
"cudaDevAttrMaxTexture2DLayeredHeight",
(
"hipDeviceAttributeMaxTexture2DLayeredHeight",
CONV_TYPE,
API_RUNTIME,
HIP_UNSUPPORTED,
),
),
(
"cudaDevAttrMaxTexture2DLayeredLayers",
(
"hipDeviceAttributeMaxTexture2DLayeredLayers",
CONV_TYPE,
API_RUNTIME,
HIP_UNSUPPORTED,
),
),
(
"cudaDevAttrSurfaceAlignment",
(
"hipDeviceAttributeSurfaceAlignment",
CONV_TYPE,
API_RUNTIME,
HIP_UNSUPPORTED,
),
),
(
"cudaDevAttrConcurrentKernels",
("hipDeviceAttributeConcurrentKernels", CONV_TYPE, API_RUNTIME),
),
(
"cudaDevAttrEccEnabled",
("hipDeviceAttributeEccEnabled", CONV_TYPE, API_RUNTIME, HIP_UNSUPPORTED),
),
("cudaDevAttrPciBusId", ("hipDeviceAttributePciBusId", CONV_TYPE, API_RUNTIME)),
(
"cudaDevAttrPciDeviceId",
("hipDeviceAttributePciDeviceId", CONV_TYPE, API_RUNTIME),
),
(
"cudaDevAttrTccDriver",
("hipDeviceAttributeTccDriver", CONV_TYPE, API_RUNTIME, HIP_UNSUPPORTED),
),
(
"cudaDevAttrMemoryClockRate",
("hipDeviceAttributeMemoryClockRate", CONV_TYPE, API_RUNTIME),
),
(
"cudaDevAttrGlobalMemoryBusWidth",
("hipDeviceAttributeMemoryBusWidth", CONV_TYPE, API_RUNTIME),
),
(
"cudaDevAttrL2CacheSize",
("hipDeviceAttributeL2CacheSize", CONV_TYPE, API_RUNTIME),
),
(
"cudaDevAttrMaxThreadsPerMultiProcessor",
("hipDeviceAttributeMaxThreadsPerMultiProcessor", CONV_TYPE, API_RUNTIME),
),
(
"cudaDevAttrAsyncEngineCount",
(
"hipDeviceAttributeAsyncEngineCount",
CONV_TYPE,
API_RUNTIME,
HIP_UNSUPPORTED,
),
),
(
"cudaDevAttrUnifiedAddressing",
(
"hipDeviceAttributeUnifiedAddressing",
CONV_TYPE,
API_RUNTIME,
HIP_UNSUPPORTED,
),
),
(
"cudaDevAttrMaxTexture1DLayeredWidth",
(
"hipDeviceAttributeMaxTexture1DLayeredWidth",
CONV_TYPE,
API_RUNTIME,
HIP_UNSUPPORTED,
),
),
(
"cudaDevAttrMaxTexture1DLayeredLayers",
(
"hipDeviceAttributeMaxTexture1DLayeredLayers",
CONV_TYPE,
API_RUNTIME,
HIP_UNSUPPORTED,
),
),
(
"cudaDevAttrMaxTexture2DGatherWidth",
(
"hipDeviceAttributeMaxTexture2DGatherWidth",
CONV_TYPE,
API_RUNTIME,
HIP_UNSUPPORTED,
),
),
(
"cudaDevAttrMaxTexture2DGatherHeight",
(
"hipDeviceAttributeMaxTexture2DGatherHeight",
CONV_TYPE,
API_RUNTIME,
HIP_UNSUPPORTED,
),
),
(
"cudaDevAttrMaxTexture3DWidthAlt",
(
"hipDeviceAttributeMaxTexture3DWidthAlternate",
CONV_TYPE,
API_RUNTIME,
HIP_UNSUPPORTED,
),
),
(
"cudaDevAttrMaxTexture3DHeightAlt",
(
"hipDeviceAttributeMaxTexture3DHeightAlternate",
CONV_TYPE,
API_RUNTIME,
HIP_UNSUPPORTED,
),
),
(
"cudaDevAttrMaxTexture3DDepthAlt",
(
"hipDeviceAttributeMaxTexture3DDepthAlternate",
CONV_TYPE,
API_RUNTIME,
HIP_UNSUPPORTED,
),
),
(
"cudaDevAttrPciDomainId",
("hipDeviceAttributePciDomainId", CONV_TYPE, API_RUNTIME, HIP_UNSUPPORTED),
),
(
"cudaDevAttrTexturePitchAlignment",
(
"hipDeviceAttributeTexturePitchAlignment",
CONV_TYPE,
API_RUNTIME,
HIP_UNSUPPORTED,
),
),
(
"cudaDevAttrMaxTextureCubemapWidth",
(
"hipDeviceAttributeMaxTextureCubemapWidth",
CONV_TYPE,
API_RUNTIME,
HIP_UNSUPPORTED,
),
),
(
"cudaDevAttrMaxTextureCubemapLayeredWidth",
(
"hipDeviceAttributeMaxTextureCubemapLayeredWidth",
CONV_TYPE,
API_RUNTIME,
HIP_UNSUPPORTED,
),
),
(
"cudaDevAttrMaxTextureCubemapLayeredLayers",
(
"hipDeviceAttributeMaxTextureCubemapLayeredLayers",
CONV_TYPE,
API_RUNTIME,
HIP_UNSUPPORTED,
),
),
(
"cudaDevAttrMaxSurface1DWidth",
(
"hipDeviceAttributeMaxSurface1DWidth",
CONV_TYPE,
API_RUNTIME,
HIP_UNSUPPORTED,
),
),
(
"cudaDevAttrMaxSurface2DWidth",
(
"hipDeviceAttributeMaxSurface2DWidth",
CONV_TYPE,
API_RUNTIME,
HIP_UNSUPPORTED,
),
),
(
"cudaDevAttrMaxSurface2DHeight",
(
"hipDeviceAttributeMaxSurface2DHeight",
CONV_TYPE,
API_RUNTIME,
HIP_UNSUPPORTED,
),
),
(
"cudaDevAttrMaxSurface3DWidth",
(
"hipDeviceAttributeMaxSurface3DWidth",
CONV_TYPE,
API_RUNTIME,
HIP_UNSUPPORTED,
),
),
(
"cudaDevAttrMaxSurface3DHeight",
(
"hipDeviceAttributeMaxSurface3DHeight",
CONV_TYPE,
API_RUNTIME,
HIP_UNSUPPORTED,
),
),
(
"cudaDevAttrMaxSurface3DDepth",
(
"hipDeviceAttributeMaxSurface3DDepth",
CONV_TYPE,
API_RUNTIME,
HIP_UNSUPPORTED,
),
),
(
"cudaDevAttrMaxSurface1DLayeredWidth",
(
"hipDeviceAttributeMaxSurface1DLayeredWidth",
CONV_TYPE,
API_RUNTIME,
HIP_UNSUPPORTED,
),
),
(
"cudaDevAttrMaxSurface1DLayeredLayers",
(
"hipDeviceAttributeMaxSurface1DLayeredLayers",
CONV_TYPE,
API_RUNTIME,
HIP_UNSUPPORTED,
),
),
(
"cudaDevAttrMaxSurface2DLayeredWidth",
(
"hipDeviceAttributeMaxSurface2DLayeredWidth",
CONV_TYPE,
API_RUNTIME,
HIP_UNSUPPORTED,
),
),
(
"cudaDevAttrMaxSurface2DLayeredHeight",
(
"hipDeviceAttributeMaxSurface2DLayeredHeight",
CONV_TYPE,
API_RUNTIME,
HIP_UNSUPPORTED,
),
),
(
"cudaDevAttrMaxSurface2DLayeredLayers",
(
"hipDeviceAttributeMaxSurface2DLayeredLayers",
CONV_TYPE,
API_RUNTIME,
HIP_UNSUPPORTED,
),
),
(
"cudaDevAttrMaxSurfaceCubemapWidth",
(
"hipDeviceAttributeMaxSurfaceCubemapWidth",
CONV_TYPE,
API_RUNTIME,
HIP_UNSUPPORTED,
),
),
(
"cudaDevAttrMaxSurfaceCubemapLayeredWidth",
(
"hipDeviceAttributeMaxSurfaceCubemapLayeredWidth",
CONV_TYPE,
API_RUNTIME,
HIP_UNSUPPORTED,
),
),
(
"cudaDevAttrMaxSurfaceCubemapLayeredLayers",
(
"hipDeviceAttributeMaxSurfaceCubemapLayeredLayers",
CONV_TYPE,
API_RUNTIME,
HIP_UNSUPPORTED,
),
),
(
"cudaDevAttrMaxTexture1DLinearWidth",
(
"hipDeviceAttributeMaxTexture1DLinearWidth",
CONV_TYPE,
API_RUNTIME,
HIP_UNSUPPORTED,
),
),
(
"cudaDevAttrMaxTexture2DLinearWidth",
(
"hipDeviceAttributeMaxTexture2DLinearWidth",
CONV_TYPE,
API_RUNTIME,
HIP_UNSUPPORTED,
),
),
(
"cudaDevAttrMaxTexture2DLinearHeight",
(
"hipDeviceAttributeMaxTexture2DLinearHeight",
CONV_TYPE,
API_RUNTIME,
HIP_UNSUPPORTED,
),
),
(
"cudaDevAttrMaxTexture2DLinearPitch",
(
"hipDeviceAttributeMaxTexture2DLinearPitch",
CONV_TYPE,
API_RUNTIME,
HIP_UNSUPPORTED,
),
),
(
"cudaDevAttrMaxTexture2DMipmappedWidth",
(
"hipDeviceAttributeMaxTexture2DMipmappedWidth",
CONV_TYPE,
API_RUNTIME,
HIP_UNSUPPORTED,
),
),
(
"cudaDevAttrMaxTexture2DMipmappedHeight",
(
"hipDeviceAttributeMaxTexture2DMipmappedHeight",
CONV_TYPE,
API_RUNTIME,
HIP_UNSUPPORTED,
),
),
(
"cudaDevAttrComputeCapabilityMajor",
("hipDeviceAttributeComputeCapabilityMajor", CONV_TYPE, API_RUNTIME),
),
(
"cudaDevAttrComputeCapabilityMinor",
("hipDeviceAttributeComputeCapabilityMinor", CONV_TYPE, API_RUNTIME),
),
(
"cudaDevAttrMaxTexture1DMipmappedWidth",
(
"hipDeviceAttributeMaxTexture1DMipmappedWidth",
CONV_TYPE,
API_RUNTIME,
HIP_UNSUPPORTED,
),
),
(
"cudaDevAttrStreamPrioritiesSupported",
(
"hipDeviceAttributeStreamPrioritiesSupported",
CONV_TYPE,
API_RUNTIME,
HIP_UNSUPPORTED,
),
),
(
"cudaDevAttrGlobalL1CacheSupported",
(
"hipDeviceAttributeGlobalL1CacheSupported",
CONV_TYPE,
API_RUNTIME,
HIP_UNSUPPORTED,
),
),
(
"cudaDevAttrLocalL1CacheSupported",
(
"hipDeviceAttributeLocalL1CacheSupported",
CONV_TYPE,
API_RUNTIME,
HIP_UNSUPPORTED,
),
),
(
"cudaDevAttrMaxSharedMemoryPerMultiprocessor",
(
"hipDeviceAttributeMaxSharedMemoryPerMultiprocessor",
CONV_TYPE,
API_RUNTIME,
),
),
(
"cudaDevAttrMaxRegistersPerMultiprocessor",
(
"hipDeviceAttributeMaxRegistersPerMultiprocessor",
CONV_TYPE,
API_RUNTIME,
HIP_UNSUPPORTED,
),
),
(
"cudaDevAttrManagedMemory",
(
"hipDeviceAttributeManagedMemory",
CONV_TYPE,
API_RUNTIME,
HIP_UNSUPPORTED,
),
),
(
"cudaDevAttrIsMultiGpuBoard",
("hipDeviceAttributeIsMultiGpuBoard", CONV_TYPE, API_RUNTIME),
),
(
"cudaDevAttrMultiGpuBoardGroupID",
(
"hipDeviceAttributeMultiGpuBoardGroupID",
CONV_TYPE,
API_RUNTIME,
HIP_UNSUPPORTED,
),
),
(
"cudaDevAttrHostNativeAtomicSupported",
(
"hipDeviceAttributeHostNativeAtomicSupported",
CONV_TYPE,
API_RUNTIME,
HIP_UNSUPPORTED,
),
),
(
"cudaDevAttrSingleToDoublePrecisionPerfRatio",
(
"hipDeviceAttributeSingleToDoublePrecisionPerfRatio",
CONV_TYPE,
API_RUNTIME,
HIP_UNSUPPORTED,
),
),
(
"cudaDevAttrPageableMemoryAccess",
(
"hipDeviceAttributePageableMemoryAccess",
CONV_TYPE,
API_RUNTIME,
HIP_UNSUPPORTED,
),
),
(
"cudaDevAttrConcurrentManagedAccess",
(
"hipDeviceAttributeConcurrentManagedAccess",
CONV_TYPE,
API_RUNTIME,
HIP_UNSUPPORTED,
),
),
(
"cudaDevAttrComputePreemptionSupported",
(
"hipDeviceAttributeComputePreemptionSupported",
CONV_TYPE,
API_RUNTIME,
HIP_UNSUPPORTED,
),
),
(
"cudaDevAttrCanUseHostPointerForRegisteredMem",
(
"hipDeviceAttributeCanUseHostPointerForRegisteredMem",
CONV_TYPE,
API_RUNTIME,
HIP_UNSUPPORTED,
),
),
(
"cudaPointerGetAttributes",
("hipPointerGetAttributes", CONV_MEM, API_RUNTIME),
),
(
"cudaHostGetDevicePointer",
("hipHostGetDevicePointer", CONV_MEM, API_RUNTIME),
),
(
"cudaGetDeviceProperties",
("hipGetDeviceProperties", CONV_DEVICE, API_RUNTIME),
),
("cudaDeviceGetPCIBusId", ("hipDeviceGetPCIBusId", CONV_DEVICE, API_RUNTIME)),
(
"cudaDeviceGetByPCIBusId",
("hipDeviceGetByPCIBusId", CONV_DEVICE, API_RUNTIME),
),
(
"cudaDeviceGetStreamPriorityRange",
(
"hipDeviceGetStreamPriorityRange",
CONV_DEVICE,
API_RUNTIME,
HIP_UNSUPPORTED,
),
),
(
"cudaSetValidDevices",
("hipSetValidDevices", CONV_DEVICE, API_RUNTIME, HIP_UNSUPPORTED),
),
(
"cudaDevP2PAttrPerformanceRank",
(
"hipDeviceP2PAttributePerformanceRank",
CONV_TYPE,
API_RUNTIME,
HIP_UNSUPPORTED,
),
),
(
"cudaDevP2PAttrAccessSupported",
(
"hipDeviceP2PAttributeAccessSupported",
CONV_TYPE,
API_RUNTIME,
HIP_UNSUPPORTED,
),
),
(
"cudaDevP2PAttrNativeAtomicSupported",
(
"hipDeviceP2PAttributeNativeAtomicSupported",
CONV_TYPE,
API_RUNTIME,
HIP_UNSUPPORTED,
),
),
(
"cudaDeviceGetP2PAttribute",
("hipDeviceGetP2PAttribute", CONV_DEVICE, API_RUNTIME, HIP_UNSUPPORTED),
),
(
"cudaComputeModeDefault",
("hipComputeModeDefault", CONV_TYPE, API_RUNTIME, HIP_UNSUPPORTED),
),
(
"cudaComputeModeExclusive",
("hipComputeModeExclusive", CONV_TYPE, API_RUNTIME, HIP_UNSUPPORTED),
),
(
"cudaComputeModeProhibited",
("hipComputeModeProhibited", CONV_TYPE, API_RUNTIME, HIP_UNSUPPORTED),
),
(
"cudaComputeModeExclusiveProcess",
("hipComputeModeExclusiveProcess", CONV_TYPE, API_RUNTIME, HIP_UNSUPPORTED),
),
(
"cudaGetDeviceFlags",
("hipGetDeviceFlags", CONV_DEVICE, API_RUNTIME, HIP_UNSUPPORTED),
),
("cudaSetDeviceFlags", ("hipSetDeviceFlags", CONV_DEVICE, API_RUNTIME)),
("cudaDeviceScheduleAuto", ("hipDeviceScheduleAuto", CONV_TYPE, API_RUNTIME)),
("cudaDeviceScheduleSpin", ("hipDeviceScheduleSpin", CONV_TYPE, API_RUNTIME)),
("cudaDeviceScheduleYield", ("hipDeviceScheduleYield", CONV_TYPE, API_RUNTIME)),
(
"cudaDeviceBlockingSync",
("hipDeviceScheduleBlockingSync", CONV_TYPE, API_RUNTIME),
),
(
"cudaDeviceScheduleBlockingSync",
("hipDeviceScheduleBlockingSync", CONV_TYPE, API_RUNTIME),
),
(
"cudaDeviceScheduleMask",
("hipDeviceScheduleMask", CONV_TYPE, API_RUNTIME, HIP_UNSUPPORTED),
),
("cudaDeviceMapHost", ("hipDeviceMapHost", CONV_TYPE, API_RUNTIME)),
(
"cudaDeviceLmemResizeToMax",
("hipDeviceLmemResizeToMax", CONV_TYPE, API_RUNTIME, HIP_UNSUPPORTED),
),
("cudaDeviceMask", ("hipDeviceMask", CONV_TYPE, API_RUNTIME, HIP_UNSUPPORTED)),
(
"cudaDeviceSetCacheConfig",
("hipDeviceSetCacheConfig", CONV_CACHE, API_RUNTIME),
),
(
"cudaDeviceGetCacheConfig",
("hipDeviceGetCacheConfig", CONV_CACHE, API_RUNTIME),
),
(
"cudaFuncAttributes",
("hipFuncAttributes", CONV_TYPE, API_RUNTIME),
),
(
"cudaFuncAttributeMaxDynamicSharedMemorySize",
("hipFuncAttributeMaxDynamicSharedMemorySize", CONV_TYPE, API_RUNTIME),
),
(
"cudaFuncAttributePreferredSharedMemoryCarveout",
("hipFuncAttributePreferredSharedMemoryCarveout", CONV_TYPE, API_RUNTIME),
),
(
"cudaFuncSetAttribute",
("hipFuncSetAttribute", CONV_EXEC, API_RUNTIME),
),
("cudaFuncSetCacheConfig", ("hipFuncSetCacheConfig", CONV_CACHE, API_RUNTIME)),
(
"cudaFuncCachePreferNone",
("hipFuncCachePreferNone", CONV_CACHE, API_RUNTIME),
),
(
"cudaFuncCachePreferShared",
("hipFuncCachePreferShared", CONV_CACHE, API_RUNTIME),
),
("cudaFuncCachePreferL1", ("hipFuncCachePreferL1", CONV_CACHE, API_RUNTIME)),
(
"cudaFuncCachePreferEqual",
("hipFuncCachePreferEqual", CONV_CACHE, API_RUNTIME),
),
(
"cudaFuncGetAttributes",
("hipFuncGetAttributes", CONV_EXEC, API_RUNTIME, HIP_UNSUPPORTED),
),
(
"cudaFuncSetSharedMemConfig",
("hipFuncSetSharedMemConfig", CONV_EXEC, API_RUNTIME, HIP_UNSUPPORTED),
),
(
"cudaGetParameterBuffer",
("hipGetParameterBuffer", CONV_EXEC, API_RUNTIME, HIP_UNSUPPORTED),
),
(
"cudaSetDoubleForDevice",
("hipSetDoubleForDevice", CONV_EXEC, API_RUNTIME, HIP_UNSUPPORTED),
),
(
"cudaSetDoubleForHost",
("hipSetDoubleForHost", CONV_EXEC, API_RUNTIME, HIP_UNSUPPORTED),
),
(
"cudaConfigureCall",
("hipConfigureCall", CONV_EXEC, API_RUNTIME, HIP_UNSUPPORTED),
),
("cudaLaunch", ("hipLaunch", CONV_EXEC, API_RUNTIME, HIP_UNSUPPORTED)),
(
"cudaLaunchCooperativeKernel",
("hipLaunchCooperativeKernel", CONV_EXEC, API_RUNTIME),
),
("cudaLaunchHostFunc", ("hipLaunchHostFunc", CONV_EXEC, API_RUNTIME, HIP_UNSUPPORTED)),
(
"cudaSetupArgument",
("hipSetupArgument", CONV_EXEC, API_RUNTIME, HIP_UNSUPPORTED),
),
("cudaDriverGetVersion", ("hipDriverGetVersion", CONV_VERSION, API_RUNTIME)),
(
"cudaRuntimeGetVersion",
("hipRuntimeGetVersion", CONV_VERSION, API_RUNTIME, HIP_UNSUPPORTED),
),
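    # Runtime API: occupancy, peer access, shared-memory config, limits, and profiler control.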
(
"cudaOccupancyMaxPotentialBlockSize",
("hipOccupancyMaxPotentialBlockSize", CONV_OCCUPANCY, API_RUNTIME),
),
(
"cudaOccupancyMaxPotentialBlockSizeWithFlags",
(
"hipOccupancyMaxPotentialBlockSizeWithFlags",
CONV_OCCUPANCY,
API_RUNTIME,
HIP_UNSUPPORTED,
),
),
(
"cudaOccupancyMaxActiveBlocksPerMultiprocessor",
(
"hipOccupancyMaxActiveBlocksPerMultiprocessor",
CONV_OCCUPANCY,
API_RUNTIME,
),
),
(
"cudaOccupancyMaxActiveBlocksPerMultiprocessorWithFlags",
(
"hipOccupancyMaxActiveBlocksPerMultiprocessorWithFlags",
CONV_OCCUPANCY,
API_RUNTIME,
HIP_UNSUPPORTED,
),
),
(
"cudaOccupancyMaxPotentialBlockSizeVariableSMem",
(
"hipOccupancyMaxPotentialBlockSizeVariableSMem",
CONV_OCCUPANCY,
API_RUNTIME,
HIP_UNSUPPORTED,
),
),
(
"cudaOccupancyMaxPotentialBlockSizeVariableSMemWithFlags",
(
"hipOccupancyMaxPotentialBlockSizeVariableSMemWithFlags",
CONV_OCCUPANCY,
API_RUNTIME,
HIP_UNSUPPORTED,
),
),
("cudaDeviceCanAccessPeer", ("hipDeviceCanAccessPeer", CONV_PEER, API_RUNTIME)),
(
"cudaDeviceDisablePeerAccess",
("hipDeviceDisablePeerAccess", CONV_PEER, API_RUNTIME),
),
(
"cudaDeviceEnablePeerAccess",
("hipDeviceEnablePeerAccess", CONV_PEER, API_RUNTIME),
),
("cudaMemcpyPeerAsync", ("hipMemcpyPeerAsync", CONV_MEM, API_RUNTIME)),
("cudaMemcpyPeer", ("hipMemcpyPeer", CONV_MEM, API_RUNTIME)),
(
"cudaIpcMemLazyEnablePeerAccess",
("hipIpcMemLazyEnablePeerAccess", CONV_TYPE, API_RUNTIME),
),
(
"cudaDeviceSetSharedMemConfig",
("hipDeviceSetSharedMemConfig", CONV_DEVICE, API_RUNTIME),
),
(
"cudaDeviceGetSharedMemConfig",
("hipDeviceGetSharedMemConfig", CONV_DEVICE, API_RUNTIME),
),
(
"cudaSharedMemBankSizeDefault",
("hipSharedMemBankSizeDefault", CONV_TYPE, API_RUNTIME),
),
(
"cudaSharedMemBankSizeFourByte",
("hipSharedMemBankSizeFourByte", CONV_TYPE, API_RUNTIME),
),
(
"cudaSharedMemBankSizeEightByte",
("hipSharedMemBankSizeEightByte", CONV_TYPE, API_RUNTIME),
),
(
"cudaLimitStackSize",
("hipLimitStackSize", CONV_TYPE, API_RUNTIME, HIP_UNSUPPORTED),
),
(
"cudaLimitPrintfFifoSize",
("hipLimitPrintfFifoSize", CONV_TYPE, API_RUNTIME, HIP_UNSUPPORTED),
),
("cudaLimitMallocHeapSize", ("hipLimitMallocHeapSize", CONV_TYPE, API_RUNTIME)),
(
"cudaLimitDevRuntimeSyncDepth",
("hipLimitDevRuntimeSyncDepth", CONV_TYPE, API_RUNTIME, HIP_UNSUPPORTED),
),
(
"cudaLimitDevRuntimePendingLaunchCount",
(
"hipLimitDevRuntimePendingLaunchCount",
CONV_TYPE,
API_RUNTIME,
HIP_UNSUPPORTED,
),
),
("cudaDeviceGetLimit", ("hipDeviceGetLimit", CONV_DEVICE, API_RUNTIME)),
(
"cudaProfilerInitialize",
("hipProfilerInitialize", CONV_OTHER, API_RUNTIME, HIP_UNSUPPORTED),
),
("cudaProfilerStart", ("hipProfilerStart", CONV_OTHER, API_RUNTIME)),
("cudaProfilerStop", ("hipProfilerStop", CONV_OTHER, API_RUNTIME)),
(
"cudaKeyValuePair",
("hipKeyValuePair", CONV_OTHER, API_RUNTIME, HIP_UNSUPPORTED),
),
("cudaCSV", ("hipCSV", CONV_OTHER, API_RUNTIME, HIP_UNSUPPORTED)),
("cudaReadModeElementType", ("hipReadModeElementType", CONV_TEX, API_RUNTIME)),
(
"cudaReadModeNormalizedFloat",
("hipReadModeNormalizedFloat", CONV_TEX, API_RUNTIME),
),
("cudaFilterModePoint", ("hipFilterModePoint", CONV_TEX, API_RUNTIME)),
("cudaFilterModeLinear", ("hipFilterModeLinear", CONV_TEX, API_RUNTIME)),
("cudaBindTexture", ("hipBindTexture", CONV_TEX, API_RUNTIME)),
("cudaUnbindTexture", ("hipUnbindTexture", CONV_TEX, API_RUNTIME)),
("cudaBindTexture2D", ("hipBindTexture2D", CONV_TEX, API_RUNTIME)),
("cudaBindTextureToArray", ("hipBindTextureToArray", CONV_TEX, API_RUNTIME)),
(
"cudaBindTextureToMipmappedArray",
("hipBindTextureToMipmappedArray", CONV_TEX, API_RUNTIME),
),
(
"cudaGetTextureAlignmentOffset",
("hipGetTextureAlignmentOffset", CONV_TEX, API_RUNTIME),
),
("cudaGetTextureReference", ("hipGetTextureReference", CONV_TEX, API_RUNTIME)),
(
"cudaChannelFormatKindSigned",
("hipChannelFormatKindSigned", CONV_TEX, API_RUNTIME),
),
(
"cudaChannelFormatKindUnsigned",
("hipChannelFormatKindUnsigned", CONV_TEX, API_RUNTIME),
),
(
"cudaChannelFormatKindFloat",
("hipChannelFormatKindFloat", CONV_TEX, API_RUNTIME),
),
(
"cudaChannelFormatKindNone",
("hipChannelFormatKindNone", CONV_TEX, API_RUNTIME),
),
("cudaCreateChannelDesc", ("hipCreateChannelDesc", CONV_TEX, API_RUNTIME)),
("cudaGetChannelDesc", ("hipGetChannelDesc", CONV_TEX, API_RUNTIME)),
("cudaResourceTypeArray", ("hipResourceTypeArray", CONV_TEX, API_RUNTIME)),
(
"cudaResourceTypeMipmappedArray",
("hipResourceTypeMipmappedArray", CONV_TEX, API_RUNTIME),
),
("cudaResourceTypeLinear", ("hipResourceTypeLinear", CONV_TEX, API_RUNTIME)),
("cudaResourceTypePitch2D", ("hipResourceTypePitch2D", CONV_TEX, API_RUNTIME)),
("cudaResViewFormatNone", ("hipResViewFormatNone", CONV_TEX, API_RUNTIME)),
(
"cudaResViewFormatUnsignedChar1",
("hipResViewFormatUnsignedChar1", CONV_TEX, API_RUNTIME),
),
(
"cudaResViewFormatUnsignedChar2",
("hipResViewFormatUnsignedChar2", CONV_TEX, API_RUNTIME),
),
(
"cudaResViewFormatUnsignedChar4",
("hipResViewFormatUnsignedChar4", CONV_TEX, API_RUNTIME),
),
(
"cudaResViewFormatSignedChar1",
("hipResViewFormatSignedChar1", CONV_TEX, API_RUNTIME),
),
(
"cudaResViewFormatSignedChar2",
("hipResViewFormatSignedChar2", CONV_TEX, API_RUNTIME),
),
(
"cudaResViewFormatSignedChar4",
("hipResViewFormatSignedChar4", CONV_TEX, API_RUNTIME),
),
(
"cudaResViewFormatUnsignedShort1",
("hipResViewFormatUnsignedShort1", CONV_TEX, API_RUNTIME),
),
(
"cudaResViewFormatUnsignedShort2",
("hipResViewFormatUnsignedShort2", CONV_TEX, API_RUNTIME),
),
(
"cudaResViewFormatUnsignedShort4",
("hipResViewFormatUnsignedShort4", CONV_TEX, API_RUNTIME),
),
(
"cudaResViewFormatSignedShort1",
("hipResViewFormatSignedShort1", CONV_TEX, API_RUNTIME),
),
(
"cudaResViewFormatSignedShort2",
("hipResViewFormatSignedShort2", CONV_TEX, API_RUNTIME),
),
(
"cudaResViewFormatSignedShort4",
("hipResViewFormatSignedShort4", CONV_TEX, API_RUNTIME),
),
(
"cudaResViewFormatUnsignedInt1",
("hipResViewFormatUnsignedInt1", CONV_TEX, API_RUNTIME),
),
(
"cudaResViewFormatUnsignedInt2",
("hipResViewFormatUnsignedInt2", CONV_TEX, API_RUNTIME),
),
(
"cudaResViewFormatUnsignedInt4",
("hipResViewFormatUnsignedInt4", CONV_TEX, API_RUNTIME),
),
(
"cudaResViewFormatSignedInt1",
("hipResViewFormatSignedInt1", CONV_TEX, API_RUNTIME),
),
(
"cudaResViewFormatSignedInt2",
("hipResViewFormatSignedInt2", CONV_TEX, API_RUNTIME),
),
(
"cudaResViewFormatSignedInt4",
("hipResViewFormatSignedInt4", CONV_TEX, API_RUNTIME),
),
("cudaResViewFormatHalf1", ("hipResViewFormatHalf1", CONV_TEX, API_RUNTIME)),
("cudaResViewFormatHalf2", ("hipResViewFormatHalf2", CONV_TEX, API_RUNTIME)),
("cudaResViewFormatHalf4", ("hipResViewFormatHalf4", CONV_TEX, API_RUNTIME)),
("cudaResViewFormatFloat1", ("hipResViewFormatFloat1", CONV_TEX, API_RUNTIME)),
("cudaResViewFormatFloat2", ("hipResViewFormatFloat2", CONV_TEX, API_RUNTIME)),
("cudaResViewFormatFloat4", ("hipResViewFormatFloat4", CONV_TEX, API_RUNTIME)),
(
"cudaResViewFormatUnsignedBlockCompressed1",
("hipResViewFormatUnsignedBlockCompressed1", CONV_TEX, API_RUNTIME),
),
(
"cudaResViewFormatUnsignedBlockCompressed2",
("hipResViewFormatUnsignedBlockCompressed2", CONV_TEX, API_RUNTIME),
),
(
"cudaResViewFormatUnsignedBlockCompressed3",
("hipResViewFormatUnsignedBlockCompressed3", CONV_TEX, API_RUNTIME),
),
(
"cudaResViewFormatUnsignedBlockCompressed4",
("hipResViewFormatUnsignedBlockCompressed4", CONV_TEX, API_RUNTIME),
),
(
"cudaResViewFormatSignedBlockCompressed4",
("hipResViewFormatSignedBlockCompressed4", CONV_TEX, API_RUNTIME),
),
(
"cudaResViewFormatUnsignedBlockCompressed5",
("hipResViewFormatUnsignedBlockCompressed5", CONV_TEX, API_RUNTIME),
),
(
"cudaResViewFormatSignedBlockCompressed5",
("hipResViewFormatSignedBlockCompressed5", CONV_TEX, API_RUNTIME),
),
(
"cudaResViewFormatUnsignedBlockCompressed6H",
("hipResViewFormatUnsignedBlockCompressed6H", CONV_TEX, API_RUNTIME),
),
(
"cudaResViewFormatSignedBlockCompressed6H",
("hipResViewFormatSignedBlockCompressed6H", CONV_TEX, API_RUNTIME),
),
(
"cudaResViewFormatUnsignedBlockCompressed7",
("hipResViewFormatUnsignedBlockCompressed7", CONV_TEX, API_RUNTIME),
),
("cudaAddressModeWrap", ("hipAddressModeWrap", CONV_TEX, API_RUNTIME)),
("cudaAddressModeClamp", ("hipAddressModeClamp", CONV_TEX, API_RUNTIME)),
("cudaAddressModeMirror", ("hipAddressModeMirror", CONV_TEX, API_RUNTIME)),
("cudaAddressModeBorder", ("hipAddressModeBorder", CONV_TEX, API_RUNTIME)),
("cudaCreateTextureObject", ("hipCreateTextureObject", CONV_TEX, API_RUNTIME)),
(
"cudaDestroyTextureObject",
("hipDestroyTextureObject", CONV_TEX, API_RUNTIME),
),
(
"cudaGetTextureObjectResourceDesc",
("hipGetTextureObjectResourceDesc", CONV_TEX, API_RUNTIME),
),
(
"cudaGetTextureObjectResourceViewDesc",
("hipGetTextureObjectResourceViewDesc", CONV_TEX, API_RUNTIME),
),
(
"cudaGetTextureObjectTextureDesc",
("hipGetTextureObjectTextureDesc", CONV_TEX, API_RUNTIME),
),
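    # Surface API mappings (all flagged HIP_UNSUPPORTED)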
(
"cudaBindSurfaceToArray",
("hipBindSurfaceToArray", CONV_SURFACE, API_RUNTIME, HIP_UNSUPPORTED),
),
(
"cudaGetSurfaceReference",
("hipGetSurfaceReference", CONV_SURFACE, API_RUNTIME, HIP_UNSUPPORTED),
),
(
"cudaBoundaryModeZero",
("hipBoundaryModeZero", CONV_SURFACE, API_RUNTIME, HIP_UNSUPPORTED),
),
(
"cudaBoundaryModeClamp",
("hipBoundaryModeClamp", CONV_SURFACE, API_RUNTIME, HIP_UNSUPPORTED),
),
(
"cudaBoundaryModeTrap",
("hipBoundaryModeTrap", CONV_SURFACE, API_RUNTIME, HIP_UNSUPPORTED),
),
(
"cudaFormatModeForced",
("hipFormatModeForced", CONV_SURFACE, API_RUNTIME, HIP_UNSUPPORTED),
),
(
"cudaFormatModeAuto",
("hipFormatModeAuto", CONV_SURFACE, API_RUNTIME, HIP_UNSUPPORTED),
),
(
"cudaCreateSurfaceObject",
("hipCreateSurfaceObject", CONV_SURFACE, API_RUNTIME, HIP_UNSUPPORTED),
),
(
"cudaDestroySurfaceObject",
("hipDestroySurfaceObject", CONV_SURFACE, API_RUNTIME, HIP_UNSUPPORTED),
),
(
"cudaGetSurfaceObjectResourceDesc",
(
"hipGetSurfaceObjectResourceDesc",
CONV_SURFACE,
API_RUNTIME,
HIP_UNSUPPORTED,
),
),
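    # CUDA IPC handle mappings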
("cudaIpcCloseMemHandle", ("hipIpcCloseMemHandle", CONV_DEVICE, API_RUNTIME)),
("cudaIpcGetEventHandle", ("hipIpcGetEventHandle", CONV_DEVICE, API_RUNTIME)),
("cudaIpcGetMemHandle", ("hipIpcGetMemHandle", CONV_DEVICE, API_RUNTIME)),
("cudaIpcOpenEventHandle", ("hipIpcOpenEventHandle", CONV_DEVICE, API_RUNTIME)),
("cudaIpcOpenMemHandle", ("hipIpcOpenMemHandle", CONV_DEVICE, API_RUNTIME)),
(
"cudaGLGetDevices",
("hipGLGetDevices", CONV_GL, API_RUNTIME, HIP_UNSUPPORTED),
),
(
"cudaGraphicsGLRegisterBuffer",
("hipGraphicsGLRegisterBuffer", CONV_GL, API_RUNTIME, HIP_UNSUPPORTED),
),
(
"cudaGraphicsGLRegisterImage",
("hipGraphicsGLRegisterImage", CONV_GL, API_RUNTIME, HIP_UNSUPPORTED),
),
(
"cudaWGLGetDevice",
("hipWGLGetDevice", CONV_GL, API_RUNTIME, HIP_UNSUPPORTED),
),
(
"cudaGraphicsMapResources",
("hipGraphicsMapResources", CONV_GRAPHICS, API_RUNTIME, HIP_UNSUPPORTED),
),
(
"cudaGraphicsResourceGetMappedMipmappedArray",
(
"hipGraphicsResourceGetMappedMipmappedArray",
CONV_GRAPHICS,
API_RUNTIME,
HIP_UNSUPPORTED,
),
),
(
"cudaGraphicsResourceGetMappedPointer",
(
"hipGraphicsResourceGetMappedPointer",
CONV_GRAPHICS,
API_RUNTIME,
HIP_UNSUPPORTED,
),
),
(
"cudaGraphicsResourceSetMapFlags",
(
"hipGraphicsResourceSetMapFlags",
CONV_GRAPHICS,
API_RUNTIME,
HIP_UNSUPPORTED,
),
),
(
"cudaGraphicsSubResourceGetMappedArray",
(
"hipGraphicsSubResourceGetMappedArray",
CONV_GRAPHICS,
API_RUNTIME,
HIP_UNSUPPORTED,
),
),
(
"cudaGraphicsUnmapResources",
("hipGraphicsUnmapResources", CONV_GRAPHICS, API_RUNTIME, HIP_UNSUPPORTED),
),
(
"cudaGraphicsUnregisterResource",
(
"hipGraphicsUnregisterResource",
CONV_GRAPHICS,
API_RUNTIME,
HIP_UNSUPPORTED,
),
),
(
"cudaGraphicsCubeFacePositiveX",
(
"hipGraphicsCubeFacePositiveX",
CONV_GRAPHICS,
API_RUNTIME,
HIP_UNSUPPORTED,
),
),
(
"cudaGraphicsCubeFaceNegativeX",
(
"hipGraphicsCubeFaceNegativeX",
CONV_GRAPHICS,
API_RUNTIME,
HIP_UNSUPPORTED,
),
),
(
"cudaGraphicsCubeFacePositiveY",
(
"hipGraphicsCubeFacePositiveY",
CONV_GRAPHICS,
API_RUNTIME,
HIP_UNSUPPORTED,
),
),
(
"cudaGraphicsCubeFaceNegativeY",
(
"hipGraphicsCubeFaceNegativeY",
CONV_GRAPHICS,
API_RUNTIME,
HIP_UNSUPPORTED,
),
),
(
"cudaGraphicsCubeFacePositiveZ",
(
"hipGraphicsCubeFacePositiveZ",
CONV_GRAPHICS,
API_RUNTIME,
HIP_UNSUPPORTED,
),
),
(
"cudaGraphicsCubeFaceNegativeZ",
(
"hipGraphicsCubeFaceNegativeZ",
CONV_GRAPHICS,
API_RUNTIME,
HIP_UNSUPPORTED,
),
),
(
"cudaGraphicsMapFlagsNone",
("hipGraphicsMapFlagsNone", CONV_GRAPHICS, API_RUNTIME, HIP_UNSUPPORTED),
),
(
"cudaGraphicsMapFlagsReadOnly",
(
"hipGraphicsMapFlagsReadOnly",
CONV_GRAPHICS,
API_RUNTIME,
HIP_UNSUPPORTED,
),
),
(
"cudaGraphicsMapFlagsWriteDiscard",
(
"hipGraphicsMapFlagsWriteDiscard",
CONV_GRAPHICS,
API_RUNTIME,
HIP_UNSUPPORTED,
),
),
(
"cudaGraphicsRegisterFlagsNone",
(
"hipGraphicsRegisterFlagsNone",
CONV_GRAPHICS,
API_RUNTIME,
HIP_UNSUPPORTED,
),
),
(
"cudaGraphicsRegisterFlagsReadOnly",
(
"hipGraphicsRegisterFlagsReadOnly",
CONV_GRAPHICS,
API_RUNTIME,
HIP_UNSUPPORTED,
),
),
(
"cudaGraphicsRegisterFlagsWriteDiscard",
(
"hipGraphicsRegisterFlagsWriteDiscard",
CONV_GRAPHICS,
API_RUNTIME,
HIP_UNSUPPORTED,
),
),
(
"cudaGraphicsRegisterFlagsSurfaceLoadStore",
(
"hipGraphicsRegisterFlagsSurfaceLoadStore",
CONV_GRAPHICS,
API_RUNTIME,
HIP_UNSUPPORTED,
),
),
(
"cudaGraphicsRegisterFlagsTextureGather",
(
"hipGraphicsRegisterFlagsTextureGather",
CONV_GRAPHICS,
API_RUNTIME,
HIP_UNSUPPORTED,
),
),
(
"cudaGLDeviceListAll",
("HIP_GL_DEVICE_LIST_ALL", CONV_GL, API_RUNTIME, HIP_UNSUPPORTED),
),
(
"cudaGLDeviceListCurrentFrame",
("HIP_GL_DEVICE_LIST_CURRENT_FRAME", CONV_GL, API_RUNTIME, HIP_UNSUPPORTED),
),
(
"cudaGLDeviceListNextFrame",
("HIP_GL_DEVICE_LIST_NEXT_FRAME", CONV_GL, API_RUNTIME, HIP_UNSUPPORTED),
),
(
"cudaGLGetDevices",
("hipGLGetDevices", CONV_GL, API_RUNTIME, HIP_UNSUPPORTED),
),
(
"cudaGraphicsGLRegisterBuffer",
("hipGraphicsGLRegisterBuffer", CONV_GL, API_RUNTIME, HIP_UNSUPPORTED),
),
(
"cudaGraphicsGLRegisterImage",
("hipGraphicsGLRegisterImage", CONV_GL, API_RUNTIME, HIP_UNSUPPORTED),
),
(
"cudaWGLGetDevice",
("hipWGLGetDevice", CONV_GL, API_RUNTIME, HIP_UNSUPPORTED),
),
(
"cudaGLMapFlagsNone",
("HIP_GL_MAP_RESOURCE_FLAGS_NONE", CONV_GL, API_RUNTIME, HIP_UNSUPPORTED),
),
(
"cudaGLMapFlagsReadOnly",
(
"HIP_GL_MAP_RESOURCE_FLAGS_READ_ONLY",
CONV_GL,
API_RUNTIME,
HIP_UNSUPPORTED,
),
),
(
"cudaGLMapFlagsWriteDiscard",
(
"HIP_GL_MAP_RESOURCE_FLAGS_WRITE_DISCARD",
CONV_GL,
API_RUNTIME,
HIP_UNSUPPORTED,
),
),
(
"cudaGLMapBufferObject",
("hipGLMapBufferObject__", CONV_GL, API_RUNTIME, HIP_UNSUPPORTED),
),
(
"cudaGLMapBufferObjectAsync",
("hipGLMapBufferObjectAsync__", CONV_GL, API_RUNTIME, HIP_UNSUPPORTED),
),
(
"cudaGLRegisterBufferObject",
("hipGLRegisterBufferObject", CONV_GL, API_RUNTIME, HIP_UNSUPPORTED),
),
(
"cudaGLSetBufferObjectMapFlags",
("hipGLSetBufferObjectMapFlags", CONV_GL, API_RUNTIME, HIP_UNSUPPORTED),
),
(
"cudaGLSetGLDevice",
("hipGLSetGLDevice", CONV_GL, API_RUNTIME, HIP_UNSUPPORTED),
),
(
"cudaGLUnmapBufferObject",
("hipGLUnmapBufferObject", CONV_GL, API_RUNTIME, HIP_UNSUPPORTED),
),
(
"cudaGLUnmapBufferObjectAsync",
("hipGLUnmapBufferObjectAsync", CONV_GL, API_RUNTIME, HIP_UNSUPPORTED),
),
(
"cudaGLUnregisterBufferObject",
("hipGLUnregisterBufferObject", CONV_GL, API_RUNTIME, HIP_UNSUPPORTED),
),
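    # Direct3D 9 interop mappings (all flagged HIP_UNSUPPORTED)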
(
"cudaD3D9DeviceListAll",
("HIP_D3D9_DEVICE_LIST_ALL", CONV_D3D9, API_RUNTIME, HIP_UNSUPPORTED),
),
(
"cudaD3D9DeviceListCurrentFrame",
(
"HIP_D3D9_DEVICE_LIST_CURRENT_FRAME",
CONV_D3D9,
API_RUNTIME,
HIP_UNSUPPORTED,
),
),
(
"cudaD3D9DeviceListNextFrame",
(
"HIP_D3D9_DEVICE_LIST_NEXT_FRAME",
CONV_D3D9,
API_RUNTIME,
HIP_UNSUPPORTED,
),
),
(
"cudaD3D9GetDevice",
("hipD3D9GetDevice", CONV_D3D9, API_RUNTIME, HIP_UNSUPPORTED),
),
(
"cudaD3D9GetDevices",
("hipD3D9GetDevices", CONV_D3D9, API_RUNTIME, HIP_UNSUPPORTED),
),
(
"cudaD3D9GetDirect3DDevice",
("hipD3D9GetDirect3DDevice", CONV_D3D9, API_RUNTIME, HIP_UNSUPPORTED),
),
(
"cudaD3D9SetDirect3DDevice",
("hipD3D9SetDirect3DDevice", CONV_D3D9, API_RUNTIME, HIP_UNSUPPORTED),
),
(
"cudaGraphicsD3D9RegisterResource",
(
"hipGraphicsD3D9RegisterResource",
CONV_D3D9,
API_RUNTIME,
HIP_UNSUPPORTED,
),
),
(
"cudaD3D9MapFlags",
("hipD3D9MapFlags", CONV_D3D9, API_RUNTIME, HIP_UNSUPPORTED),
),
(
"cudaD3D9MapFlagsNone",
(
"HIP_D3D9_MAPRESOURCE_FLAGS_NONE",
CONV_D3D9,
API_RUNTIME,
HIP_UNSUPPORTED,
),
),
(
"cudaD3D9MapFlagsReadOnly",
(
"HIP_D3D9_MAPRESOURCE_FLAGS_READONLY",
CONV_D3D9,
API_RUNTIME,
HIP_UNSUPPORTED,
),
),
(
"cudaD3D9MapFlagsWriteDiscard",
(
"HIP_D3D9_MAPRESOURCE_FLAGS_WRITEDISCARD",
CONV_D3D9,
API_RUNTIME,
HIP_UNSUPPORTED,
),
),
(
"cudaD3D9RegisterFlagsNone",
("HIP_D3D9_REGISTER_FLAGS_NONE", CONV_D3D9, API_RUNTIME, HIP_UNSUPPORTED),
),
(
"cudaD3D9RegisterFlagsArray",
("HIP_D3D9_REGISTER_FLAGS_ARRAY", CONV_D3D9, API_RUNTIME, HIP_UNSUPPORTED),
),
(
"cudaD3D9MapResources",
("hipD3D9MapResources", CONV_D3D9, API_RUNTIME, HIP_UNSUPPORTED),
),
(
"cudaD3D9RegisterResource",
("hipD3D9RegisterResource", CONV_D3D9, API_RUNTIME, HIP_UNSUPPORTED),
),
(
"cudaD3D9ResourceGetMappedArray",
("hipD3D9ResourceGetMappedArray", CONV_D3D9, API_RUNTIME, HIP_UNSUPPORTED),
),
(
"cudaD3D9ResourceGetMappedPitch",
("hipD3D9ResourceGetMappedPitch", CONV_D3D9, API_RUNTIME, HIP_UNSUPPORTED),
),
(
"cudaD3D9ResourceGetMappedPointer",
(
"hipD3D9ResourceGetMappedPointer",
CONV_D3D9,
API_RUNTIME,
HIP_UNSUPPORTED,
),
),
(
"cudaD3D9ResourceGetMappedSize",
("hipD3D9ResourceGetMappedSize", CONV_D3D9, API_RUNTIME, HIP_UNSUPPORTED),
),
(
"cudaD3D9ResourceGetSurfaceDimensions",
(
"hipD3D9ResourceGetSurfaceDimensions",
CONV_D3D9,
API_RUNTIME,
HIP_UNSUPPORTED,
),
),
(
"cudaD3D9ResourceSetMapFlags",
("hipD3D9ResourceSetMapFlags", CONV_D3D9, API_RUNTIME, HIP_UNSUPPORTED),
),
(
"cudaD3D9UnmapResources",
("hipD3D9UnmapResources", CONV_D3D9, API_RUNTIME, HIP_UNSUPPORTED),
),
(
"cudaD3D9UnregisterResource",
("hipD3D9UnregisterResource", CONV_D3D9, API_RUNTIME, HIP_UNSUPPORTED),
),
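    # Direct3D 10 interop mappings (all flagged HIP_UNSUPPORTED)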
(
"cudaD3D10DeviceListAll",
("HIP_D3D10_DEVICE_LIST_ALL", CONV_D3D10, API_RUNTIME, HIP_UNSUPPORTED),
),
(
"cudaD3D10DeviceListCurrentFrame",
(
"HIP_D3D10_DEVICE_LIST_CURRENT_FRAME",
CONV_D3D10,
API_RUNTIME,
HIP_UNSUPPORTED,
),
),
(
"cudaD3D10DeviceListNextFrame",
(
"HIP_D3D10_DEVICE_LIST_NEXT_FRAME",
CONV_D3D10,
API_RUNTIME,
HIP_UNSUPPORTED,
),
),
(
"cudaD3D10GetDevice",
("hipD3D10GetDevice", CONV_D3D10, API_RUNTIME, HIP_UNSUPPORTED),
),
(
"cudaD3D10GetDevices",
("hipD3D10GetDevices", CONV_D3D10, API_RUNTIME, HIP_UNSUPPORTED),
),
(
"cudaGraphicsD3D10RegisterResource",
(
"hipGraphicsD3D10RegisterResource",
CONV_D3D10,
API_RUNTIME,
HIP_UNSUPPORTED,
),
),
(
"cudaD3D10MapFlagsNone",
(
"HIP_D3D10_MAPRESOURCE_FLAGS_NONE",
CONV_D3D10,
API_RUNTIME,
HIP_UNSUPPORTED,
),
),
(
"cudaD3D10MapFlagsReadOnly",
(
"HIP_D3D10_MAPRESOURCE_FLAGS_READONLY",
CONV_D3D10,
API_RUNTIME,
HIP_UNSUPPORTED,
),
),
(
"cudaD3D10MapFlagsWriteDiscard",
(
"HIP_D3D10_MAPRESOURCE_FLAGS_WRITEDISCARD",
CONV_D3D10,
API_RUNTIME,
HIP_UNSUPPORTED,
),
),
(
"cudaD3D10RegisterFlagsNone",
("HIP_D3D10_REGISTER_FLAGS_NONE", CONV_D3D10, API_RUNTIME, HIP_UNSUPPORTED),
),
(
"cudaD3D10RegisterFlagsArray",
(
"HIP_D3D10_REGISTER_FLAGS_ARRAY",
CONV_D3D10,
API_RUNTIME,
HIP_UNSUPPORTED,
),
),
(
"cudaD3D10GetDirect3DDevice",
("hipD3D10GetDirect3DDevice", CONV_D3D10, API_RUNTIME, HIP_UNSUPPORTED),
),
(
"cudaD3D10MapResources",
("hipD3D10MapResources", CONV_D3D10, API_RUNTIME, HIP_UNSUPPORTED),
),
(
"cudaD3D10RegisterResource",
("hipD3D10RegisterResource", CONV_D3D10, API_RUNTIME, HIP_UNSUPPORTED),
),
(
"cudaD3D10ResourceGetMappedArray",
(
"hipD3D10ResourceGetMappedArray",
CONV_D3D10,
API_RUNTIME,
HIP_UNSUPPORTED,
),
),
(
"cudaD3D10ResourceGetMappedPitch",
(
"hipD3D10ResourceGetMappedPitch",
CONV_D3D10,
API_RUNTIME,
HIP_UNSUPPORTED,
),
),
(
"cudaD3D10ResourceGetMappedPointer",
(
"hipD3D10ResourceGetMappedPointer",
CONV_D3D10,
API_RUNTIME,
HIP_UNSUPPORTED,
),
),
(
"cudaD3D10ResourceGetMappedSize",
("hipD3D10ResourceGetMappedSize", CONV_D3D10, API_RUNTIME, HIP_UNSUPPORTED),
),
(
"cudaD3D10ResourceGetSurfaceDimensions",
(
"hipD3D10ResourceGetSurfaceDimensions",
CONV_D3D10,
API_RUNTIME,
HIP_UNSUPPORTED,
),
),
(
"cudaD3D10ResourceSetMapFlags",
("hipD3D10ResourceSetMapFlags", CONV_D3D10, API_RUNTIME, HIP_UNSUPPORTED),
),
(
"cudaD3D10SetDirect3DDevice",
("hipD3D10SetDirect3DDevice", CONV_D3D10, API_RUNTIME, HIP_UNSUPPORTED),
),
(
"cudaD3D10UnmapResources",
("hipD3D10UnmapResources", CONV_D3D10, API_RUNTIME, HIP_UNSUPPORTED),
),
(
"cudaD3D10UnregisterResource",
("hipD3D10UnregisterResource", CONV_D3D10, API_RUNTIME, HIP_UNSUPPORTED),
),
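    # Direct3D 11 interop mappings (all flagged HIP_UNSUPPORTED)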
(
"cudaD3D11DeviceListAll",
("HIP_D3D11_DEVICE_LIST_ALL", CONV_D3D11, API_RUNTIME, HIP_UNSUPPORTED),
),
(
"cudaD3D11DeviceListCurrentFrame",
(
"HIP_D3D11_DEVICE_LIST_CURRENT_FRAME",
CONV_D3D11,
API_RUNTIME,
HIP_UNSUPPORTED,
),
),
(
"cudaD3D11DeviceListNextFrame",
(
"HIP_D3D11_DEVICE_LIST_NEXT_FRAME",
CONV_D3D11,
API_RUNTIME,
HIP_UNSUPPORTED,
),
),
(
"cudaD3D11GetDevice",
("hipD3D11GetDevice", CONV_D3D11, API_RUNTIME, HIP_UNSUPPORTED),
),
(
"cudaD3D11GetDevices",
("hipD3D11GetDevices", CONV_D3D11, API_RUNTIME, HIP_UNSUPPORTED),
),
(
"cudaGraphicsD3D11RegisterResource",
(
"hipGraphicsD3D11RegisterResource",
CONV_D3D11,
API_RUNTIME,
HIP_UNSUPPORTED,
),
),
(
"cudaD3D11GetDevice",
("hipD3D11GetDevice", CONV_D3D11, API_RUNTIME, HIP_UNSUPPORTED),
),
(
"cudaD3D11GetDevices",
("hipD3D11GetDevices", CONV_D3D11, API_RUNTIME, HIP_UNSUPPORTED),
),
(
"cudaGraphicsD3D11RegisterResource",
(
"hipGraphicsD3D11RegisterResource",
CONV_D3D11,
API_RUNTIME,
HIP_UNSUPPORTED,
),
),
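    # VDPAU interop mappings (all flagged HIP_UNSUPPORTED)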
(
"cudaGraphicsVDPAURegisterOutputSurface",
(
"hipGraphicsVDPAURegisterOutputSurface",
CONV_VDPAU,
API_RUNTIME,
HIP_UNSUPPORTED,
),
),
(
"cudaGraphicsVDPAURegisterVideoSurface",
(
"hipGraphicsVDPAURegisterVideoSurface",
CONV_VDPAU,
API_RUNTIME,
HIP_UNSUPPORTED,
),
),
(
"cudaVDPAUGetDevice",
("hipVDPAUGetDevice", CONV_VDPAU, API_RUNTIME, HIP_UNSUPPORTED),
),
(
"cudaVDPAUSetVDPAUDevice",
("hipVDPAUSetDevice", CONV_VDPAU, API_RUNTIME, HIP_UNSUPPORTED),
),
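    # EGL stream mappings (all flagged HIP_UNSUPPORTED)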
(
"cudaEGLStreamConsumerAcquireFrame",
(
"hipEGLStreamConsumerAcquireFrame",
CONV_EGL,
API_RUNTIME,
HIP_UNSUPPORTED,
),
),
(
"cudaEGLStreamConsumerConnect",
("hipEGLStreamConsumerConnect", CONV_EGL, API_RUNTIME, HIP_UNSUPPORTED),
),
(
"cudaEGLStreamConsumerConnectWithFlags",
(
"hipEGLStreamConsumerConnectWithFlags",
CONV_EGL,
API_RUNTIME,
HIP_UNSUPPORTED,
),
),
(
"cudaEGLStreamConsumerReleaseFrame",
(
"hipEGLStreamConsumerReleaseFrame",
CONV_EGL,
API_RUNTIME,
HIP_UNSUPPORTED,
),
),
(
"cudaEGLStreamProducerConnect",
("hipEGLStreamProducerConnect", CONV_EGL, API_RUNTIME, HIP_UNSUPPORTED),
),
(
"cudaEGLStreamProducerDisconnect",
("hipEGLStreamProducerDisconnect", CONV_EGL, API_RUNTIME, HIP_UNSUPPORTED),
),
(
"cudaEGLStreamProducerPresentFrame",
(
"hipEGLStreamProducerPresentFrame",
CONV_EGL,
API_RUNTIME,
HIP_UNSUPPORTED,
),
),
(
"cudaEGLStreamProducerReturnFrame",
("hipEGLStreamProducerReturnFrame", CONV_EGL, API_RUNTIME, HIP_UNSUPPORTED),
),
(
"cudaGraphicsEGLRegisterImage",
("hipGraphicsEGLRegisterImage", CONV_EGL, API_RUNTIME, HIP_UNSUPPORTED),
),
(
"cudaGraphicsResourceGetMappedEglFrame",
(
"hipGraphicsResourceGetMappedEglFrame",
CONV_EGL,
API_RUNTIME,
HIP_UNSUPPORTED,
),
),
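    # cuBLAS -> hipBLAS mappings: functions, enums, and status codes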
("cublasInit", ("hipblasInit", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)),
(
"cublasShutdown",
("hipblasShutdown", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED),
),
(
"cublasGetVersion",
("hipblasGetVersion", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED),
),
(
"cublasGetError",
("hipblasGetError", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED),
),
("cublasAlloc", ("hipblasAlloc", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)),
("cublasFree", ("hipblasFree", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)),
(
"cublasSetKernelStream",
("hipblasSetKernelStream", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED),
),
(
"cublasGetAtomicsMode",
("hipblasGetAtomicsMode", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED),
),
(
"cublasSetAtomicsMode",
("hipblasSetAtomicsMode", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED),
),
(
"cublasGetMathMode",
("hipblasGetMathMode", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED),
),
(
"cublasSetMathMode",
("hipblasSetMathMode", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED),
),
("CUBLAS_OP_N", ("HIPBLAS_OP_N", CONV_NUMERIC_LITERAL, API_BLAS)),
(
"CUBLAS_OP_T",
("HIPBLAS_OP_T", CONV_NUMERIC_LITERAL, API_BLAS),
),
(
"CUBLAS_OP_C",
("HIPBLAS_OP_C", CONV_NUMERIC_LITERAL, API_BLAS),
),
(
"CUBLAS_STATUS_SUCCESS",
("HIPBLAS_STATUS_SUCCESS", CONV_NUMERIC_LITERAL, API_BLAS),
),
(
"CUBLAS_STATUS_NOT_INITIALIZED",
("HIPBLAS_STATUS_NOT_INITIALIZED", CONV_NUMERIC_LITERAL, API_BLAS),
),
(
"CUBLAS_STATUS_ALLOC_FAILED",
("HIPBLAS_STATUS_ALLOC_FAILED", CONV_NUMERIC_LITERAL, API_BLAS),
),
(
"CUBLAS_STATUS_INVALID_VALUE",
("HIPBLAS_STATUS_INVALID_VALUE", CONV_NUMERIC_LITERAL, API_BLAS),
),
(
"CUBLAS_STATUS_MAPPING_ERROR",
("HIPBLAS_STATUS_MAPPING_ERROR", CONV_NUMERIC_LITERAL, API_BLAS),
),
(
"CUBLAS_STATUS_EXECUTION_FAILED",
("HIPBLAS_STATUS_EXECUTION_FAILED", CONV_NUMERIC_LITERAL, API_BLAS),
),
(
"CUBLAS_STATUS_INTERNAL_ERROR",
("HIPBLAS_STATUS_INTERNAL_ERROR", CONV_NUMERIC_LITERAL, API_BLAS),
),
(
"CUBLAS_STATUS_NOT_SUPPORTED",
("HIPBLAS_STATUS_NOT_SUPPORTED", CONV_NUMERIC_LITERAL, API_BLAS),
),
(
"CUBLAS_STATUS_ARCH_MISMATCH",
("HIPBLAS_STATUS_ARCH_MISMATCH", CONV_NUMERIC_LITERAL, API_BLAS),
),
(
"CUBLAS_FILL_MODE_LOWER",
("HIPBLAS_FILL_MODE_LOWER", CONV_NUMERIC_LITERAL, API_BLAS),
),
(
"CUBLAS_FILL_MODE_UPPER",
("HIPBLAS_FILL_MODE_UPPER", CONV_NUMERIC_LITERAL, API_BLAS),
),
(
"CUBLAS_DIAG_NON_UNIT",
("HIPBLAS_DIAG_NON_UNIT", CONV_NUMERIC_LITERAL, API_BLAS),
),
("CUBLAS_DIAG_UNIT", ("HIPBLAS_DIAG_UNIT", CONV_NUMERIC_LITERAL, API_BLAS)),
("CUBLAS_SIDE_LEFT", ("HIPBLAS_SIDE_LEFT", CONV_NUMERIC_LITERAL, API_BLAS)),
("CUBLAS_SIDE_RIGHT", ("HIPBLAS_SIDE_RIGHT", CONV_NUMERIC_LITERAL, API_BLAS)),
(
"CUBLAS_POINTER_MODE_HOST",
("HIPBLAS_POINTER_MODE_HOST", CONV_NUMERIC_LITERAL, API_BLAS),
),
(
"CUBLAS_POINTER_MODE_DEVICE",
("HIPBLAS_POINTER_MODE_DEVICE", CONV_NUMERIC_LITERAL, API_BLAS),
),
(
"CUBLAS_ATOMICS_NOT_ALLOWED",
(
"HIPBLAS_ATOMICS_NOT_ALLOWED",
CONV_NUMERIC_LITERAL,
API_BLAS,
HIP_UNSUPPORTED,
),
),
(
"CUBLAS_ATOMICS_ALLOWED",
(
"HIPBLAS_ATOMICS_ALLOWED",
CONV_NUMERIC_LITERAL,
API_BLAS,
HIP_UNSUPPORTED,
),
),
(
"CUBLAS_DATA_FLOAT",
(
"HIPBLAS_DATA_FLOAT",
CONV_NUMERIC_LITERAL,
API_BLAS,
HIP_UNSUPPORTED,
),
),
(
"CUBLAS_DATA_DOUBLE",
(
"HIPBLAS_DATA_DOUBLE",
CONV_NUMERIC_LITERAL,
API_BLAS,
HIP_UNSUPPORTED,
),
),
(
"CUBLAS_DATA_HALF",
("HIPBLAS_DATA_HALF", CONV_NUMERIC_LITERAL, API_BLAS, HIP_UNSUPPORTED),
),
(
"CUBLAS_DATA_INT8",
("HIPBLAS_DATA_INT8", CONV_NUMERIC_LITERAL, API_BLAS, HIP_UNSUPPORTED),
),
("CUBLAS_GEMM_DEFAULT", ("HIPBLAS_GEMM_DEFAULT", CONV_NUMERIC_LITERAL, API_BLAS)),
("CUBLAS_GEMM_DEFAULT_TENSOR_OP", ("HIPBLAS_GEMM_DEFAULT", CONV_NUMERIC_LITERAL, API_BLAS)),
("cublasCreate", ("hipblasCreate", CONV_MATH_FUNC, API_BLAS)),
("cublasDestroy", ("hipblasDestroy", CONV_MATH_FUNC, API_BLAS)),
("cublasSetVector", ("hipblasSetVector", CONV_MATH_FUNC, API_BLAS)),
("cublasGetVector", ("hipblasGetVector", CONV_MATH_FUNC, API_BLAS)),
(
"cublasSetVectorAsync",
("hipblasSetVectorAsync", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED),
),
(
"cublasGetVectorAsync",
("hipblasGetVectorAsync", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED),
),
("cublasSetMatrix", ("hipblasSetMatrix", CONV_MATH_FUNC, API_BLAS)),
("cublasGetMatrix", ("hipblasGetMatrix", CONV_MATH_FUNC, API_BLAS)),
(
"cublasGetMatrixAsync",
("hipblasGetMatrixAsync", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED),
),
(
"cublasSetMatrixAsync",
("hipblasSetMatrixAsync", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED),
),
("cublasXerbla", ("hipblasXerbla", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)),
("cublasSnrm2", ("hipblasSnrm2", CONV_MATH_FUNC, API_BLAS)),
("cublasDnrm2", ("hipblasDnrm2", CONV_MATH_FUNC, API_BLAS)),
("cublasScnrm2", ("hipblasScnrm2", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)),
("cublasDznrm2", ("hipblasDznrm2", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)),
(
"cublasNrm2Ex",
("hipblasNrm2Ex", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED),
),
("cublasSdot", ("hipblasSdot", CONV_MATH_FUNC, API_BLAS)),
(
"cublasSdotBatched",
("hipblasSdotBatched", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED),
),
("cublasDdot", ("hipblasDdot", CONV_MATH_FUNC, API_BLAS)),
(
"cublasDdotBatched",
("hipblasDdotBatched", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED),
),
("cublasCdotu", ("hipblasCdotu", CONV_MATH_FUNC, API_BLAS)),
("cublasCdotc", ("hipblasCdotc", CONV_MATH_FUNC, API_BLAS)),
("cublasZdotu", ("hipblasZdotu", CONV_MATH_FUNC, API_BLAS)),
("cublasZdotc", ("hipblasZdotc", CONV_MATH_FUNC, API_BLAS)),
("cublasSscal", ("hipblasSscal", CONV_MATH_FUNC, API_BLAS)),
(
"cublasSscalBatched",
("hipblasSscalBatched", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED),
),
("cublasDscal", ("hipblasDscal", CONV_MATH_FUNC, API_BLAS)),
(
"cublasDscalBatched",
("hipblasDscalBatched", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED),
),
("cublasCscal", ("hipblasCscal", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)),
("cublasCsscal", ("hipblasCsscal", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)),
("cublasZscal", ("hipblasZscal", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)),
("cublasZdscal", ("hipblasZdscal", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)),
("cublasSaxpy", ("hipblasSaxpy", CONV_MATH_FUNC, API_BLAS)),
(
"cublasSaxpyBatched",
("hipblasSaxpyBatched", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED),
),
("cublasDaxpy", ("hipblasDaxpy", CONV_MATH_FUNC, API_BLAS)),
("cublasCaxpy", ("hipblasCaxpy", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)),
("cublasZaxpy", ("hipblasZaxpy", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)),
("cublasScopy", ("hipblasScopy", CONV_MATH_FUNC, API_BLAS)),
(
"cublasScopyBatched",
("hipblasScopyBatched", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED),
),
("cublasDcopy", ("hipblasDcopy", CONV_MATH_FUNC, API_BLAS)),
(
"cublasDcopyBatched",
("hipblasDcopyBatched", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED),
),
("cublasCcopy", ("hipblasCcopy", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)),
("cublasZcopy", ("hipblasZcopy", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)),
("cublasSswap", ("hipblasSswap", CONV_MATH_FUNC, API_BLAS)),
("cublasDswap", ("hipblasDswap", CONV_MATH_FUNC, API_BLAS)),
("cublasCswap", ("hipblasCswap", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)),
("cublasZswap", ("hipblasZswap", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)),
("cublasIsamax", ("hipblasIsamax", CONV_MATH_FUNC, API_BLAS)),
("cublasIdamax", ("hipblasIdamax", CONV_MATH_FUNC, API_BLAS)),
("cublasIcamax", ("hipblasIcamax", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)),
("cublasIzamax", ("hipblasIzamax", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)),
("cublasIsamin", ("hipblasIsamin", CONV_MATH_FUNC, API_BLAS)),
("cublasIdamin", ("hipblasIdamin", CONV_MATH_FUNC, API_BLAS)),
("cublasIcamin", ("hipblasIcamin", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)),
("cublasIzamin", ("hipblasIzamin", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)),
("cublasSasum", ("hipblasSasum", CONV_MATH_FUNC, API_BLAS)),
(
"cublasSasumBatched",
("hipblasSasumBatched", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED),
),
("cublasDasum", ("hipblasDasum", CONV_MATH_FUNC, API_BLAS)),
(
"cublasDasumBatched",
("hipblasDasumBatched", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED),
),
("cublasScasum", ("hipblasScasum", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)),
("cublasDzasum", ("hipblasDzasum", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)),
("cublasSrot", ("hipblasSrot", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)),
("cublasDrot", ("hipblasDrot", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)),
("cublasCrot", ("hipblasCrot", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)),
("cublasCsrot", ("hipblasCsrot", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)),
("cublasZrot", ("hipblasZrot", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)),
("cublasZdrot", ("hipblasZdrot", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)),
("cublasSrotg", ("hipblasSrotg", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)),
("cublasDrotg", ("hipblasDrotg", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)),
("cublasCrotg", ("hipblasCrotg", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)),
("cublasZrotg", ("hipblasZrotg", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)),
("cublasSrotm", ("hipblasSrotm", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)),
("cublasDrotm", ("hipblasDrotm", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)),
("cublasSrotmg", ("hipblasSrotmg", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)),
("cublasDrotmg", ("hipblasDrotmg", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)),
("cublasSgemv", ("hipblasSgemv", CONV_MATH_FUNC, API_BLAS)),
(
"cublasSgemvBatched",
("hipblasSgemvBatched", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED),
),
("cublasDgemv", ("hipblasDgemv", CONV_MATH_FUNC, API_BLAS)),
("cublasCgemv", ("hipblasCgemv", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)),
("cublasZgemv", ("hipblasZgemv", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)),
("cublasSgbmv", ("hipblasSgbmv", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)),
("cublasDgbmv", ("hipblasDgbmv", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)),
("cublasCgbmv", ("hipblasCgbmv", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)),
("cublasZgbmv", ("hipblasZgbmv", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)),
("cublasStrmv", ("hipblasStrmv", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)),
("cublasDtrmv", ("hipblasDtrmv", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)),
("cublasCtrmv", ("hipblasCtrmv", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)),
("cublasZtrmv", ("hipblasZtrmv", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)),
("cublasStbmv", ("hipblasStbmv", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)),
("cublasDtbmv", ("hipblasDtbmv", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)),
("cublasCtbmv", ("hipblasCtbmv", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)),
("cublasZtbmv", ("hipblasZtbmv", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)),
("cublasStpmv", ("hipblasStpmv", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)),
("cublasDtpmv", ("hipblasDtpmv", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)),
("cublasCtpmv", ("hipblasCtpmv", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)),
("cublasZtpmv", ("hipblasZtpmv", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)),
("cublasStrsv", ("hipblasStrsv", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)),
("cublasDtrsv", ("hipblasDtrsv", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)),
("cublasCtrsv", ("hipblasCtrsv", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)),
("cublasZtrsv", ("hipblasZtrsv", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)),
("cublasStpsv", ("hipblasStpsv", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)),
("cublasDtpsv", ("hipblasDtpsv", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)),
("cublasCtpsv", ("hipblasCtpsv", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)),
("cublasZtpsv", ("hipblasZtpsv", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)),
("cublasStbsv", ("hipblasStbsv", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)),
("cublasDtbsv", ("hipblasDtbsv", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)),
("cublasCtbsv", ("hipblasCtbsv", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)),
("cublasZtbsv", ("hipblasZtbsv", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)),
("cublasSsymv", ("hipblasSsymv", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)),
("cublasDsymv", ("hipblasDsymv", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)),
("cublasCsymv", ("hipblasCsymv", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)),
("cublasZsymv", ("hipblasZsymv", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)),
("cublasChemv", ("hipblasChemv", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)),
("cublasZhemv", ("hipblasZhemv", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)),
("cublasSsbmv", ("hipblasSsbmv", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)),
("cublasDsbmv", ("hipblasDsbmv", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)),
("cublasChbmv", ("hipblasChbmv", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)),
("cublasZhbmv", ("hipblasZhbmv", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)),
("cublasSspmv", ("hipblasSspmv", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)),
("cublasDspmv", ("hipblasDspmv", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)),
("cublasChpmv", ("hipblasChpmv", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)),
("cublasZhpmv", ("hipblasZhpmv", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)),
("cublasSger", ("hipblasSger", CONV_MATH_FUNC, API_BLAS)),
("cublasDger", ("hipblasDger", CONV_MATH_FUNC, API_BLAS)),
("cublasCgeru", ("hipblasCgeru", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)),
("cublasCgerc", ("hipblasCgerc", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)),
("cublasZgeru", ("hipblasZgeru", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)),
("cublasZgerc", ("hipblasZgerc", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)),
("cublasSsyr", ("hipblasSsyr", CONV_MATH_FUNC, API_BLAS)),
("cublasDsyr", ("hipblasDsyr", CONV_MATH_FUNC, API_BLAS)),
("cublasCher", ("hipblasCher", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)),
("cublasZher", ("hipblasZher", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)),
("cublasSspr", ("hipblasSspr", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)),
("cublasDspr", ("hipblasDspr", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)),
("cublasChpr", ("hipblasChpr", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)),
("cublasZhpr", ("hipblasZhpr", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)),
("cublasSsyr2", ("hipblasSsyr2", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)),
("cublasDsyr2", ("hipblasDsyr2", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)),
("cublasCher2", ("hipblasCher2", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)),
("cublasZher2", ("hipblasZher2", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)),
("cublasSspr2", ("hipblasSspr2", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)),
("cublasDspr2", ("hipblasDspr2", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)),
("cublasChpr2", ("hipblasChpr2", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)),
("cublasZhpr2", ("hipblasZhpr2", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)),
(
"cublasSgemmBatched",
("hipblasSgemmBatched", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED),
),
(
"cublasDgemmBatched",
("hipblasDgemmBatched", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED),
),
(
"cublasHgemmBatched",
("hipblasHgemmBatched", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED),
),
(
"cublasSgemmStridedBatched",
("hipblasSgemmStridedBatched", CONV_MATH_FUNC, API_BLAS),
),
(
"cublasDgemmStridedBatched",
("hipblasDgemmStridedBatched", CONV_MATH_FUNC, API_BLAS),
),
(
"cublasHgemmStridedBatched",
("hipblasHgemmStridedBatched", CONV_MATH_FUNC, API_BLAS),
),
(
"cublasCgemmBatched",
("hipblasCgemmBatched", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED),
),
(
"cublasCgemm3mBatched",
("hipblasCgemm3mBatched", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED),
),
(
"cublasZgemmBatched",
("hipblasZgemmBatched", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED),
),
(
"cublasCgemmStridedBatched",
(
"hipblasCgemmStridedBatched",
CONV_MATH_FUNC,
API_BLAS,
HIP_UNSUPPORTED,
),
),
(
"cublasCgemm3mStridedBatched",
(
"hipblasCgemm3mStridedBatched",
CONV_MATH_FUNC,
API_BLAS,
HIP_UNSUPPORTED,
),
),
(
"cublasZgemmStridedBatched",
(
"hipblasZgemmStridedBatched",
CONV_MATH_FUNC,
API_BLAS,
HIP_UNSUPPORTED,
),
),
(
"cublasHgemmStridedBatched",
(
"hipblasHgemmStridedBatched",
CONV_MATH_FUNC,
API_BLAS,
HIP_UNSUPPORTED,
),
),
("cublasSgemm", ("hipblasSgemm", CONV_MATH_FUNC, API_BLAS)),
("cublasDgemm", ("hipblasDgemm", CONV_MATH_FUNC, API_BLAS)),
("cublasCgemm", ("hipblasCgemm", CONV_MATH_FUNC, API_BLAS)),
("cublasZgemm", ("hipblasZgemm", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)),
("cublasHgemm", ("hipblasHgemm", CONV_MATH_FUNC, API_BLAS)),
("cublasSsyrk", ("hipblasSsyrk", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)),
("cublasDsyrk", ("hipblasDsyrk", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)),
("cublasCsyrk", ("hipblasCsyrk", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)),
("cublasZsyrk", ("hipblasZsyrk", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)),
("cublasCherk", ("hipblasCherk", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)),
("cublasZherk", ("hipblasZherk", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)),
("cublasSsyr2k", ("hipblasSsyr2k", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)),
("cublasDsyr2k", ("hipblasDsyr2k", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)),
("cublasCsyr2k", ("hipblasCsyr2k", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)),
("cublasZsyr2k", ("hipblasZyr2k", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)),
("cublasSsyrkx", ("hipblasSsyrkx", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)),
("cublasDsyrkx", ("hipblasDsyrkx", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)),
("cublasCsyrkx", ("hipblasCsyrkx", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)),
("cublasZsyrkx", ("hipblasZsyrkx", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)),
("cublasCher2k", ("hipblasCher2k", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)),
("cublasZher2k", ("hipblasZher2k", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)),
("cublasCherkx", ("hipblasCherkx", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)),
("cublasZherkx", ("hipblasZherkx", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)),
("cublasSsymm", ("hipblasSsymm", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)),
("cublasDsymm", ("hipblasDsymm", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)),
("cublasCsymm", ("hipblasCsymm", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)),
("cublasZsymm", ("hipblasZsymm", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)),
("cublasChemm", ("hipblasChemm", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)),
("cublasZhemm", ("hipblasZhemm", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)),
("cublasStrsm", ("hipblasStrsm", CONV_MATH_FUNC, API_BLAS)),
("cublasDtrsm", ("hipblasDtrsm", CONV_MATH_FUNC, API_BLAS)),
("cublasCtrsm", ("hipblasCtrsm", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)),
("cublasZtrsm", ("hipblasZtrsm", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)),
(
"cublasStrsmBatched",
("hipblasStrsmBatched", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED),
),
(
"cublasDtrsmBatched",
("hipblasDtrsmBatched", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED),
),
(
"cublasCtrsmBatched",
("hipblasCtrsmBatched", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED),
),
(
"cublasZtrsmBatched",
("hipblasZtrsmBatched", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED),
),
("cublasStrmm", ("hipblasStrmm", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)),
("cublasDtrmm", ("hipblasDtrmm", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)),
("cublasCtrmm", ("hipblasCtrmm", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)),
("cublasZtrmm", ("hipblasZtrmm", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)),
("cublasSgeam", ("hipblasSgeam", CONV_MATH_FUNC, API_BLAS)),
("cublasDgeam", ("hipblasDgeam", CONV_MATH_FUNC, API_BLAS)),
("cublasCgeam", ("hipblasCgeam", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)),
("cublasZgeam", ("hipblasZgeam", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)),
(
"cublasSgetrfBatched",
("hipblasSgetrfBatched", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED),
),
(
"cublasDgetrfBatched",
("hipblasDgetrfBatched", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED),
),
(
"cublasCgetrfBatched",
("hipblasCgetrfBatched", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED),
),
(
"cublasZgetrfBatched",
("hipblasZgetrfBatched", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED),
),
(
"cublasSgetriBatched",
("hipblasSgetriBatched", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED),
),
(
"cublasDgetriBatched",
("hipblasDgetriBatched", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED),
),
(
"cublasCgetriBatched",
("hipblasCgetriBatched", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED),
),
(
"cublasZgetriBatched",
("hipblasZgetriBatched", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED),
),
(
"cublasSgetrsBatched",
("hipblasSgetrsBatched", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED),
),
(
"cublasDgetrsBatched",
("hipblasDgetrsBatched", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED),
),
(
"cublasCgetrsBatched",
("hipblasCgetrsBatched", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED),
),
(
"cublasZgetrsBatched",
("hipblasZgetrsBatched", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED),
),
(
"cublasStrsmBatched",
("hipblasStrsmBatched", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED),
),
(
"cublasDtrsmBatched",
("hipblasDtrsmBatched", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED),
),
(
"cublasCtrsmBatched",
("hipblasCtrsmBatched", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED),
),
(
"cublasZtrsmBatched",
("hipblasZtrsmBatched", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED),
),
(
"cublasSmatinvBatched",
("hipblasSmatinvBatched", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED),
),
(
"cublasDmatinvBatched",
("hipblasDmatinvBatched", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED),
),
(
"cublasCmatinvBatched",
("hipblasCmatinvBatched", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED),
),
(
"cublasZmatinvBatched",
("hipblasZmatinvBatched", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED),
),
(
"cublasSgeqrfBatched",
("hipblasSgeqrfBatched", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED),
),
(
"cublasDgeqrfBatched",
("hipblasDgeqrfBatched", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED),
),
(
"cublasCgeqrfBatched",
("hipblasCgeqrfBatched", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED),
),
(
"cublasZgeqrfBatched",
("hipblasZgeqrfBatched", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED),
),
(
"cublasSgelsBatched",
("hipblasSgelsBatched", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED),
),
(
"cublasDgelsBatched",
("hipblasDgelsBatched", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED),
),
(
"cublasCgelsBatched",
("hipblasCgelsBatched", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED),
),
(
"cublasZgelsBatched",
("hipblasZgelsBatched", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED),
),
("cublasSdgmm", ("hipblasSdgmm", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)),
("cublasDdgmm", ("hipblasDdgmm", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)),
("cublasCdgmm", ("hipblasCdgmm", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)),
("cublasZdgmm", ("hipblasZdgmm", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)),
("cublasStpttr", ("hipblasStpttr", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)),
("cublasDtpttr", ("hipblasDtpttr", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)),
("cublasCtpttr", ("hipblasCtpttr", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)),
("cublasZtpttr", ("hipblasZtpttr", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)),
("cublasStrttp", ("hipblasStrttp", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)),
("cublasDtrttp", ("hipblasDtrttp", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)),
("cublasCtrttp", ("hipblasCtrttp", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)),
("cublasZtrttp", ("hipblasZtrttp", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)),
("cublasCreate_v2", ("hipblasCreate_v2", CONV_MATH_FUNC, API_BLAS)),
("cublasDestroy_v2", ("hipblasDestroy_v2", CONV_MATH_FUNC, API_BLAS)),
(
"cublasGetVersion_v2",
("hipblasGetVersion_v2", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED),
),
("cublasSetWorkspace", ("hipblasSetWorkspace", CONV_MATH_FUNC, API_BLAS)),
("cublasSetStream", ("hipblasSetStream", CONV_MATH_FUNC, API_BLAS)),
("cublasGetStream", ("hipblasGetStream", CONV_MATH_FUNC, API_BLAS)),
("cublasSetStream_v2", ("hipblasSetStream_v2", CONV_MATH_FUNC, API_BLAS)),
("cublasGetStream_v2", ("hipblasGetStream_v2", CONV_MATH_FUNC, API_BLAS)),
(
"cublasGetPointerMode",
("hipblasGetPointerMode", CONV_MATH_FUNC, API_BLAS),
),
(
"cublasSetPointerMode",
("hipblasSetPointerMode", CONV_MATH_FUNC, API_BLAS),
),
(
"cublasGetPointerMode_v2",
("hipblasGetPointerMode_v2", CONV_MATH_FUNC, API_BLAS),
),
(
"cublasSetPointerMode_v2",
("hipblasSetPointerMode_v2", CONV_MATH_FUNC, API_BLAS),
),
("cublasSgemv_v2", ("hipblasSgemv_v2", CONV_MATH_FUNC, API_BLAS)),
("cublasDgemv_v2", ("hipblasDgemv_v2", CONV_MATH_FUNC, API_BLAS)),
(
"cublasCgemv_v2",
("hipblasCgemv_v2", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED),
),
(
"cublasZgemv_v2",
("hipblasZgemv_v2", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED),
),
(
"cublasSgbmv_v2",
("hipblasSgbmv_v2", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED),
),
(
"cublasDgbmv_v2",
("hipblasDgbmv_v2", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED),
),
(
"cublasCgbmv_v2",
("hipblasCgbmv_v2", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED),
),
(
"cublasZgbmv_v2",
("hipblasZgbmv_v2", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED),
),
(
"cublasStrmv_v2",
("hipblasStrmv_v2", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED),
),
(
"cublasDtrmv_v2",
("hipblasDtrmv_v2", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED),
),
(
"cublasCtrmv_v2",
("hipblasCtrmv_v2", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED),
),
(
"cublasZtrmv_v2",
("hipblasZtrmv_v2", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED),
),
(
"cublasStbmv_v2",
("hipblasStbmv_v2", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED),
),
(
"cublasDtbmv_v2",
("hipblasDtbmv_v2", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED),
),
(
"cublasCtbmv_v2",
("hipblasCtbmv_v2", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED),
),
(
"cublasZtbmv_v2",
("hipblasZtbmv_v2", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED),
),
(
"cublasStpmv_v2",
("hipblasStpmv_v2", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED),
),
(
"cublasDtpmv_v2",
("hipblasDtpmv_v2", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED),
),
(
"cublasCtpmv_v2",
("hipblasCtpmv_v2", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED),
),
(
"cublasZtpmv_v2",
("hipblasZtpmv_v2", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED),
),
(
"cublasStrsv_v2",
("hipblasStrsv_v2", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED),
),
(
"cublasDtrsv_v2",
("hipblasDtrsv_v2", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED),
),
(
"cublasCtrsv_v2",
("hipblasCtrsv_v2", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED),
),
(
"cublasZtrsv_v2",
("hipblasZtrsv_v2", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED),
),
(
"cublasStpsv_v2",
("hipblasStpsv_v2", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED),
),
(
"cublasDtpsv_v2",
("hipblasDtpsv_v2", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED),
),
(
"cublasCtpsv_v2",
("hipblasCtpsv_v2", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED),
),
(
"cublasZtpsv_v2",
("hipblasZtpsv_v2", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED),
),
(
"cublasStbsv_v2",
("hipblasStbsv_v2", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED),
),
(
"cublasDtbsv_v2",
("hipblasDtbsv_v2", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED),
),
(
"cublasCtbsv_v2",
("hipblasCtbsv_v2", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED),
),
(
"cublasZtbsv_v2",
("hipblasZtbsv_v2", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED),
),
(
"cublasSsymv_v2",
("hipblasSsymv_v2", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED),
),
(
"cublasDsymv_v2",
("hipblasDsymv_v2", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED),
),
(
"cublasCsymv_v2",
("hipblasCsymv_v2", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED),
),
(
"cublasZsymv_v2",
("hipblasZsymv_v2", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED),
),
(
"cublasChemv_v2",
("hipblasChemv_v2", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED),
),
(
"cublasZhemv_v2",
("hipblasZhemv_v2", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED),
),
(
"cublasSsbmv_v2",
("hipblasSsbmv_v2", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED),
),
(
"cublasDsbmv_v2",
("hipblasDsbmv_v2", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED),
),
(
"cublasChbmv_v2",
("hipblasChbmv_v2", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED),
),
(
"cublasZhbmv_v2",
("hipblasZhbmv_v2", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED),
),
(
"cublasSspmv_v2",
("hipblasSspmv_v2", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED),
),
(
"cublasDspmv_v2",
("hipblasDspmv_v2", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED),
),
(
"cublasChpmv_v2",
("hipblasChpmv_v2", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED),
),
(
"cublasZhpmv_v2",
("hipblasZhpmv_v2", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED),
),
("cublasSger_v2", ("hipblasSger_v2", CONV_MATH_FUNC, API_BLAS)),
("cublasDger_v2", ("hipblasDger_v2", CONV_MATH_FUNC, API_BLAS)),
(
"cublasCgeru_v2",
("hipblasCgeru_v2", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED),
),
(
"cublasCgerc_v2",
("hipblasCergc_v2", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED),
),
(
"cublasZgeru_v2",
("hipblasZgeru_v2", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED),
),
(
"cublasZgerc_v2",
("hipblasZgerc_v2", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED),
),
("cublasSsyr_v2", ("hipblasSsyr_v2", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)),
("cublasDsyr_v2", ("hipblasDsyr_v2", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)),
("cublasCsyr_v2", ("hipblasCsyr_v2", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)),
("cublasZsyr_v2", ("hipblasZsyr_v2", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)),
("cublasCher_v2", ("hipblasCher_v2", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)),
("cublasZher_v2", ("hipblasZher_v2", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)),
("cublasSspr_v2", ("hipblasSspr_v2", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)),
("cublasDspr_v2", ("hipblasDspr_v2", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)),
("cublasChpr_v2", ("hipblasChpr_v2", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)),
("cublasZhpr_v2", ("hipblasZhpr_v2", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)),
(
"cublasSsyr2_v2",
("hipblasSsyr2_v2", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED),
),
(
"cublasDsyr2_v2",
("hipblasDsyr2_v2", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED),
),
(
"cublasCsyr2_v2",
("hipblasCsyr2_v2", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED),
),
(
"cublasZsyr2_v2",
("hipblasZsyr2_v2", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED),
),
(
"cublasCher2_v2",
("hipblasCher2_v2", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED),
),
(
"cublasZher2_v2",
("hipblasZher2_v2", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED),
),
(
"cublasSspr2_v2",
("hipblasSspr2_v2", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED),
),
(
"cublasDspr2_v2",
("hipblasDspr2_v2", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED),
),
(
"cublasChpr2_v2",
("hipblasChpr2_v2", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED),
),
(
"cublasZhpr2_v2",
("hipblasZhpr2_v2", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED),
),
("cublasSgemm_v2", ("hipblasSgemm_v2", CONV_MATH_FUNC, API_BLAS)),
("cublasDgemm_v2", ("hipblasDgemm_v2", CONV_MATH_FUNC, API_BLAS)),
(
"cublasCgemm_v2",
("hipblasCgemm_v2", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED),
),
(
"cublasCgemm3m",
("hipblasCgemm3m", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED),
),
(
"cublasCgemm3mEx",
("hipblasCgemm3mEx", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED),
),
(
"cublasZgemm_v2",
("hipblasZgemm_v2", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED),
),
(
"cublasZgemm3m",
("hipblasZgemm3m", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED),
),
(
"cublasSgemmEx",
("hipblasSgemmEx", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED),
),
("cublasGemmEx", ("hipblasGemmEx", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)),
(
"cublasGemmBatchedEx",
("hipblasGemmBatchedEx", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED),
),
(
"cublasGemmStridedBatchedEx",
("hipblasGemmStridedBatchedEx", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED),
),
(
"cublasCgemmEx",
("hipblasCgemmEx", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED),
),
(
"cublasUint8gemmBias",
("hipblasUint8gemmBias", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED),
),
(
"cublasSsyrk_v2",
("hipblasSsyrk_v2", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED),
),
(
"cublasDsyrk_v2",
("hipblasDsyrk_v2", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED),
),
(
"cublasCsyrk_v2",
("hipblasCsyrk_v2", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED),
),
(
"cublasZsyrk_v2",
("hipblasZsyrk_v2", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED),
),
(
"cublasCsyrkEx",
("hipblasCsyrkEx", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED),
),
(
"cublasCsyrk3mEx",
("hipblasCsyrk3mEx", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED),
),
(
"cublasCherk_v2",
("hipblasCherk_v2", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED),
),
(
"cublasCherkEx",
("hipblasCherkEx", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED),
),
(
"cublasCherk3mEx",
("hipblasCherk3mEx", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED),
),
(
"cublasZherk_v2",
("hipblasZherk_v2", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED),
),
(
"cublasSsyr2k_v2",
("hipblasSsyr2k_v2", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED),
),
(
"cublasDsyr2k_v2",
("hipblasDsyr2k_v2", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED),
),
(
"cublasCsyr2k_v2",
("hipblasCsyr2k_v2", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED),
),
(
"cublasZsyr2k_v2",
("hipblasZsyr2k_v2", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED),
),
(
"cublasCher2k_v2",
("hipblasCher2k_v2", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED),
),
(
"cublasZher2k_v2",
("hipblasZher2k_v2", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED),
),
(
"cublasSsymm_v2",
("hipblasSsymm_v2", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED),
),
(
"cublasDsymm_v2",
("hipblasDsymm_v2", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED),
),
(
"cublasCsymm_v2",
("hipblasCsymm_v2", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED),
),
(
"cublasZsymm_v2",
("hipblasZsymm_v2", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED),
),
(
"cublasChemm_v2",
("hipblasChemm_v2", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED),
),
(
"cublasZhemm_v2",
("hipblasZhemm_v2", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED),
),
(
"cublasStrsm_v2",
("hipblasStrsm_v2", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED),
),
(
"cublasDtrsm_v2",
("hipblasDtrsm_v2", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED),
),
(
"cublasCtrsm_v2",
("hipblasCtrsm_v2", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED),
),
(
"cublasZtrsm_v2",
("hipblasZtrsm_v2", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED),
),
(
"cublasStrmm_v2",
("hipblasStrmm_v2", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED),
),
(
"cublasDtrmm_v2",
("hipblasDtrmm_v2", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED),
),
(
"cublasCtrmm_v2",
("hipblasCtrmm_v2", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED),
),
(
"cublasZtrmm_v2",
("hipblasZtrmm_v2", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED),
),
("cublasSnrm2_v2", ("hipblasSnrm2_v2", CONV_MATH_FUNC, API_BLAS)),
("cublasDnrm2_v2", ("hipblasDnrm2_v2", CONV_MATH_FUNC, API_BLAS)),
(
"cublasScnrm2_v2",
("hipblasScnrm2_v2", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED),
),
(
"cublasDznrm2_v2",
("hipblasDznrm2_v2", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED),
),
("cublasDotEx", ("hipblasDotEx", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)),
("cublasDotcEx", ("hipblasDotcEx", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)),
("cublasSdot_v2", ("hipblasSdot_v2", CONV_MATH_FUNC, API_BLAS)),
("cublasDdot_v2", ("hipblasDdot_v2", CONV_MATH_FUNC, API_BLAS)),
(
"cublasCdotu_v2",
("hipblasCdotu_v2", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED),
),
(
"cublasCdotc_v2",
("hipblasCdotc_v2", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED),
),
(
"cublasZdotu_v2",
("hipblasZdotu_v2", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED),
),
(
"cublasZdotc_v2",
("hipblasZdotc_v2", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED),
),
("cublasScalEx", ("hipblasScalEx", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)),
("cublasSscal_v2", ("hipblasSscal_v2", CONV_MATH_FUNC, API_BLAS)),
("cublasDscal_v2", ("hipblasDscal_v2", CONV_MATH_FUNC, API_BLAS)),
(
"cublasCscal_v2",
("hipblasCscal_v2", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED),
),
(
"cublasCsscal_v2",
("hipblasCsscal_v2", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED),
),
(
"cublasZscal_v2",
("hipblasZcsal_v2", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED),
),
(
"cublasZdscal_v2",
("hipblasZdscal_v2", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED),
),
("cublasAxpyEx", ("hipblasAxpyEx", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)),
("cublasSaxpy_v2", ("hipblasSaxpy_v2", CONV_MATH_FUNC, API_BLAS)),
("cublasDaxpy_v2", ("hipblasDaxpy_v2", CONV_MATH_FUNC, API_BLAS)),
(
"cublasCaxpy_v2",
("hipblasCaxpy_v2", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED),
),
(
"cublasZaxpy_v2",
("hipblasZaxpy_v2", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED),
),
("cublasScopy_v2", ("hipblasScopy_v2", CONV_MATH_FUNC, API_BLAS)),
("cublasDcopy_v2", ("hipblasDcopy_v2", CONV_MATH_FUNC, API_BLAS)),
(
"cublasCcopy_v2",
("hipblasCcopy_v2", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED),
),
(
"cublasZcopy_v2",
("hipblasZcopy_v2", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED),
),
("cublasSswap_v2", ("hipblasSswap_v2", CONV_MATH_FUNC, API_BLAS)),
("cublasDswap_v2", ("hipblasDswap_v2", CONV_MATH_FUNC, API_BLAS)),
(
"cublasCswap_v2",
("hipblasCswap_v2", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED),
),
(
"cublasZswap_v2",
("hipblasZswap_v2", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED),
),
("cublasIsamax_v2", ("hipblasIsamax_v2", CONV_MATH_FUNC, API_BLAS)),
("cublasIdamax_v2", ("hipblasIdamax_v2", CONV_MATH_FUNC, API_BLAS)),
(
"cublasIcamax_v2",
("hipblasIcamax_v2", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED),
),
(
"cublasIzamax_v2",
("hipblasIzamax_v2", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED),
),
("cublasIsamin_v2", ("hipblasIsamin_v2", CONV_MATH_FUNC, API_BLAS)),
("cublasIdamin_v2", ("hipblasIdamin_v2", CONV_MATH_FUNC, API_BLAS)),
(
"cublasIcamin_v2",
("hipblasIcamin_v2", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED),
),
(
"cublasIzamin_v2",
("hipblasIzamin_v2", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED),
),
("cublasSasum_v2", ("hipblasSasum_v2", CONV_MATH_FUNC, API_BLAS)),
("cublasDasum_v2", ("hipblasDasum_v2", CONV_MATH_FUNC, API_BLAS)),
(
"cublasScasum_v2",
("hipblasScasum_v2", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED),
),
(
"cublasDzasum_v2",
("hipblasDzasum_v2", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED),
),
("cublasSrot_v2", ("hipblasSrot_v2", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)),
("cublasDrot_v2", ("hipblasDrot_v2", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)),
("cublasCrot_v2", ("hipblasCrot_v2", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)),
(
"cublasCsrot_v2",
("hipblasCsrot_v2", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED),
),
("cublasZrot_v2", ("hipblasZrot_v2", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)),
(
"cublasZdrot_v2",
("hipblasZdrot_v2", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED),
),
(
"cublasSrotg_v2",
("hipblasSrotg_v2", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED),
),
(
"cublasDrotg_v2",
("hipblasDrotg_v2", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED),
),
(
"cublasCrotg_v2",
("hipblasCrotg_v2", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED),
),
(
"cublasZrotg_v2",
("hipblasZrotg_v2", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED),
),
(
"cublasSrotm_v2",
("hipblasSrotm_v2", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED),
),
(
"cublasDrotm_v2",
("hipblasDrotm_v2", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED),
),
(
"cublasSrotmg_v2",
("hipblasSrotmg_v2", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED),
),
(
"cublasDrotmg_v2",
("hipblasDrotmg_v2", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED),
),
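    # cuBLAS compute-type mappings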
    (
        "cublasComputeType_t",
        ("hipblasComputeType_t", CONV_MATH_FUNC, API_BLAS),
    ),
    (
        "CUBLAS_COMPUTE_32I",
        ("HIPBLAS_COMPUTE_32I", CONV_MATH_FUNC, API_BLAS),
    ),
    (
        "CUBLAS_COMPUTE_32F",
        ("HIPBLAS_COMPUTE_32F", CONV_MATH_FUNC, API_BLAS),
    ),
    (
        "CUBLAS_COMPUTE_32F_FAST_TF32",
        ("HIPBLAS_COMPUTE_32F_FAST_TF32", CONV_MATH_FUNC, API_BLAS),
    ),
    (
        "CUBLAS_COMPUTE_64F",
        ("HIPBLAS_COMPUTE_64F", CONV_MATH_FUNC, API_BLAS),
    ),
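    # cublasLt -> hipblasLt mappings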
("cublasLtEpilogue_t", ("hipblasLtEpilogue_t", CONV_MATH_FUNC, API_BLAS)),
("CUBLASLT_EPILOGUE_DEFAULT", ("HIPBLASLT_EPILOGUE_DEFAULT", CONV_MATH_FUNC, API_BLAS)),
("CUBLASLT_EPILOGUE_RELU", ("HIPBLASLT_EPILOGUE_RELU", CONV_MATH_FUNC, API_BLAS)),
("CUBLASLT_EPILOGUE_BIAS", ("HIPBLASLT_EPILOGUE_BIAS", CONV_MATH_FUNC, API_BLAS)),
("CUBLASLT_EPILOGUE_RELU_BIAS", ("HIPBLASLT_EPILOGUE_RELU_BIAS", CONV_MATH_FUNC, API_BLAS)),
("CUBLASLT_EPILOGUE_GELU", ("HIPBLASLT_EPILOGUE_GELU", CONV_MATH_FUNC, API_BLAS)),
("CUBLASLT_EPILOGUE_GELU_BIAS", ("HIPBLASLT_EPILOGUE_GELU_BIAS", CONV_MATH_FUNC, API_BLAS)),
("cublasLtHandle_t", ("hipblasLtHandle_t", CONV_MATH_FUNC, API_BLAS)),
("cublasLtMatmulDesc_t", ("hipblasLtMatmulDesc_t", CONV_MATH_FUNC, API_BLAS)),
("cublasLtMatmulDescOpaque_t", ("hipblasLtMatmulDescOpaque_t", CONV_MATH_FUNC, API_BLAS)),
("cublasLtMatmulDescAttributes_t", ("hipblasLtMatmulDescAttributes_t", CONV_MATH_FUNC, API_BLAS)),
("CUBLASLT_MATMUL_DESC_TRANSA", ("HIPBLASLT_MATMUL_DESC_TRANSA", CONV_MATH_FUNC, API_BLAS)),
("CUBLASLT_MATMUL_DESC_TRANSB", ("HIPBLASLT_MATMUL_DESC_TRANSB", CONV_MATH_FUNC, API_BLAS)),
("CUBLASLT_MATMUL_DESC_EPILOGUE", ("HIPBLASLT_MATMUL_DESC_EPILOGUE", CONV_MATH_FUNC, API_BLAS)),
("CUBLASLT_MATMUL_DESC_BIAS_POINTER", ("HIPBLASLT_MATMUL_DESC_BIAS_POINTER", CONV_MATH_FUNC, API_BLAS)),
("CUBLASLT_MATMUL_DESC_A_SCALE_POINTER", ("HIPBLASLT_MATMUL_DESC_A_SCALE_POINTER", CONV_MATH_FUNC, API_BLAS)),
("CUBLASLT_MATMUL_DESC_B_SCALE_POINTER", ("HIPBLASLT_MATMUL_DESC_B_SCALE_POINTER", CONV_MATH_FUNC, API_BLAS)),
("CUBLASLT_MATMUL_DESC_D_SCALE_POINTER", ("HIPBLASLT_MATMUL_DESC_D_SCALE_POINTER", CONV_MATH_FUNC, API_BLAS)),
("CUBLASLT_MATMUL_DESC_AMAX_D_POINTER", ("HIPBLASLT_MATMUL_DESC_AMAX_D_POINTER", CONV_MATH_FUNC, API_BLAS)),
("CUBLASLT_MATMUL_DESC_BIAS_DATA_TYPE", ("HIPBLASLT_MATMUL_DESC_BIAS_DATA_TYPE", CONV_MATH_FUNC, API_BLAS)),
("cublasLtMatrixLayout_t", ("hipblasLtMatrixLayout_t", CONV_MATH_FUNC, API_BLAS)),
("cublasLtMatrixLayoutOpaque_t", ("hipblasLtMatrixLayoutOpaque_t", CONV_MATH_FUNC, API_BLAS)),
("cublasLtMatrixLayoutAttribute_t", ("hipblasLtMatrixLayoutAttribute_t", CONV_MATH_FUNC, API_BLAS)),
("cublasLtMatrixLayoutCreate", ("hipblasLtMatrixLayoutCreate", CONV_MATH_FUNC, API_BLAS)),
("cublasLtMatrixLayoutDestroy", ("hipblasLtMatrixLayoutDestroy", CONV_MATH_FUNC, API_BLAS)),
("cublasLtMatrixLayoutSetAttribute", ("hipblasLtMatrixLayoutSetAttribute", CONV_MATH_FUNC, API_BLAS)),
("CUBLASLT_MATRIX_LAYOUT_BATCH_COUNT", ("HIPBLASLT_MATRIX_LAYOUT_BATCH_COUNT", CONV_MATH_FUNC, API_BLAS)),
("CUBLASLT_MATRIX_LAYOUT_STRIDED_BATCH_OFFSET", ("HIPBLASLT_MATRIX_LAYOUT_STRIDED_BATCH_OFFSET", CONV_MATH_FUNC, API_BLAS)),
("cublasLtMatmulPreference_t", ("hipblasLtMatmulPreference_t", CONV_MATH_FUNC, API_BLAS)),
("cublasLtMatmulPreferenceOpaque_t", ("hipblasLtMatmulPreferenceOpaque_t", CONV_MATH_FUNC, API_BLAS)),
("cublasLtMatmulPreferenceAttributes_t", ("hipblasLtMatmulPreferenceAttributes_t", CONV_MATH_FUNC, API_BLAS)),
("CUBLASLT_MATMUL_PREF_SEARCH_MODE", ("HIPBLASLT_MATMUL_PREF_SEARCH_MODE", CONV_MATH_FUNC, API_BLAS)),
("CUBLASLT_MATMUL_PREF_MAX_WORKSPACE_BYTES", ("HIPBLASLT_MATMUL_PREF_MAX_WORKSPACE_BYTES", CONV_MATH_FUNC, API_BLAS)),
("cublasLtMatmulAlgo_t", ("hipblasLtMatmulAlgo_t", CONV_MATH_FUNC, API_BLAS)),
("cublasLtMatmulHeuristicResult_t", ("hipblasLtMatmulHeuristicResult_t", CONV_MATH_FUNC, API_BLAS)),
("cublasLtCreate", ("hipblasLtCreate", CONV_MATH_FUNC, API_BLAS)),
("cublasLtDestroy", ("hipblasLtDestroy", CONV_MATH_FUNC, API_BLAS)),
("cublasLtMatmulDescCreate", ("hipblasLtMatmulDescCreate", CONV_MATH_FUNC, API_BLAS)),
("cublasLtMatmulDescDestroy", ("hipblasLtMatmulDescDestroy", CONV_MATH_FUNC, API_BLAS)),
("cublasLtMatmulDescSetAttribute", ("hipblasLtMatmulDescSetAttribute", CONV_MATH_FUNC, API_BLAS)),
("cublasLtMatmulPreferenceCreate", ("hipblasLtMatmulPreferenceCreate", CONV_MATH_FUNC, API_BLAS)),
("cublasLtMatmulPreferenceDestroy", ("hipblasLtMatmulPreferenceDestroy", CONV_MATH_FUNC, API_BLAS)),
("cublasLtMatmulPreferenceSetAttribute", ("hipblasLtMatmulPreferenceSetAttribute", CONV_MATH_FUNC, API_BLAS)),
("cublasLtMatmulAlgoGetHeuristic", ("hipblasLtMatmulAlgoGetHeuristic", CONV_MATH_FUNC, API_BLAS)),
("cublasLtMatmul", ("hipblasLtMatmul", CONV_MATH_FUNC, API_BLAS)),
(
"CURAND_STATUS_SUCCESS",
("HIPRAND_STATUS_SUCCESS", CONV_NUMERIC_LITERAL, API_RAND),
),
(
"CURAND_STATUS_VERSION_MISMATCH",
("HIPRAND_STATUS_VERSION_MISMATCH", CONV_NUMERIC_LITERAL, API_RAND),
),
(
"CURAND_STATUS_NOT_INITIALIZED",
("HIPRAND_STATUS_NOT_INITIALIZED", CONV_NUMERIC_LITERAL, API_RAND),
),
(
"CURAND_STATUS_ALLOCATION_FAILED",
("HIPRAND_STATUS_ALLOCATION_FAILED", CONV_NUMERIC_LITERAL, API_RAND),
),
(
"CURAND_STATUS_TYPE_ERROR",
("HIPRAND_STATUS_TYPE_ERROR", CONV_NUMERIC_LITERAL, API_RAND),
),
(
"CURAND_STATUS_OUT_OF_RANGE",
("HIPRAND_STATUS_OUT_OF_RANGE", CONV_NUMERIC_LITERAL, API_RAND),
),
(
"CURAND_STATUS_LENGTH_NOT_MULTIPLE",
("HIPRAND_STATUS_LENGTH_NOT_MULTIPLE", CONV_NUMERIC_LITERAL, API_RAND),
),
(
"CURAND_STATUS_DOUBLE_PRECISION_REQUIRED",
(
"HIPRAND_STATUS_DOUBLE_PRECISION_REQUIRED",
CONV_NUMERIC_LITERAL,
API_RAND,
),
),
(
"CURAND_STATUS_LAUNCH_FAILURE",
("HIPRAND_STATUS_LAUNCH_FAILURE", CONV_NUMERIC_LITERAL, API_RAND),
),
(
"CURAND_STATUS_PREEXISTING_FAILURE",
("HIPRAND_STATUS_PREEXISTING_FAILURE", CONV_NUMERIC_LITERAL, API_RAND),
),
(
"CURAND_STATUS_INITIALIZATION_FAILED",
("HIPRAND_STATUS_INITIALIZATION_FAILED", CONV_NUMERIC_LITERAL, API_RAND),
),
(
"CURAND_STATUS_ARCH_MISMATCH",
("HIPRAND_STATUS_ARCH_MISMATCH", CONV_NUMERIC_LITERAL, API_RAND),
),
(
"CURAND_STATUS_INTERNAL_ERROR",
("HIPRAND_STATUS_INTERNAL_ERROR", CONV_NUMERIC_LITERAL, API_RAND),
),
("CURAND_RNG_TEST", ("HIPRAND_RNG_TEST", CONV_NUMERIC_LITERAL, API_RAND)),
(
"mtgp32dc_params_fast_11213",
("mtgp32dc_params_fast_11213", CONV_NUMERIC_LITERAL, API_RAND),
),
(
"CURAND_RNG_PSEUDO_DEFAULT",
("HIPRAND_RNG_PSEUDO_DEFAULT", CONV_NUMERIC_LITERAL, API_RAND),
),
(
"CURAND_RNG_PSEUDO_XORWOW",
("HIPRAND_RNG_PSEUDO_XORWOW", CONV_NUMERIC_LITERAL, API_RAND),
),
(
"CURAND_RNG_PSEUDO_MRG32K3A",
("HIPRAND_RNG_PSEUDO_MRG32K3A", CONV_NUMERIC_LITERAL, API_RAND),
),
(
"CURAND_RNG_PSEUDO_MTGP32",
("HIPRAND_RNG_PSEUDO_MTGP32", CONV_NUMERIC_LITERAL, API_RAND),
),
(
"CURAND_RNG_PSEUDO_MT19937",
("HIPRAND_RNG_PSEUDO_MT19937", CONV_NUMERIC_LITERAL, API_RAND),
),
(
"CURAND_RNG_PSEUDO_PHILOX4_32_10",
("HIPRAND_RNG_PSEUDO_PHILOX4_32_10", CONV_NUMERIC_LITERAL, API_RAND),
),
(
"CURAND_RNG_QUASI_DEFAULT",
("HIPRAND_RNG_QUASI_DEFAULT", CONV_NUMERIC_LITERAL, API_RAND),
),
(
"CURAND_RNG_QUASI_SOBOL32",
("HIPRAND_RNG_QUASI_SOBOL32", CONV_NUMERIC_LITERAL, API_RAND),
),
(
"CURAND_RNG_QUASI_SCRAMBLED_SOBOL32",
("HIPRAND_RNG_QUASI_SCRAMBLED_SOBOL32", CONV_NUMERIC_LITERAL, API_RAND),
),
(
"CURAND_RNG_QUASI_SOBOL64",
("HIPRAND_RNG_QUASI_SOBOL64", CONV_NUMERIC_LITERAL, API_RAND),
),
(
"CURAND_RNG_QUASI_SCRAMBLED_SOBOL64",
("HIPRAND_RNG_QUASI_SCRAMBLED_SOBOL64", CONV_NUMERIC_LITERAL, API_RAND),
),
(
"curand_ORDERING_PSEUDO_BEST",
(
"HIPRAND_ORDERING_PSEUDO_BEST",
CONV_NUMERIC_LITERAL,
API_RAND,
HIP_UNSUPPORTED,
),
),
(
"curand_ORDERING_PSEUDO_DEFAULT",
(
"HIPRAND_ORDERING_PSEUDO_DEFAULT",
CONV_NUMERIC_LITERAL,
API_RAND,
HIP_UNSUPPORTED,
),
),
(
"curand_ORDERING_PSEUDO_SEEDED",
(
"HIPRAND_ORDERING_PSEUDO_SEEDED",
CONV_NUMERIC_LITERAL,
API_RAND,
HIP_UNSUPPORTED,
),
),
(
"curand_ORDERING_QUASI_DEFAULT",
(
"HIPRAND_ORDERING_QUASI_DEFAULT",
CONV_NUMERIC_LITERAL,
API_RAND,
HIP_UNSUPPORTED,
),
),
(
"curand_DIRECTION_VECTORS_32_JOEKUO6",
(
"HIPRAND_DIRECTION_VECTORS_32_JOEKUO6",
CONV_NUMERIC_LITERAL,
API_RAND,
HIP_UNSUPPORTED,
),
),
(
"curand_SCRAMBLED_DIRECTION_VECTORS_32_JOEKUO6",
(
"HIPRAND_SCRAMBLED_DIRECTION_VECTORS_32_JOEKUO6",
CONV_NUMERIC_LITERAL,
API_RAND,
HIP_UNSUPPORTED,
),
),
(
"curand_DIRECTION_VECTORS_64_JOEKUO6",
(
"HIPRAND_DIRECTION_VECTORS_64_JOEKUO6",
CONV_NUMERIC_LITERAL,
API_RAND,
HIP_UNSUPPORTED,
),
),
(
"curand_SCRAMBLED_DIRECTION_VECTORS_64_JOEKUO6",
(
"HIPRAND_SCRAMBLED_DIRECTION_VECTORS_64_JOEKUO6",
CONV_NUMERIC_LITERAL,
API_RAND,
HIP_UNSUPPORTED,
),
),
(
"curand_CHOOSE_BEST",
("HIPRAND_CHOOSE_BEST", CONV_NUMERIC_LITERAL, API_RAND, HIP_UNSUPPORTED),
),
(
"curand_ITR",
("HIPRAND_ITR", CONV_NUMERIC_LITERAL, API_RAND, HIP_UNSUPPORTED),
),
(
"curand_KNUTH",
("HIPRAND_KNUTH", CONV_NUMERIC_LITERAL, API_RAND, HIP_UNSUPPORTED),
),
(
"curand_HITR",
("HIPRAND_HITR", CONV_NUMERIC_LITERAL, API_RAND, HIP_UNSUPPORTED),
),
("curand_M1", ("HIPRAND_M1", CONV_NUMERIC_LITERAL, API_RAND, HIP_UNSUPPORTED)),
("curand_M2", ("HIPRAND_M2", CONV_NUMERIC_LITERAL, API_RAND, HIP_UNSUPPORTED)),
(
"curand_BINARY_SEARCH",
("HIPRAND_BINARY_SEARCH", CONV_NUMERIC_LITERAL, API_RAND, HIP_UNSUPPORTED),
),
(
"curand_DISCRETE_GAUSS",
("HIPRAND_DISCRETE_GAUSS", CONV_NUMERIC_LITERAL, API_RAND, HIP_UNSUPPORTED),
),
(
"curand_REJECTION",
("HIPRAND_REJECTION", CONV_NUMERIC_LITERAL, API_RAND, HIP_UNSUPPORTED),
),
(
"curand_DEVICE_API",
("HIPRAND_DEVICE_API", CONV_NUMERIC_LITERAL, API_RAND, HIP_UNSUPPORTED),
),
(
"curand_FAST_REJECTION",
("HIPRAND_FAST_REJECTION", CONV_NUMERIC_LITERAL, API_RAND, HIP_UNSUPPORTED),
),
(
"curand_3RD",
("HIPRAND_3RD", CONV_NUMERIC_LITERAL, API_RAND, HIP_UNSUPPORTED),
),
(
"curand_DEFINITION",
("HIPRAND_DEFINITION", CONV_NUMERIC_LITERAL, API_RAND, HIP_UNSUPPORTED),
),
(
"curand_POISSON",
("HIPRAND_POISSON", CONV_NUMERIC_LITERAL, API_RAND, HIP_UNSUPPORTED),
),
("curandCreateGenerator", ("hiprandCreateGenerator", CONV_MATH_FUNC, API_RAND)),
(
"curandCreateGeneratorHost",
("hiprandCreateGeneratorHost", CONV_MATH_FUNC, API_RAND),
),
(
"curandCreatePoissonDistribution",
("hiprandCreatePoissonDistribution", CONV_MATH_FUNC, API_RAND),
),
(
"curandDestroyDistribution",
("hiprandDestroyDistribution", CONV_MATH_FUNC, API_RAND),
),
(
"curandDestroyGenerator",
("hiprandDestroyGenerator", CONV_MATH_FUNC, API_RAND),
),
("curandGenerate", ("hiprandGenerate", CONV_MATH_FUNC, API_RAND)),
(
"curandGenerateLogNormal",
("hiprandGenerateLogNormal", CONV_MATH_FUNC, API_RAND),
),
(
"curandGenerateLogNormalDouble",
("hiprandGenerateLogNormalDouble", CONV_MATH_FUNC, API_RAND),
),
(
"curandGenerateLongLong",
("hiprandGenerateLongLong", CONV_MATH_FUNC, API_RAND, HIP_UNSUPPORTED),
),
("curandGenerateNormal", ("hiprandGenerateNormal", CONV_MATH_FUNC, API_RAND)),
(
"curandGenerateNormalDouble",
("hiprandGenerateNormalDouble", CONV_MATH_FUNC, API_RAND),
),
("curandGeneratePoisson", ("hiprandGeneratePoisson", CONV_MATH_FUNC, API_RAND)),
("curandGenerateSeeds", ("hiprandGenerateSeeds", CONV_MATH_FUNC, API_RAND)),
("curandGenerateUniform", ("hiprandGenerateUniform", CONV_MATH_FUNC, API_RAND)),
(
"curandGenerateUniformDouble",
("hiprandGenerateUniformDouble", CONV_MATH_FUNC, API_RAND),
),
(
"curandGetDirectionVectors32",
("hiprandGetDirectionVectors32", CONV_MATH_FUNC, API_RAND, HIP_UNSUPPORTED),
),
(
"curandGetDirectionVectors64",
("hiprandGetDirectionVectors64", CONV_MATH_FUNC, API_RAND, HIP_UNSUPPORTED),
),
(
"curandGetProperty",
("hiprandGetProperty", CONV_MATH_FUNC, API_RAND, HIP_UNSUPPORTED),
),
(
"curandGetScrambleConstants32",
(
"hiprandGetScrambleConstants32",
CONV_MATH_FUNC,
API_RAND,
HIP_UNSUPPORTED,
),
),
(
"curandGetScrambleConstants64",
(
"hiprandGetScrambleConstants64",
CONV_MATH_FUNC,
API_RAND,
HIP_UNSUPPORTED,
),
),
("curandGetVersion", ("hiprandGetVersion", CONV_MATH_FUNC, API_RAND)),
(
"curandSetGeneratorOffset",
("hiprandSetGeneratorOffset", CONV_MATH_FUNC, API_RAND),
),
(
"curandSetGeneratorOrdering",
("hiprandSetGeneratorOrdering", CONV_MATH_FUNC, API_RAND, HIP_UNSUPPORTED),
),
(
"curandSetPseudoRandomGeneratorSeed",
("hiprandSetPseudoRandomGeneratorSeed", CONV_MATH_FUNC, API_RAND),
),
(
"curandSetQuasiRandomGeneratorDimensions",
("hiprandSetQuasiRandomGeneratorDimensions", CONV_MATH_FUNC, API_RAND),
),
("curandSetStream", ("hiprandSetStream", CONV_MATH_FUNC, API_RAND)),
("curand", ("hiprand", CONV_DEVICE_FUNC, API_RAND)),
("curand4", ("hiprand4", CONV_DEVICE_FUNC, API_RAND)),
("curand_init", ("hiprand_init", CONV_DEVICE_FUNC, API_RAND)),
("curand_log_normal", ("hiprand_log_normal", CONV_DEVICE_FUNC, API_RAND)),
(
"curand_log_normal_double",
("hiprand_log_normal_double", CONV_DEVICE_FUNC, API_RAND),
),
("curand_log_normal2", ("hiprand_log_normal2", CONV_DEVICE_FUNC, API_RAND)),
(
"curand_log_normal2_double",
("hiprand_log_normal2_double", CONV_DEVICE_FUNC, API_RAND),
),
("curand_log_normal4", ("hiprand_log_normal4", CONV_DEVICE_FUNC, API_RAND)),
(
"curand_log_normal4_double",
("hiprand_log_normal4_double", CONV_DEVICE_FUNC, API_RAND),
),
(
"curand_mtgp32_single",
("hiprand_mtgp32_single", CONV_DEVICE_FUNC, API_RAND, HIP_UNSUPPORTED),
),
(
"curand_mtgp32_single_specific",
(
"hiprand_mtgp32_single_specific",
CONV_DEVICE_FUNC,
API_RAND,
HIP_UNSUPPORTED,
),
),
(
"curand_mtgp32_specific",
("hiprand_mtgp32_specific", CONV_DEVICE_FUNC, API_RAND, HIP_UNSUPPORTED),
),
("curand_normal", ("hiprand_normal", CONV_DEVICE_FUNC, API_RAND)),
(
"curandMakeMTGP32Constants",
("hiprandMakeMTGP32Constants", CONV_DEVICE_FUNC, API_RAND),
),
(
"curandMakeMTGP32KernelState",
("hiprandMakeMTGP32KernelState", CONV_DEVICE_FUNC, API_RAND),
),
("curand_normal_double", ("hiprand_normal_double", CONV_DEVICE_FUNC, API_RAND)),
("curand_normal2", ("hiprand_normal2", CONV_DEVICE_FUNC, API_RAND)),
(
"curand_normal2_double",
("hiprand_normal2_double", CONV_DEVICE_FUNC, API_RAND),
),
("curand_normal4", ("hiprand_normal4", CONV_DEVICE_FUNC, API_RAND)),
(
"curand_normal4_double",
("hiprand_normal4_double", CONV_DEVICE_FUNC, API_RAND),
),
("curand_uniform", ("hiprand_uniform", CONV_DEVICE_FUNC, API_RAND)),
(
"curand_uniform_double",
("hiprand_uniform_double", CONV_DEVICE_FUNC, API_RAND),
),
(
"curand_uniform2_double",
("hiprand_uniform2_double", CONV_DEVICE_FUNC, API_RAND),
),
("curand_uniform4", ("hiprand_uniform4", CONV_DEVICE_FUNC, API_RAND)),
(
"curand_uniform4_double",
("hiprand_uniform4_double", CONV_DEVICE_FUNC, API_RAND),
),
("curand_discrete", ("hiprand_discrete", CONV_DEVICE_FUNC, API_RAND)),
("curand_discrete4", ("hiprand_discrete4", CONV_DEVICE_FUNC, API_RAND)),
("curand_poisson", ("hiprand_poisson", CONV_DEVICE_FUNC, API_RAND)),
("curand_poisson4", ("hiprand_poisson4", CONV_DEVICE_FUNC, API_RAND)),
(
"curand_Philox4x32_10",
("hiprand_Philox4x32_10", CONV_DEVICE_FUNC, API_RAND, HIP_UNSUPPORTED),
),
("mtgp32_kernel_params", ("mtgp32_kernel_params_t", CONV_MATH_FUNC, API_RAND)),
("CUFFT_FORWARD", ("HIPFFT_FORWARD", CONV_NUMERIC_LITERAL, API_BLAS)),
("CUFFT_INVERSE", ("HIPFFT_BACKWARD", CONV_NUMERIC_LITERAL, API_BLAS)),
(
"CUFFT_COMPATIBILITY_DEFAULT",
(
"HIPFFT_COMPATIBILITY_DEFAULT",
CONV_NUMERIC_LITERAL,
API_BLAS,
HIP_UNSUPPORTED,
),
),
("cuComplex", ("hipComplex", CONV_TYPE, API_BLAS)),
("cuDoubleComplex", ("hipDoubleComplex", CONV_TYPE, API_BLAS)),
("cufftResult_t", ("hipfftResult_t", CONV_TYPE, API_FFT)),
("cufftResult", ("hipfftResult", CONV_TYPE, API_FFT)),
("CUFFT_SUCCESS", ("HIPFFT_SUCCESS", CONV_NUMERIC_LITERAL, API_FFT)),
("CUFFT_INVALID_PLAN", ("HIPFFT_INVALID_PLAN", CONV_NUMERIC_LITERAL, API_FFT)),
("CUFFT_ALLOC_FAILED", ("HIPFFT_ALLOC_FAILED", CONV_NUMERIC_LITERAL, API_FFT)),
("CUFFT_INVALID_TYPE", ("HIPFFT_INVALID_TYPE", CONV_NUMERIC_LITERAL, API_FFT)),
(
"CUFFT_INVALID_VALUE",
("HIPFFT_INVALID_VALUE", CONV_NUMERIC_LITERAL, API_FFT),
),
(
"CUFFT_INTERNAL_ERROR",
("HIPFFT_INTERNAL_ERROR", CONV_NUMERIC_LITERAL, API_FFT),
),
("CUFFT_EXEC_FAILED", ("HIPFFT_EXEC_FAILED", CONV_NUMERIC_LITERAL, API_FFT)),
("CUFFT_SETUP_FAILED", ("HIPFFT_SETUP_FAILED", CONV_NUMERIC_LITERAL, API_FFT)),
("CUFFT_INVALID_SIZE", ("HIPFFT_INVALID_SIZE", CONV_NUMERIC_LITERAL, API_FFT)),
(
"CUFFT_UNALIGNED_DATA",
("HIPFFT_UNALIGNED_DATA", CONV_NUMERIC_LITERAL, API_FFT),
),
(
"CUFFT_INCOMPLETE_PARAMETER_LIST",
("HIPFFT_INCOMPLETE_PARAMETER_LIST", CONV_NUMERIC_LITERAL, API_FFT),
),
(
"CUFFT_INVALID_DEVICE",
("HIPFFT_INVALID_DEVICE", CONV_NUMERIC_LITERAL, API_FFT),
),
("CUFFT_PARSE_ERROR", ("HIPFFT_PARSE_ERROR", CONV_NUMERIC_LITERAL, API_FFT)),
("CUFFT_NO_WORKSPACE", ("HIPFFT_NO_WORKSPACE", CONV_NUMERIC_LITERAL, API_FFT)),
(
"CUFFT_NOT_IMPLEMENTED",
("HIPFFT_NOT_IMPLEMENTED", CONV_NUMERIC_LITERAL, API_FFT),
),
(
"CUFFT_LICENSE_ERROR",
("HIPFFT_LICENSE_ERROR", CONV_NUMERIC_LITERAL, API_FFT, HIP_UNSUPPORTED),
),
(
"CUFFT_NOT_SUPPORTED",
("HIPFFT_NOT_SUPPORTED", CONV_NUMERIC_LITERAL, API_FFT),
),
("cufftType_t", ("hipfftType_t", CONV_TYPE, API_FFT)),
("cufftType", ("hipfftType", CONV_TYPE, API_FFT)),
("CUFFT_R2C", ("HIPFFT_R2C", CONV_NUMERIC_LITERAL, API_FFT)),
("CUFFT_C2R", ("HIPFFT_C2R", CONV_NUMERIC_LITERAL, API_FFT)),
("CUFFT_C2C", ("HIPFFT_C2C", CONV_NUMERIC_LITERAL, API_FFT)),
("CUFFT_D2Z", ("HIPFFT_D2Z", CONV_NUMERIC_LITERAL, API_FFT)),
("CUFFT_Z2D", ("HIPFFT_Z2D", CONV_NUMERIC_LITERAL, API_FFT)),
("CUFFT_Z2Z", ("HIPFFT_Z2Z", CONV_NUMERIC_LITERAL, API_FFT)),
(
"cufftCompatibility_t",
("hipfftCompatibility_t", CONV_TYPE, API_FFT, HIP_UNSUPPORTED),
),
(
"cufftCompatibility",
("hipfftCompatibility", CONV_TYPE, API_FFT, HIP_UNSUPPORTED),
),
(
"CUFFT_COMPATIBILITY_FFTW_PADDING",
(
"HIPFFT_COMPATIBILITY_FFTW_PADDING",
CONV_NUMERIC_LITERAL,
API_FFT,
HIP_UNSUPPORTED,
),
),
("cufftReal", ("hipfftReal", CONV_TYPE, API_FFT)),
("cufftDoubleReal", ("hipfftDoubleReal", CONV_TYPE, API_FFT)),
("cufftComplex", ("hipfftComplex", CONV_TYPE, API_FFT)),
("cufftDoubleComplex", ("hipfftDoubleComplex", CONV_TYPE, API_FFT)),
("cufftHandle", ("hipfftHandle", CONV_TYPE, API_FFT)),
("cufftPlan1d", ("hipfftPlan1d", CONV_MATH_FUNC, API_FFT)),
("cufftPlan2d", ("hipfftPlan2d", CONV_MATH_FUNC, API_FFT)),
("cufftPlan3d", ("hipfftPlan3d", CONV_MATH_FUNC, API_FFT)),
("cufftPlanMany", ("hipfftPlanMany", CONV_MATH_FUNC, API_FFT)),
("cufftMakePlan1d", ("hipfftMakePlan1d", CONV_MATH_FUNC, API_FFT)),
("cufftMakePlan2d", ("hipfftMakePlan2d", CONV_MATH_FUNC, API_FFT)),
("cufftMakePlan3d", ("hipfftMakePlan3d", CONV_MATH_FUNC, API_FFT)),
("cufftMakePlanMany", ("hipfftMakePlanMany", CONV_MATH_FUNC, API_FFT)),
("cufftMakePlanMany64", ("hipfftMakePlanMany64", CONV_MATH_FUNC, API_FFT)),
("cufftGetSizeMany64", ("hipfftGetSizeMany64", CONV_MATH_FUNC, API_FFT)),
("cufftEstimate1d", ("hipfftEstimate1d", CONV_MATH_FUNC, API_FFT)),
("cufftEstimate2d", ("hipfftEstimate2d", CONV_MATH_FUNC, API_FFT)),
("cufftEstimate3d", ("hipfftEstimate3d", CONV_MATH_FUNC, API_FFT)),
("cufftEstimateMany", ("hipfftEstimateMany", CONV_MATH_FUNC, API_FFT)),
("cufftCreate", ("hipfftCreate", CONV_MATH_FUNC, API_FFT)),
("cufftGetSize1d", ("hipfftGetSize1d", CONV_MATH_FUNC, API_FFT)),
("cufftGetSize2d", ("hipfftGetSize2d", CONV_MATH_FUNC, API_FFT)),
("cufftGetSize3d", ("hipfftGetSize3d", CONV_MATH_FUNC, API_FFT)),
("cufftGetSizeMany", ("hipfftGetSizeMany", CONV_MATH_FUNC, API_FFT)),
("cufftGetSize", ("hipfftGetSize", CONV_MATH_FUNC, API_FFT)),
("cufftSetWorkArea", ("hipfftSetWorkArea", CONV_MATH_FUNC, API_FFT)),
(
"cufftSetAutoAllocation",
("hipfftSetAutoAllocation", CONV_MATH_FUNC, API_FFT),
),
("cufftXtExec", ("hipfftXtExec", CONV_MATH_FUNC, API_FFT)),
("cufftXtMakePlanMany", ("hipfftXtMakePlanMany", CONV_MATH_FUNC, API_FFT)),
("cufftExecC2C", ("hipfftExecC2C", CONV_MATH_FUNC, API_FFT)),
("cufftExecR2C", ("hipfftExecR2C", CONV_MATH_FUNC, API_FFT)),
("cufftExecC2R", ("hipfftExecC2R", CONV_MATH_FUNC, API_FFT)),
("cufftExecZ2Z", ("hipfftExecZ2Z", CONV_MATH_FUNC, API_FFT)),
("cufftExecD2Z", ("hipfftExecD2Z", CONV_MATH_FUNC, API_FFT)),
("cufftExecZ2D", ("hipfftExecZ2D", CONV_MATH_FUNC, API_FFT)),
("cufftSetStream", ("hipfftSetStream", CONV_MATH_FUNC, API_FFT)),
("cufftDestroy", ("hipfftDestroy", CONV_MATH_FUNC, API_FFT)),
("cufftGetVersion", ("hipfftGetVersion", CONV_MATH_FUNC, API_FFT)),
(
"cufftGetProperty",
("hipfftGetProperty", CONV_MATH_FUNC, API_FFT, HIP_UNSUPPORTED),
),
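        # NVRTC -> hipRTC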
("nvrtcResult", ("hiprtcResult", CONV_TYPE, API_RTC)),
("NVRTC_SUCCESS", ("HIPRTC_SUCCESS", CONV_TYPE, API_RTC)),
(
"NVRTC_ERROR_OUT_OF_MEMORY",
("HIPRTC_ERROR_OUT_OF_MEMORY", CONV_TYPE, API_RTC),
),
(
"NVRTC_ERROR_PROGRAM_CREATION_FAILURE",
("HIPRTC_ERROR_PROGRAM_CREATION_FAILURE", CONV_TYPE, API_RTC),
),
(
"NVRTC_ERROR_INVALID_INPUT",
("HIPRTC_ERROR_INVALID_INPUT", CONV_TYPE, API_RTC),
),
(
"NVRTC_ERROR_INVALID_PROGRAM",
("HIPRTC_ERROR_INVALID_PROGRAM", CONV_TYPE, API_RTC),
),
("NVRTC_ERROR_COMPILATION", ("HIPRTC_ERROR_COMPILATION", CONV_TYPE, API_RTC)),
(
"NVRTC_ERROR_BUILTIN_OPERATION_FAILURE",
("HIPRTC_ERROR_BUILTIN_OPERATION_FAILURE", CONV_TYPE, API_RTC),
),
(
"NVRTC_ERROR_NO_NAME_EXPRESSIONS_AFTER_COMPILATION",
("HIPRTC_ERROR_NO_NAME_EXPRESSIONS_AFTER_COMPILATION", CONV_TYPE, API_RTC),
),
(
"NVRTC_ERROR_NAME_EXPRESSION_NOT_VALID",
("HIPRTC_ERROR_NAME_EXPRESSION_NOT_VALID", CONV_TYPE, API_RTC),
),
(
"NVRTC_ERROR_INTERNAL_ERROR",
("HIPRTC_ERROR_INTERNAL_ERROR", CONV_TYPE, API_RTC),
),
("nvrtcGetErrorString", ("hiprtcGetErrorString", CONV_JIT, API_RTC)),
("nvrtcVersion", ("hiprtcVersion", CONV_JIT, API_RTC)),
("nvrtcProgram", ("hiprtcProgram", CONV_TYPE, API_RTC)),
("nvrtcAddNameExpression", ("hiprtcAddNameExpression", CONV_JIT, API_RTC)),
("nvrtcCompileProgram", ("hiprtcCompileProgram", CONV_JIT, API_RTC)),
("nvrtcCreateProgram", ("hiprtcCreateProgram", CONV_JIT, API_RTC)),
("nvrtcDestroyProgram", ("hiprtcDestroyProgram", CONV_JIT, API_RTC)),
("nvrtcGetLoweredName", ("hiprtcGetLoweredName", CONV_JIT, API_RTC)),
("nvrtcGetProgramLog", ("hiprtcGetProgramLog", CONV_JIT, API_RTC)),
("nvrtcGetProgramLogSize", ("hiprtcGetProgramLogSize", CONV_JIT, API_RTC)),
("nvrtcGetPTX", ("hiprtcGetCode", CONV_JIT, API_RTC)),
("nvrtcGetPTXSize", ("hiprtcGetCodeSize", CONV_JIT, API_RTC)),
("thrust::cuda", ("thrust::hip", CONV_MATH_FUNC, API_BLAS)),
(
"cudaCpuDeviceId",
("hipCpuDeviceId", CONV_TYPE, API_RUNTIME, HIP_UNSUPPORTED),
),
        # The caffe2 directory uses plain substring matching; pytorch uses word-boundary matching.
        # Prefix patterns such as 'cub::' therefore do not match under the pytorch rules,
        # so we list every current use of a cub symbol individually.
("cub::", ("hipcub::", CONV_SPECIAL_FUNC, API_RUNTIME)),
("cub::ArgMax", ("hipcub::ArgMax", CONV_SPECIAL_FUNC, API_RUNTIME)),
("cub::ArgMin", ("hipcub::ArgMin", CONV_SPECIAL_FUNC, API_RUNTIME)),
("cub::BLOCK_SCAN_WARP_SCANS", ("hipcub::BLOCK_SCAN_WARP_SCANS", CONV_SPECIAL_FUNC, API_RUNTIME)),
("cub::BLOCK_REDUCE_WARP_REDUCTIONS", ("hipcub::BLOCK_REDUCE_WARP_REDUCTIONS", CONV_SPECIAL_FUNC, API_RUNTIME)),
("cub::BLOCK_STORE_WARP_TRANSPOSE", ("hipcub::BLOCK_STORE_WARP_TRANSPOSE", CONV_SPECIAL_FUNC, API_RUNTIME)),
("cub::BLOCK_LOAD_DIRECT", ("hipcub::BLOCK_LOAD_DIRECT", CONV_SPECIAL_FUNC, API_RUNTIME)),
("cub::BLOCK_STORE_DIRECT", ("hipcub::BLOCK_STORE_DIRECT", CONV_SPECIAL_FUNC, API_RUNTIME)),
(
"cub::BLOCK_REDUCE_RAKING_COMMUTATIVE_ONLY",
("hipcub::BLOCK_REDUCE_RAKING_COMMUTATIVE_ONLY", CONV_SPECIAL_FUNC, API_RUNTIME)
),
("cub::BlockReduce", ("hipcub::BlockReduce", CONV_SPECIAL_FUNC, API_RUNTIME)),
("cub::BlockScan", ("hipcub::BlockScan", CONV_SPECIAL_FUNC, API_RUNTIME)),
("cub::BlockLoad", ("hipcub::BlockLoad", CONV_SPECIAL_FUNC, API_RUNTIME)),
("cub::BlockStore", ("hipcub::BlockStore", CONV_SPECIAL_FUNC, API_RUNTIME)),
("cub::BlockRakingLayout", ("hipcub::BlockRakingLayout", CONV_SPECIAL_FUNC, API_RUNTIME)),
("cub::BlockRadixSort", ("hipcub::BlockRadixSort", CONV_SPECIAL_FUNC, API_RUNTIME)),
("cub::Uninitialized", ("hipcub::Uninitialized", CONV_SPECIAL_FUNC, API_RUNTIME)),
("cub::RowMajorTid", ("hipcub::RowMajorTid", CONV_SPECIAL_FUNC, API_RUNTIME)),
("cub::CachingDeviceAllocator", ("hipcub::CachingDeviceAllocator", CONV_SPECIAL_FUNC, API_RUNTIME)),
("cub::CountingInputIterator", ("hipcub::CountingInputIterator", CONV_SPECIAL_FUNC, API_RUNTIME)),
("cub::DeviceRadixSort", ("hipcub::DeviceRadixSort", CONV_SPECIAL_FUNC, API_RUNTIME)),
("cub::DeviceReduce", ("hipcub::DeviceReduce", CONV_SPECIAL_FUNC, API_RUNTIME)),
("cub::DeviceRunLengthEncode", ("hipcub::DeviceRunLengthEncode", CONV_SPECIAL_FUNC, API_RUNTIME)),
("cub::DeviceScan", ("hipcub::DeviceScan", CONV_SPECIAL_FUNC, API_RUNTIME)),
("cub::DeviceSegmentedRadixSort", ("hipcub::DeviceSegmentedRadixSort", CONV_SPECIAL_FUNC, API_RUNTIME)),
("cub::DeviceSegmentedReduce", ("hipcub::DeviceSegmentedReduce", CONV_SPECIAL_FUNC, API_RUNTIME)),
("cub::DeviceSelect", ("hipcub::DeviceSelect", CONV_SPECIAL_FUNC, API_RUNTIME)),
("cub::FpLimits", ("hipcub::FpLimits", CONV_SPECIAL_FUNC, API_RUNTIME)),
("cub::KeyValuePair", ("hipcub::KeyValuePair", CONV_SPECIAL_FUNC, API_RUNTIME)),
("cub::Max", ("hipcub::Max", CONV_SPECIAL_FUNC, API_RUNTIME)),
("cub::Min", ("hipcub::Min", CONV_SPECIAL_FUNC, API_RUNTIME)),
("cub::Sum", ("hipcub::Sum", CONV_SPECIAL_FUNC, API_RUNTIME)),
("cub::Log2", ("hipcub::Log2", CONV_SPECIAL_FUNC, API_RUNTIME)),
("cub::LaneId", ("hipcub::LaneId", CONV_SPECIAL_FUNC, API_RUNTIME)),
("cub::WarpMask", ("hipcub::WarpMask", CONV_SPECIAL_FUNC, API_RUNTIME)),
("cub::ShuffleIndex", ("hipcub::ShuffleIndex", CONV_SPECIAL_FUNC, API_RUNTIME)),
("cub::ShuffleDown", ("hipcub::ShuffleDown", CONV_SPECIAL_FUNC, API_RUNTIME)),
("cub::ArgIndexInputIterator", ("hipcub::ArgIndexInputIterator", CONV_SPECIAL_FUNC, API_RUNTIME)),
("cub::TransformInputIterator", ("hipcub::TransformInputIterator", CONV_SPECIAL_FUNC, API_RUNTIME)),
("cub::WarpReduce", ("hipcub::WarpReduce", CONV_SPECIAL_FUNC, API_RUNTIME)),
("cub::CTA_SYNC", ("hipcub::CTA_SYNC", CONV_SPECIAL_FUNC, API_RUNTIME)),
("nvtxMark", ("roctxMark", CONV_OTHER, API_ROCTX)),
("nvtxMarkA", ("roctxMarkA", CONV_OTHER, API_ROCTX)),
("nvtxRangePushA", ("roctxRangePushA", CONV_OTHER, API_ROCTX)),
("nvtxRangePop", ("roctxRangePop", CONV_OTHER, API_ROCTX)),
("nvtxRangeStartA", ("roctxRangeStartA", CONV_OTHER, API_ROCTX)),
("nvtxRangeEnd", ("roctxRangeStop", CONV_OTHER, API_ROCTX)),
("nvtxRangeId_t", ("int", CONV_OTHER, API_ROCTX)),
("nvmlReturn_t", ("rsmi_status_t", CONV_OTHER, API_ROCMSMI)),
("NVML_SUCCESS", ("RSMI_STATUS_SUCCESS", CONV_OTHER, API_ROCMSMI)),
("NVML_P2P_CAPS_INDEX_READ", ("RSMI_STATUS_SUCCESS", CONV_OTHER, API_ROCMSMI)),
("NVML_P2P_STATUS_OK", ("RSMI_STATUS_SUCCESS", CONV_OTHER, API_ROCMSMI)),
("NVML_ERROR_INSUFFICIENT_SIZE", ("RSMI_STATUS_INSUFFICIENT_SIZE", CONV_OTHER, API_ROCMSMI)),
("nvmlDevice_t", ("uint32_t", CONV_OTHER, API_ROCMSMI)),
("nvmlGpuP2PStatus_t", ("bool", CONV_OTHER, API_ROCMSMI)),
("nvmlProcessInfo_t", ("rsmi_process_info_t", CONV_OTHER, API_ROCMSMI)),
("nvmlGpuP2PCapsIndex_t", ("uint32_t", CONV_OTHER, API_ROCMSMI)),
]
)
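# The "special" math libraries: cuSPARSE symbols map to hipSPARSE and cuSOLVER symbols
# map to hipSOLVER. Each entry has the form
# (cuda_identifier, (hip_identifier, conversion_group, API_SPECIAL)).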
CUDA_SPECIAL_MAP = collections.OrderedDict(
[
# SPARSE
("cusparseStatus_t", ("hipsparseStatus_t", CONV_MATH_FUNC, API_SPECIAL)),
("cusparseHandle_t", ("hipsparseHandle_t", CONV_MATH_FUNC, API_SPECIAL)),
("cuComplex", ("hipComplex", CONV_TYPE, API_SPECIAL)),
("cuDoubleComplex", ("hipDoubleComplex", CONV_TYPE, API_SPECIAL)),
(
"CUSPARSE_POINTER_MODE_HOST",
("HIPSPARSE_POINTER_MODE_HOST", CONV_NUMERIC_LITERAL, API_SPECIAL),
),
("cusparseOperation_t", ("hipsparseOperation_t", CONV_TYPE, API_SPECIAL)),
(
"cusparseCreateMatDescr",
("hipsparseCreateMatDescr", CONV_MATH_FUNC, API_SPECIAL),
),
("cusparseCreate", ("hipsparseCreate", CONV_MATH_FUNC, API_SPECIAL)),
(
"cusparseDestroyMatDescr",
("hipsparseDestroyMatDescr", CONV_MATH_FUNC, API_SPECIAL),
),
("cusparseDestroy", ("hipsparseDestroy", CONV_MATH_FUNC, API_SPECIAL)),
("cusparseXcoo2csr", ("hipsparseXcoo2csr", CONV_MATH_FUNC, API_SPECIAL)),
("cusparseMatDescr_t", ("hipsparseMatDescr_t", CONV_MATH_FUNC, API_SPECIAL)),
("cusparseDiagType_t", ("hipsparseDiagType_t", CONV_TYPE, API_SPECIAL)),
("CUSPARSE_DIAG_TYPE_UNIT", ("HIPSPARSE_DIAG_TYPE_UNIT", CONV_NUMERIC_LITERAL, API_SPECIAL)),
("CUSPARSE_DIAG_TYPE_NON_UNIT", ("HIPSPARSE_DIAG_TYPE_NON_UNIT", CONV_NUMERIC_LITERAL, API_SPECIAL)),
("cusparseSetMatDiagType", ("hipsparseSetMatDiagType", CONV_MATH_FUNC, API_SPECIAL)),
("cusparseFillMode_t", ("hipsparseFillMode_t", CONV_TYPE, API_SPECIAL)),
("CUSPARSE_FILL_MODE_UPPER", ("HIPSPARSE_FILL_MODE_UPPER", CONV_NUMERIC_LITERAL, API_SPECIAL)),
("CUSPARSE_FILL_MODE_LOWER", ("HIPSPARSE_FILL_MODE_LOWER", CONV_NUMERIC_LITERAL, API_SPECIAL)),
("cusparseSetMatFillMode", ("hipsparseSetMatFillMode", CONV_MATH_FUNC, API_SPECIAL)),
("cusparseDirection_t", ("hipsparseDirection_t", CONV_TYPE, API_SPECIAL)),
("CUSPARSE_DIRECTION_ROW", ("HIPSPARSE_DIRECTION_ROW", CONV_NUMERIC_LITERAL, API_SPECIAL)),
("CUSPARSE_DIRECTION_COLUMN", ("HIPSPARSE_DIRECTION_COLUMN", CONV_NUMERIC_LITERAL, API_SPECIAL)),
("cusparseSolvePolicy_t", ("hipsparseSolvePolicy_t", CONV_TYPE, API_SPECIAL)),
("CUSPARSE_SOLVE_POLICY_NO_LEVEL", ("HIPSPARSE_SOLVE_POLICY_NO_LEVEL", CONV_NUMERIC_LITERAL, API_SPECIAL)),
("CUSPARSE_SOLVE_POLICY_USE_LEVEL", ("HIPSPARSE_SOLVE_POLICY_USE_LEVEL", CONV_NUMERIC_LITERAL, API_SPECIAL)),
("cusparseCreateBsrsv2Info", ("hipsparseCreateBsrsv2Info", CONV_MATH_FUNC, API_SPECIAL)),
("cusparseCreateBsrsm2Info", ("hipsparseCreateBsrsm2Info", CONV_MATH_FUNC, API_SPECIAL)),
("cusparseDestroyBsrsv2Info", ("hipsparseDestroyBsrsv2Info", CONV_MATH_FUNC, API_SPECIAL)),
("cusparseDestroyBsrsm2Info", ("hipsparseDestroyBsrsm2Info", CONV_MATH_FUNC, API_SPECIAL)),
("cusparseSbsrmm", ("hipsparseSbsrmm", CONV_MATH_FUNC, API_SPECIAL)),
("cusparseDbsrmm", ("hipsparseDbsrmm", CONV_MATH_FUNC, API_SPECIAL)),
("cusparseCbsrmm", ("hipsparseCbsrmm", CONV_MATH_FUNC, API_SPECIAL)),
("cusparseZbsrmm", ("hipsparseZbsrmm", CONV_MATH_FUNC, API_SPECIAL)),
("cusparseSbsrmv", ("hipsparseSbsrmv", CONV_MATH_FUNC, API_SPECIAL)),
("cusparseDbsrmv", ("hipsparseDbsrmv", CONV_MATH_FUNC, API_SPECIAL)),
("cusparseCbsrmv", ("hipsparseCbsrmv", CONV_MATH_FUNC, API_SPECIAL)),
("cusparseZbsrmv", ("hipsparseZbsrmv", CONV_MATH_FUNC, API_SPECIAL)),
("cusparseSbsrsv2_bufferSize", ("hipsparseSbsrsv2_bufferSize", CONV_MATH_FUNC, API_SPECIAL)),
("cusparseDbsrsv2_bufferSize", ("hipsparseDbsrsv2_bufferSize", CONV_MATH_FUNC, API_SPECIAL)),
("cusparseCbsrsv2_bufferSize", ("hipsparseCbsrsv2_bufferSize", CONV_MATH_FUNC, API_SPECIAL)),
("cusparseZbsrsv2_bufferSize", ("hipsparseZbsrsv2_bufferSize", CONV_MATH_FUNC, API_SPECIAL)),
("cusparseSbsrsv2_analysis", ("hipsparseSbsrsv2_analysis", CONV_MATH_FUNC, API_SPECIAL)),
("cusparseDbsrsv2_analysis", ("hipsparseDbsrsv2_analysis", CONV_MATH_FUNC, API_SPECIAL)),
("cusparseCbsrsv2_analysis", ("hipsparseCbsrsv2_analysis", CONV_MATH_FUNC, API_SPECIAL)),
("cusparseZbsrsv2_analysis", ("hipsparseZbsrsv2_analysis", CONV_MATH_FUNC, API_SPECIAL)),
("cusparseSbsrsv2_solve", ("hipsparseSbsrsv2_solve", CONV_MATH_FUNC, API_SPECIAL)),
("cusparseDbsrsv2_solve", ("hipsparseDbsrsv2_solve", CONV_MATH_FUNC, API_SPECIAL)),
("cusparseCbsrsv2_solve", ("hipsparseCbsrsv2_solve", CONV_MATH_FUNC, API_SPECIAL)),
("cusparseZbsrsv2_solve", ("hipsparseZbsrsv2_solve", CONV_MATH_FUNC, API_SPECIAL)),
("cusparseSbsrsm2_bufferSize", ("hipsparseSbsrsm2_bufferSize", CONV_MATH_FUNC, API_SPECIAL)),
("cusparseDbsrsm2_bufferSize", ("hipsparseDbsrsm2_bufferSize", CONV_MATH_FUNC, API_SPECIAL)),
("cusparseCbsrsm2_bufferSize", ("hipsparseCbsrsm2_bufferSize", CONV_MATH_FUNC, API_SPECIAL)),
("cusparseZbsrsm2_bufferSize", ("hipsparseZbsrsm2_bufferSize", CONV_MATH_FUNC, API_SPECIAL)),
("cusparseSbsrsm2_analysis", ("hipsparseSbsrsm2_analysis", CONV_MATH_FUNC, API_SPECIAL)),
("cusparseDbsrsm2_analysis", ("hipsparseDbsrsm2_analysis", CONV_MATH_FUNC, API_SPECIAL)),
("cusparseCbsrsm2_analysis", ("hipsparseCbsrsm2_analysis", CONV_MATH_FUNC, API_SPECIAL)),
("cusparseZbsrsm2_analysis", ("hipsparseZbsrsm2_analysis", CONV_MATH_FUNC, API_SPECIAL)),
("cusparseSbsrsm2_solve", ("hipsparseSbsrsm2_solve", CONV_MATH_FUNC, API_SPECIAL)),
("cusparseDbsrsm2_solve", ("hipsparseDbsrsm2_solve", CONV_MATH_FUNC, API_SPECIAL)),
("cusparseCbsrsm2_solve", ("hipsparseCbsrsm2_solve", CONV_MATH_FUNC, API_SPECIAL)),
("cusparseZbsrsm2_solve", ("hipsparseZbsrsm2_solve", CONV_MATH_FUNC, API_SPECIAL)),
("cusparseScsrmm2", ("hipsparseScsrmm2", CONV_MATH_FUNC, API_SPECIAL)),
("cusparseDcsrmm2", ("hipsparseDcsrmm2", CONV_MATH_FUNC, API_SPECIAL)),
("cusparseCcsrmm2", ("hipsparseCcsrmm2", CONV_MATH_FUNC, API_SPECIAL)),
("cusparseZcsrmm2", ("hipsparseZcsrmm2", CONV_MATH_FUNC, API_SPECIAL)),
("cusparseScsrmm", ("hipsparseScsrmm", CONV_MATH_FUNC, API_SPECIAL)),
("cusparseDcsrmm", ("hipsparseDcsrmm", CONV_MATH_FUNC, API_SPECIAL)),
(
"cusparseXcsrsort_bufferSizeExt",
("hipsparseXcsrsort_bufferSizeExt", CONV_MATH_FUNC, API_SPECIAL),
),
("cusparseCreateCsrgemm2Info", ("hipsparseCreateCsrgemm2Info", CONV_MATH_FUNC, API_SPECIAL)),
(
"cusparseDestroyCsrgemm2Info",
("hipsparseDestroyCsrgemm2Info", CONV_MATH_FUNC, API_SPECIAL),
),
("cusparseXcsrgemm2Nnz", ("hipsparseXcsrgemm2Nnz", CONV_MATH_FUNC, API_SPECIAL)),
("cusparseDcsrgemm2_bufferSizeExt", ("hipsparseDcsrgemm2_bufferSizeExt", CONV_MATH_FUNC, API_SPECIAL)),
("cusparseScsrgemm2_bufferSizeExt", ("hipsparseScsrgemm2_bufferSizeExt", CONV_MATH_FUNC, API_SPECIAL)),
("cusparseDcsrgemm2", ("hipsparseDcsrgemm2", CONV_MATH_FUNC, API_SPECIAL)),
("cusparseScsrgemm2", ("hipsparseScsrgemm2", CONV_MATH_FUNC, API_SPECIAL)),
("cusparseSetPointerMode", ("hipsparseSetPointerMode", CONV_MATH_FUNC, API_SPECIAL)),
("cusparseXcsrgeam2Nnz", ("hipsparseXcsrgeam2Nnz", CONV_MATH_FUNC, API_SPECIAL)),
("cusparseScsrgeam2_bufferSizeExt", ("hipsparseScsrgeam2_bufferSizeExt", CONV_MATH_FUNC, API_SPECIAL)),
("cusparseDcsrgeam2_bufferSizeExt", ("hipsparseDcsrgeam2_bufferSizeExt", CONV_MATH_FUNC, API_SPECIAL)),
("cusparseCcsrgeam2_bufferSizeExt", ("hipsparseCcsrgeam2_bufferSizeExt", CONV_MATH_FUNC, API_SPECIAL)),
("cusparseZcsrgeam2_bufferSizeExt", ("hipsparseZcsrgeam2_bufferSizeExt", CONV_MATH_FUNC, API_SPECIAL)),
("cusparseScsrgeam2", ("hipsparseScsrgeam2", CONV_MATH_FUNC, API_SPECIAL)),
("cusparseDcsrgeam2", ("hipsparseDcsrgeam2", CONV_MATH_FUNC, API_SPECIAL)),
("cusparseCcsrgeam2", ("hipsparseCcsrgeam2", CONV_MATH_FUNC, API_SPECIAL)),
("cusparseZcsrgeam2", ("hipsparseZcsrgeam2", CONV_MATH_FUNC, API_SPECIAL)),
("cusparseXcsrsort", ("hipsparseXcsrsort", CONV_MATH_FUNC, API_SPECIAL)),
("cusparseXbsrsm2_zeroPivot", ("hipsparseXbsrsm2_zeroPivot", CONV_MATH_FUNC, API_SPECIAL)),
("cusparseXbsrsv2_zeroPivot", ("hipsparseXbsrsv2_zeroPivot", CONV_MATH_FUNC, API_SPECIAL)),
(
"cusparseXcoosort_bufferSizeExt",
("hipsparseXcoosort_bufferSizeExt", CONV_MATH_FUNC, API_SPECIAL),
),
(
"cusparseXcoosortByRow",
("hipsparseXcoosortByRow", CONV_MATH_FUNC, API_SPECIAL),
),
("cusparseSetStream", ("hipsparseSetStream", CONV_MATH_FUNC, API_SPECIAL)),
(
"cusparseCreateIdentityPermutation",
("hipsparseCreateIdentityPermutation", CONV_MATH_FUNC, API_SPECIAL),
),
(
"cusparseSetMatIndexBase",
("hipsparseSetMatIndexBase", CONV_MATH_FUNC, API_SPECIAL),
),
("cusparseSetMatType", ("hipsparseSetMatType", CONV_MATH_FUNC, API_SPECIAL)),
("cusparseSpMV", ("hipsparseSpMV", CONV_MATH_FUNC, API_SPECIAL)),
("cusparseSpMV_bufferSize", ("hipsparseSpMV_bufferSize", CONV_MATH_FUNC, API_SPECIAL)),
("cusparseSpMM", ("hipsparseSpMM", CONV_MATH_FUNC, API_SPECIAL)),
("cusparseSpMM_bufferSize", ("hipsparseSpMM_bufferSize", CONV_MATH_FUNC, API_SPECIAL)),
("cusparseCreateDnMat", ("hipsparseCreateDnMat", CONV_MATH_FUNC, API_SPECIAL)),
("cusparseDnMatSetStridedBatch", ("hipsparseDnMatSetStridedBatch", CONV_MATH_FUNC, API_SPECIAL)),
("cusparseCsrSetStridedBatch", ("hipsparseCsrSetStridedBatch", CONV_MATH_FUNC, API_SPECIAL)),
("cusparseCreateDnVec", ("hipsparseCreateDnVec", CONV_MATH_FUNC, API_SPECIAL)),
("cusparseCreateCsr", ("hipsparseCreateCsr", CONV_MATH_FUNC, API_SPECIAL)),
("cusparseDestroyDnMat", ("hipsparseDestroyDnMat", CONV_MATH_FUNC, API_SPECIAL)),
("cusparseDestroyDnVec", ("hipsparseDestroyDnVec", CONV_MATH_FUNC, API_SPECIAL)),
("cusparseDestroySpMat", ("hipsparseDestroySpMat", CONV_MATH_FUNC, API_SPECIAL)),
("cusparseSpGEMM_destroyDescr", ("hipsparseSpGEMM_destroyDescr", CONV_MATH_FUNC, API_SPECIAL)),
("cusparseCreateCoo", ("hipsparseCreateCoo", CONV_MATH_FUNC, API_SPECIAL)),
("cusparseCreateCsr", ("hipsparseCreateCsr", CONV_MATH_FUNC, API_SPECIAL)),
("cusparseSpGEMM_createDescr", ("hipsparseSpGEMM_createDescr", CONV_MATH_FUNC, API_SPECIAL)),
("cusparseDnMatSetStridedBatch", ("hipsparseDnMatSetStridedBatch", CONV_MATH_FUNC, API_SPECIAL)),
("cusparseSpGEMM_copy", ("hipsparseSpGEMM_copy", CONV_MATH_FUNC, API_SPECIAL)),
("cusparseSDDMM_bufferSize", ("hipsparseSDDMM_bufferSize", CONV_MATH_FUNC, API_SPECIAL)),
("cusparseSDDMM_preprocess", ("hipsparseSDDMM_preprocess", CONV_MATH_FUNC, API_SPECIAL)),
("cusparseSDDMM", ("hipsparseSDDMM", CONV_MATH_FUNC, API_SPECIAL)),
("cusparseSpGEMM_compute", ("hipsparseSpGEMM_compute", CONV_MATH_FUNC, API_SPECIAL)),
("cusparseSpGEMM_workEstimation", ("hipsparseSpGEMM_workEstimation", CONV_MATH_FUNC, API_SPECIAL)),
("cusparseSpMatGetSize", ("hipsparseSpMatGetSize", CONV_MATH_FUNC, API_SPECIAL)),
("cusparseCsrSetPointers", ("hipsparseCsrSetPointers", CONV_MATH_FUNC, API_SPECIAL)),
("cusparseSpMVAlg_t", ("hipsparseSpMVAlg_t", CONV_TYPE, API_SPECIAL)),
("cusparseSpMMAlg_t", ("hipsparseSpMMAlg_t", CONV_TYPE, API_SPECIAL)),
("cusparseIndexType_t", ("hipsparseIndexType_t", CONV_TYPE, API_SPECIAL)),
# Unsupported ("cusparseMatDescr", ("hipsparseMatDescr", CONV_TYPE, API_SPECIAL)),
# Unsupported ("cusparseDnMatDescr", ("hipsparseDnMatDescr", CONV_TYPE, API_SPECIAL)),
# Unsupported ("cusparseDnVecDescr", ("hipsparseDnVecDescr", CONV_TYPE, API_SPECIAL)),
# Unsupported ("cusparseSpMatDescr", ("hipsparseSpMatDescr", CONV_TYPE, API_SPECIAL)),
# Unsupported ("cusparseSpGEMMDescr", ("hipsparseSpGEMMDescr", CONV_TYPE, API_SPECIAL)),
("cusparseDnMatDescr_t", ("hipsparseDnMatDescr_t", CONV_TYPE, API_SPECIAL)),
("cusparseDnVecDescr_t", ("hipsparseDnVecDescr_t", CONV_TYPE, API_SPECIAL)),
("cusparseSpMatDescr_t", ("hipsparseSpMatDescr_t", CONV_TYPE, API_SPECIAL)),
("cusparseSpGEMMDescr_t", ("hipsparseSpGEMMDescr_t", CONV_TYPE, API_SPECIAL)),
("CUSPARSE_INDEX_32I", ("HIPSPARSE_INDEX_32I", CONV_NUMERIC_LITERAL, API_SPECIAL)),
("CUSPARSE_INDEX_64I", ("HIPSPARSE_INDEX_64I", CONV_NUMERIC_LITERAL, API_SPECIAL)),
("CUSPARSE_ORDER_COL", ("HIPSPARSE_ORDER_COLUMN", CONV_NUMERIC_LITERAL, API_SPECIAL)),
("CUSPARSE_MV_ALG_DEFAULT", ("HIPSPARSE_MV_ALG_DEFAULT", CONV_NUMERIC_LITERAL, API_SPECIAL)),
("CUSPARSE_MM_ALG_DEFAULT", ("HIPSPARSE_MM_ALG_DEFAULT", CONV_NUMERIC_LITERAL, API_SPECIAL)),
("CUSPARSE_SPMM_COO_ALG1", ("HIPSPARSE_SPMM_COO_ALG1", CONV_NUMERIC_LITERAL, API_SPECIAL)),
("CUSPARSE_SPMM_COO_ALG2", ("HIPSPARSE_SPMM_COO_ALG2", CONV_NUMERIC_LITERAL, API_SPECIAL)),
("CUSPARSE_COOMV_ALG", ("HIPSPARSE_COOMV_ALG", CONV_NUMERIC_LITERAL, API_SPECIAL)),
("CUSPARSE_SPMM_CSR_ALG1", ("HIPSPARSE_CSRMM_ALG1", CONV_NUMERIC_LITERAL, API_SPECIAL)),
("CUSPARSE_SPGEMM_DEFAULT", ("HIPSPARSE_SPGEMM_DEFAULT", CONV_NUMERIC_LITERAL, API_SPECIAL)),
("CUSPARSE_SDDMM_ALG_DEFAULT", ("HIPSPARSE_SDDMM_ALG_DEFAULT", CONV_NUMERIC_LITERAL, API_SPECIAL)),
(
"CUSPARSE_STATUS_SUCCESS",
("HIPSPARSE_STATUS_SUCCESS", CONV_NUMERIC_LITERAL, API_SPECIAL),
),
(
"CUSPARSE_STATUS_NOT_INITIALIZED",
("HIPSPARSE_STATUS_NOT_INITIALIZED", CONV_NUMERIC_LITERAL, API_SPECIAL),
),
(
"CUSPARSE_STATUS_ALLOC_FAILED",
("HIPSPARSE_STATUS_ALLOC_FAILED", CONV_NUMERIC_LITERAL, API_SPECIAL),
),
(
"CUSPARSE_STATUS_INVALID_VALUE",
("HIPSPARSE_STATUS_INVALID_VALUE", CONV_NUMERIC_LITERAL, API_SPECIAL),
),
(
"CUSPARSE_STATUS_MAPPING_ERROR",
("HIPSPARSE_STATUS_MAPPING_ERROR", CONV_NUMERIC_LITERAL, API_SPECIAL),
),
(
"CUSPARSE_STATUS_EXECUTION_FAILED",
("HIPSPARSE_STATUS_EXECUTION_FAILED", CONV_NUMERIC_LITERAL, API_SPECIAL),
),
(
"CUSPARSE_STATUS_INTERNAL_ERROR",
("HIPSPARSE_STATUS_INTERNAL_ERROR", CONV_NUMERIC_LITERAL, API_SPECIAL),
),
(
"CUSPARSE_STATUS_MATRIX_TYPE_NOT_SUPPORTED",
(
"HIPSPARSE_STATUS_MATRIX_TYPE_NOT_SUPPORTED",
CONV_NUMERIC_LITERAL,
API_SPECIAL,
),
),
(
"CUSPARSE_STATUS_ARCH_MISMATCH",
("HIPSPARSE_STATUS_ARCH_MISMATCH", CONV_NUMERIC_LITERAL, API_SPECIAL),
),
(
"CUSPARSE_STATUS_ZERO_PIVOT",
("HIPSPARSE_STATUS_ZERO_PIVOT", CONV_NUMERIC_LITERAL, API_SPECIAL),
),
(
"CUSPARSE_OPERATION_TRANSPOSE",
("HIPSPARSE_OPERATION_TRANSPOSE", CONV_NUMERIC_LITERAL, API_SPECIAL),
),
(
"CUSPARSE_OPERATION_NON_TRANSPOSE",
("HIPSPARSE_OPERATION_NON_TRANSPOSE", CONV_NUMERIC_LITERAL, API_SPECIAL),
),
(
"CUSPARSE_OPERATION_CONJUGATE_TRANSPOSE",
(
"HIPSPARSE_OPERATION_CONJUGATE_TRANSPOSE",
CONV_NUMERIC_LITERAL,
API_SPECIAL,
),
),
(
"CUSPARSE_INDEX_BASE_ZERO",
("HIPSPARSE_INDEX_BASE_ZERO", CONV_NUMERIC_LITERAL, API_SPECIAL),
),
(
"CUSPARSE_INDEX_BASE_ONE",
("HIPSPARSE_INDEX_BASE_ONE", CONV_NUMERIC_LITERAL, API_SPECIAL),
),
(
"CUSPARSE_MATRIX_TYPE_GENERAL",
("HIPSPARSE_MATRIX_TYPE_GENERAL", CONV_NUMERIC_LITERAL, API_SPECIAL),
),
# SOLVER
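        # cuSOLVER entry points take cuBLAS enum types (cublasOperation_t,
        # cublasFillMode_t, cublasSideMode_t); in this block those enums are mapped
        # to their hipSOLVER counterparts rather than to hipBLAS.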
("cublasOperation_t", ("hipsolverOperation_t", CONV_TYPE, API_SPECIAL)),
("CUBLAS_OP_N", ("HIPSOLVER_OP_N", CONV_NUMERIC_LITERAL, API_SPECIAL)),
(
"CUBLAS_OP_T",
("HIPSOLVER_OP_T", CONV_NUMERIC_LITERAL, API_SPECIAL),
),
(
"CUBLAS_OP_C",
("HIPSOLVER_OP_C", CONV_NUMERIC_LITERAL, API_SPECIAL),
),
("cublasFillMode_t", ("hipsolverFillMode_t", CONV_TYPE, API_SPECIAL)),
(
"CUBLAS_FILL_MODE_LOWER",
("HIPSOLVER_FILL_MODE_LOWER", CONV_NUMERIC_LITERAL, API_SPECIAL),
),
(
"CUBLAS_FILL_MODE_UPPER",
("HIPSOLVER_FILL_MODE_UPPER", CONV_NUMERIC_LITERAL, API_SPECIAL),
),
("cublasSideMode_t", ("hipsolverSideMode_t", CONV_TYPE, API_SPECIAL)),
("CUBLAS_SIDE_LEFT", ("HIPSOLVER_SIDE_LEFT", CONV_NUMERIC_LITERAL, API_SPECIAL)),
("CUBLAS_SIDE_RIGHT", ("HIPSOLVER_SIDE_RIGHT", CONV_NUMERIC_LITERAL, API_SPECIAL)),
("cusolverEigMode_t", ("hipsolverEigMode_t", CONV_TYPE, API_SPECIAL)),
("CUSOLVER_EIG_MODE_VECTOR", ("HIPSOLVER_EIG_MODE_VECTOR", CONV_NUMERIC_LITERAL, API_SPECIAL)),
("CUSOLVER_EIG_MODE_NOVECTOR", ("HIPSOLVER_EIG_MODE_NOVECTOR", CONV_NUMERIC_LITERAL, API_SPECIAL)),
("syevjInfo_t", ("hipsolverSyevjInfo_t", CONV_TYPE, API_SPECIAL)),
("cusolverDnCreateSyevjInfo", ("hipsolverDnCreateSyevjInfo", CONV_MATH_FUNC, API_SPECIAL)),
("cusolverDnXsyevjSetSortEig", ("hipsolverDnXsyevjSetSortEig", CONV_MATH_FUNC, API_SPECIAL)),
("cusolverDnDestroySyevjInfo", ("hipsolverDnDestroySyevjInfo", CONV_MATH_FUNC, API_SPECIAL)),
("gesvdjInfo_t", ("hipsolverGesvdjInfo_t", CONV_TYPE, API_SPECIAL)),
("cusolverDnCreateGesvdjInfo", ("hipsolverDnCreateGesvdjInfo", CONV_MATH_FUNC, API_SPECIAL)),
("cusolverDnXgesvdjSetSortEig", ("hipsolverDnXgesvdjSetSortEig", CONV_MATH_FUNC, API_SPECIAL)),
("cusolverDnDestroyGesvdjInfo", ("hipsolverDnDestroyGesvdjInfo", CONV_MATH_FUNC, API_SPECIAL)),
("cusolverDnHandle_t", ("hipsolverDnHandle_t", CONV_TYPE, API_SPECIAL)),
("cusolverDnCreate", ("hipsolverDnCreate", CONV_MATH_FUNC, API_SPECIAL)),
("cusolverDnSetStream", ("hipsolverDnSetStream", CONV_MATH_FUNC, API_SPECIAL)),
("cusolverDnDestroy", ("hipsolverDnDestroy", CONV_MATH_FUNC, API_SPECIAL)),
# from aten/src/ATen/native/hip/linalg/HIPSolver.cpp
('cusolverDnParams_t', ('hipsolverDnParams_t', CONV_MATH_FUNC, API_SPECIAL)),
('cusolverDnCgeqrf', ('hipsolverDnCgeqrf', CONV_MATH_FUNC, API_SPECIAL)),
('cusolverDnCgeqrf_bufferSize', ('hipsolverDnCgeqrf_bufferSize', CONV_MATH_FUNC, API_SPECIAL)),
('cusolverDnCgesvd', ('hipsolverDnCgesvd', CONV_MATH_FUNC, API_SPECIAL)),
('cusolverDnCgesvd_bufferSize', ('hipsolverDnCgesvd_bufferSize', CONV_MATH_FUNC, API_SPECIAL)),
('cusolverDnCgesvdj', ('hipsolverDnCgesvdj', CONV_MATH_FUNC, API_SPECIAL)),
('cusolverDnCgesvdjBatched', ('hipsolverDnCgesvdjBatched', CONV_MATH_FUNC, API_SPECIAL)),
('cusolverDnCgesvdjBatched_bufferSize', ('hipsolverDnCgesvdjBatched_bufferSize', CONV_MATH_FUNC, API_SPECIAL)),
('cusolverDnCgesvdj_bufferSize', ('hipsolverDnCgesvdj_bufferSize', CONV_MATH_FUNC, API_SPECIAL)),
('cusolverDnCgetrf', ('hipsolverDnCgetrf', CONV_MATH_FUNC, API_SPECIAL)),
('cusolverDnCgetrf_bufferSize', ('hipsolverDnCgetrf_bufferSize', CONV_MATH_FUNC, API_SPECIAL)),
('cusolverDnCgetrs', ('hipsolverDnCgetrs', CONV_MATH_FUNC, API_SPECIAL)),
('cusolverDnCheevd', ('hipsolverDnCheevd', CONV_MATH_FUNC, API_SPECIAL)),
('cusolverDnCheevd_bufferSize', ('hipsolverDnCheevd_bufferSize', CONV_MATH_FUNC, API_SPECIAL)),
('cusolverDnCheevj', ('hipsolverDnCheevj', CONV_MATH_FUNC, API_SPECIAL)),
('cusolverDnCheevjBatched', ('hipsolverDnCheevjBatched', CONV_MATH_FUNC, API_SPECIAL)),
('cusolverDnCheevjBatched_bufferSize', ('hipsolverDnCheevjBatched_bufferSize', CONV_MATH_FUNC, API_SPECIAL)),
('cusolverDnCheevj_bufferSize', ('hipsolverDnCheevj_bufferSize', CONV_MATH_FUNC, API_SPECIAL)),
('cusolverDnCpotrf', ('hipsolverDnCpotrf', CONV_MATH_FUNC, API_SPECIAL)),
('cusolverDnCpotrfBatched', ('hipsolverDnCpotrfBatched', CONV_MATH_FUNC, API_SPECIAL)),
('cusolverDnCpotrf_bufferSize', ('hipsolverDnCpotrf_bufferSize', CONV_MATH_FUNC, API_SPECIAL)),
('cusolverDnCpotrs', ('hipsolverDnCpotrs', CONV_MATH_FUNC, API_SPECIAL)),
('cusolverDnCpotrsBatched', ('hipsolverDnCpotrsBatched', CONV_MATH_FUNC, API_SPECIAL)),
('cusolverDnCungqr', ('hipsolverDnCungqr', CONV_MATH_FUNC, API_SPECIAL)),
('cusolverDnCungqr_bufferSize', ('hipsolverDnCungqr_bufferSize', CONV_MATH_FUNC, API_SPECIAL)),
('cusolverDnCunmqr', ('hipsolverDnCunmqr', CONV_MATH_FUNC, API_SPECIAL)),
('cusolverDnCunmqr_bufferSize', ('hipsolverDnCunmqr_bufferSize', CONV_MATH_FUNC, API_SPECIAL)),
('cusolverDnDgeqrf', ('hipsolverDnDgeqrf', CONV_MATH_FUNC, API_SPECIAL)),
('cusolverDnDgeqrf_bufferSize', ('hipsolverDnDgeqrf_bufferSize', CONV_MATH_FUNC, API_SPECIAL)),
('cusolverDnDgesvd', ('hipsolverDnDgesvd', CONV_MATH_FUNC, API_SPECIAL)),
('cusolverDnDgesvd_bufferSize', ('hipsolverDnDgesvd_bufferSize', CONV_MATH_FUNC, API_SPECIAL)),
('cusolverDnDgesvdj', ('hipsolverDnDgesvdj', CONV_MATH_FUNC, API_SPECIAL)),
('cusolverDnDgesvdjBatched', ('hipsolverDnDgesvdjBatched', CONV_MATH_FUNC, API_SPECIAL)),
('cusolverDnDgesvdjBatched_bufferSize', ('hipsolverDnDgesvdjBatched_bufferSize', CONV_MATH_FUNC, API_SPECIAL)),
('cusolverDnDgesvdj_bufferSize', ('hipsolverDnDgesvdj_bufferSize', CONV_MATH_FUNC, API_SPECIAL)),
('cusolverDnDgetrf', ('hipsolverDnDgetrf', CONV_MATH_FUNC, API_SPECIAL)),
('cusolverDnDgetrf_bufferSize', ('hipsolverDnDgetrf_bufferSize', CONV_MATH_FUNC, API_SPECIAL)),
('cusolverDnDgetrs', ('hipsolverDnDgetrs', CONV_MATH_FUNC, API_SPECIAL)),
('cusolverDnDorgqr', ('hipsolverDnDorgqr', CONV_MATH_FUNC, API_SPECIAL)),
('cusolverDnDorgqr_bufferSize', ('hipsolverDnDorgqr_bufferSize', CONV_MATH_FUNC, API_SPECIAL)),
('cusolverDnDormqr', ('hipsolverDnDormqr', CONV_MATH_FUNC, API_SPECIAL)),
('cusolverDnDormqr_bufferSize', ('hipsolverDnDormqr_bufferSize', CONV_MATH_FUNC, API_SPECIAL)),
('cusolverDnDpotrf', ('hipsolverDnDpotrf', CONV_MATH_FUNC, API_SPECIAL)),
('cusolverDnDpotrfBatched', ('hipsolverDnDpotrfBatched', CONV_MATH_FUNC, API_SPECIAL)),
('cusolverDnDpotrf_bufferSize', ('hipsolverDnDpotrf_bufferSize', CONV_MATH_FUNC, API_SPECIAL)),
('cusolverDnDpotrs', ('hipsolverDnDpotrs', CONV_MATH_FUNC, API_SPECIAL)),
('cusolverDnDpotrsBatched', ('hipsolverDnDpotrsBatched', CONV_MATH_FUNC, API_SPECIAL)),
('cusolverDnDsyevd', ('hipsolverDnDsyevd', CONV_MATH_FUNC, API_SPECIAL)),
('cusolverDnDsyevd_bufferSize', ('hipsolverDnDsyevd_bufferSize', CONV_MATH_FUNC, API_SPECIAL)),
('cusolverDnDsyevj', ('hipsolverDnDsyevj', CONV_MATH_FUNC, API_SPECIAL)),
('cusolverDnDsyevjBatched', ('hipsolverDnDsyevjBatched', CONV_MATH_FUNC, API_SPECIAL)),
('cusolverDnDsyevjBatched_bufferSize', ('hipsolverDnDsyevjBatched_bufferSize', CONV_MATH_FUNC, API_SPECIAL)),
('cusolverDnDsyevj_bufferSize', ('hipsolverDnDsyevj_bufferSize', CONV_MATH_FUNC, API_SPECIAL)),
('cusolverDnSgeqrf', ('hipsolverDnSgeqrf', CONV_MATH_FUNC, API_SPECIAL)),
('cusolverDnSgeqrf_bufferSize', ('hipsolverDnSgeqrf_bufferSize', CONV_MATH_FUNC, API_SPECIAL)),
('cusolverDnSgesvd', ('hipsolverDnSgesvd', CONV_MATH_FUNC, API_SPECIAL)),
('cusolverDnSgesvd_bufferSize', ('hipsolverDnSgesvd_bufferSize', CONV_MATH_FUNC, API_SPECIAL)),
('cusolverDnSgesvdj', ('hipsolverDnSgesvdj', CONV_MATH_FUNC, API_SPECIAL)),
('cusolverDnSgesvdjBatched', ('hipsolverDnSgesvdjBatched', CONV_MATH_FUNC, API_SPECIAL)),
('cusolverDnSgesvdjBatched_bufferSize', ('hipsolverDnSgesvdjBatched_bufferSize', CONV_MATH_FUNC, API_SPECIAL)),
('cusolverDnSgesvdj_bufferSize', ('hipsolverDnSgesvdj_bufferSize', CONV_MATH_FUNC, API_SPECIAL)),
('cusolverDnSgetrf', ('hipsolverDnSgetrf', CONV_MATH_FUNC, API_SPECIAL)),
('cusolverDnSgetrf_bufferSize', ('hipsolverDnSgetrf_bufferSize', CONV_MATH_FUNC, API_SPECIAL)),
('cusolverDnSgetrs', ('hipsolverDnSgetrs', CONV_MATH_FUNC, API_SPECIAL)),
('cusolverDnSorgqr', ('hipsolverDnSorgqr', CONV_MATH_FUNC, API_SPECIAL)),
('cusolverDnSorgqr_bufferSize', ('hipsolverDnSorgqr_bufferSize', CONV_MATH_FUNC, API_SPECIAL)),
('cusolverDnSormqr', ('hipsolverDnSormqr', CONV_MATH_FUNC, API_SPECIAL)),
('cusolverDnSormqr_bufferSize', ('hipsolverDnSormqr_bufferSize', CONV_MATH_FUNC, API_SPECIAL)),
('cusolverDnSpotrf', ('hipsolverDnSpotrf', CONV_MATH_FUNC, API_SPECIAL)),
('cusolverDnSpotrfBatched', ('hipsolverDnSpotrfBatched', CONV_MATH_FUNC, API_SPECIAL)),
('cusolverDnSpotrf_bufferSize', ('hipsolverDnSpotrf_bufferSize', CONV_MATH_FUNC, API_SPECIAL)),
('cusolverDnSpotrs', ('hipsolverDnSpotrs', CONV_MATH_FUNC, API_SPECIAL)),
('cusolverDnSpotrsBatched', ('hipsolverDnSpotrsBatched', CONV_MATH_FUNC, API_SPECIAL)),
('cusolverDnSsyevd', ('hipsolverDnSsyevd', CONV_MATH_FUNC, API_SPECIAL)),
('cusolverDnSsyevd_bufferSize', ('hipsolverDnSsyevd_bufferSize', CONV_MATH_FUNC, API_SPECIAL)),
('cusolverDnSsyevj', ('hipsolverDnSsyevj', CONV_MATH_FUNC, API_SPECIAL)),
('cusolverDnSsyevjBatched', ('hipsolverDnSsyevjBatched', CONV_MATH_FUNC, API_SPECIAL)),
('cusolverDnSsyevjBatched_bufferSize', ('hipsolverDnSsyevjBatched_bufferSize', CONV_MATH_FUNC, API_SPECIAL)),
('cusolverDnSsyevj_bufferSize', ('hipsolverDnSsyevj_bufferSize', CONV_MATH_FUNC, API_SPECIAL)),
('cusolverDnXgeqrf', ('hipsolverDnXgeqrf', CONV_MATH_FUNC, API_SPECIAL)),
('cusolverDnXgeqrf_bufferSize', ('hipsolverDnXgeqrf_bufferSize', CONV_MATH_FUNC, API_SPECIAL)),
('cusolverDnXpotrf', ('hipsolverDnXpotrf', CONV_MATH_FUNC, API_SPECIAL)),
('cusolverDnXpotrf_bufferSize', ('hipsolverDnXpotrf_bufferSize', CONV_MATH_FUNC, API_SPECIAL)),
('cusolverDnXpotrs', ('hipsolverDnXpotrs', CONV_MATH_FUNC, API_SPECIAL)),
('cusolverDnXsyevd', ('hipsolverDnXsyevd', CONV_MATH_FUNC, API_SPECIAL)),
('cusolverDnXsyevd_bufferSize', ('hipsolverDnXsyevd_bufferSize', CONV_MATH_FUNC, API_SPECIAL)),
('cusolverDnZgeqrf', ('hipsolverDnZgeqrf', CONV_MATH_FUNC, API_SPECIAL)),
('cusolverDnZgeqrf_bufferSize', ('hipsolverDnZgeqrf_bufferSize', CONV_MATH_FUNC, API_SPECIAL)),
('cusolverDnZgesvd', ('hipsolverDnZgesvd', CONV_MATH_FUNC, API_SPECIAL)),
('cusolverDnZgesvd_bufferSize', ('hipsolverDnZgesvd_bufferSize', CONV_MATH_FUNC, API_SPECIAL)),
('cusolverDnZgesvdj', ('hipsolverDnZgesvdj', CONV_MATH_FUNC, API_SPECIAL)),
('cusolverDnZgesvdjBatched', ('hipsolverDnZgesvdjBatched', CONV_MATH_FUNC, API_SPECIAL)),
('cusolverDnZgesvdjBatched_bufferSize', ('hipsolverDnZgesvdjBatched_bufferSize', CONV_MATH_FUNC, API_SPECIAL)),
('cusolverDnZgesvdj_bufferSize', ('hipsolverDnZgesvdj_bufferSize', CONV_MATH_FUNC, API_SPECIAL)),
('cusolverDnZgetrf', ('hipsolverDnZgetrf', CONV_MATH_FUNC, API_SPECIAL)),
('cusolverDnZgetrf_bufferSize', ('hipsolverDnZgetrf_bufferSize', CONV_MATH_FUNC, API_SPECIAL)),
('cusolverDnZgetrs', ('hipsolverDnZgetrs', CONV_MATH_FUNC, API_SPECIAL)),
('cusolverDnZheevd', ('hipsolverDnZheevd', CONV_MATH_FUNC, API_SPECIAL)),
('cusolverDnZheevd_bufferSize', ('hipsolverDnZheevd_bufferSize', CONV_MATH_FUNC, API_SPECIAL)),
('cusolverDnZheevj', ('hipsolverDnZheevj', CONV_MATH_FUNC, API_SPECIAL)),
('cusolverDnZheevjBatched', ('hipsolverDnZheevjBatched', CONV_MATH_FUNC, API_SPECIAL)),
('cusolverDnZheevjBatched_bufferSize', ('hipsolverDnZheevjBatched_bufferSize', CONV_MATH_FUNC, API_SPECIAL)),
('cusolverDnZheevj_bufferSize', ('hipsolverDnZheevj_bufferSize', CONV_MATH_FUNC, API_SPECIAL)),
('cusolverDnZpotrf', ('hipsolverDnZpotrf', CONV_MATH_FUNC, API_SPECIAL)),
('cusolverDnZpotrfBatched', ('hipsolverDnZpotrfBatched', CONV_MATH_FUNC, API_SPECIAL)),
('cusolverDnZpotrf_bufferSize', ('hipsolverDnZpotrf_bufferSize', CONV_MATH_FUNC, API_SPECIAL)),
('cusolverDnZpotrs', ('hipsolverDnZpotrs', CONV_MATH_FUNC, API_SPECIAL)),
('cusolverDnZpotrsBatched', ('hipsolverDnZpotrsBatched', CONV_MATH_FUNC, API_SPECIAL)),
('cusolverDnZungqr', ('hipsolverDnZungqr', CONV_MATH_FUNC, API_SPECIAL)),
('cusolverDnZungqr_bufferSize', ('hipsolverDnZungqr_bufferSize', CONV_MATH_FUNC, API_SPECIAL)),
('cusolverDnZunmqr', ('hipsolverDnZunmqr', CONV_MATH_FUNC, API_SPECIAL)),
('cusolverDnZunmqr_bufferSize', ('hipsolverDnZunmqr_bufferSize', CONV_MATH_FUNC, API_SPECIAL)),
# sytrf
('cusolverDnDsytrf_bufferSize', ('hipsolverDnDsytrf_bufferSize', CONV_MATH_FUNC, API_SPECIAL)),
('cusolverDnSsytrf_bufferSize', ('hipsolverDnSsytrf_bufferSize', CONV_MATH_FUNC, API_SPECIAL)),
('cusolverDnZsytrf_bufferSize', ('hipsolverDnZsytrf_bufferSize', CONV_MATH_FUNC, API_SPECIAL)),
('cusolverDnCsytrf_bufferSize', ('hipsolverDnCsytrf_bufferSize', CONV_MATH_FUNC, API_SPECIAL)),
('cusolverDnDsytrf', ('hipsolverDnDsytrf', CONV_MATH_FUNC, API_SPECIAL)),
('cusolverDnSsytrf', ('hipsolverDnSsytrf', CONV_MATH_FUNC, API_SPECIAL)),
('cusolverDnZsytrf', ('hipsolverDnZsytrf', CONV_MATH_FUNC, API_SPECIAL)),
('cusolverDnCsytrf', ('hipsolverDnCsytrf', CONV_MATH_FUNC, API_SPECIAL)),
# gesdva strided
(
'cusolverDnSgesvdaStridedBatched_bufferSize',
('hipsolverDnSgesvdaStridedBatched_bufferSize', CONV_MATH_FUNC, API_SPECIAL)
),
(
'cusolverDnDgesvdaStridedBatched_bufferSize',
('hipsolverDnDgesvdaStridedBatched_bufferSize', CONV_MATH_FUNC, API_SPECIAL)
),
(
'cusolverDnCgesvdaStridedBatched_bufferSize',
('hipsolverDnCgesvdaStridedBatched_bufferSize', CONV_MATH_FUNC, API_SPECIAL)
),
(
'cusolverDnZgesvdaStridedBatched_bufferSize',
('hipsolverDnZgesvdaStridedBatched_bufferSize', CONV_MATH_FUNC, API_SPECIAL)
),
('cusolverDnSgesvdaStridedBatched', ('hipsolverDnSgesvdaStridedBatched', CONV_MATH_FUNC, API_SPECIAL)),
('cusolverDnDgesvdaStridedBatched', ('hipsolverDnDgesvdaStridedBatched', CONV_MATH_FUNC, API_SPECIAL)),
('cusolverDnCgesvdaStridedBatched', ('hipsolverDnCgesvdaStridedBatched', CONV_MATH_FUNC, API_SPECIAL)),
('cusolverDnZgesvdaStridedBatched', ('hipsolverDnZgesvdaStridedBatched', CONV_MATH_FUNC, API_SPECIAL)),
# gesvdj SetXXX
('cusolverDnXgesvdjSetTolerance', ('hipsolverDnXgesvdjSetTolerance', CONV_MATH_FUNC, API_SPECIAL)),
('cusolverDnXgesvdjSetMaxSweeps', ('hipsolverDnXgesvdjSetMaxSweeps', CONV_MATH_FUNC, API_SPECIAL)),
]
)
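# Replacements that only make sense inside the PyTorch sources, e.g. USE_CUDA -> USE_ROCM,
# the cuda:: stream/guard/allocator helpers -> their *MasqueradingAsCUDA HIP wrappers, and
# cuDNN RNN modes -> MIOpen RNN modes. Each entry has the form
# (cuda_identifier, (hip_identifier, API_PYTORCH)).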
PYTORCH_SPECIFIC_MAPPINGS = collections.OrderedDict(
[
("USE_CUDA", ("USE_ROCM", API_PYTORCH)),
("TORCH_CUDA_CPP_API", ("TORCH_HIP_CPP_API", API_PYTORCH)),
("TORCH_CUDA_CU_API", ("TORCH_HIP_API", API_PYTORCH)),
("CUDA_VERSION", ("TORCH_HIP_VERSION", API_PYTORCH)),
("cudaHostAllocator", ("hipHostAllocator", API_PYTORCH)),
("cudaDeviceAllocator", ("hipDeviceAllocator", API_PYTORCH)),
("define MAX_NUM_BLOCKS 200", ("define MAX_NUM_BLOCKS 64", API_PYTORCH)),
("cuda::CUDAGuard", ("hip::HIPGuardMasqueradingAsCUDA", API_PYTORCH)),
("CUDAGuard", ("HIPGuardMasqueradingAsCUDA", API_PYTORCH)),
(
"cuda::OptionalCUDAGuard",
("hip::OptionalHIPGuardMasqueradingAsCUDA", API_PYTORCH),
),
("OptionalCUDAGuard", ("OptionalHIPGuardMasqueradingAsCUDA", API_PYTORCH)),
(
"cuda::CUDAStreamGuard",
("hip::HIPStreamGuardMasqueradingAsCUDA", API_PYTORCH),
),
("CUDAStreamGuard", ("HIPStreamGuardMasqueradingAsCUDA", API_PYTORCH)),
(
"cuda::OptionalCUDAStreamGuard",
("hip::OptionalHIPStreamGuardMasqueradingAsCUDA", API_PYTORCH),
),
(
"OptionalCUDAStreamGuard",
("OptionalHIPStreamGuardMasqueradingAsCUDA", API_PYTORCH),
),
(
"cuda::CUDAMultiStreamGuard",
("hip::HIPMultiStreamGuardMasqueradingAsCUDA", API_PYTORCH),
),
(
"CUDAMultiStreamGuard",
("HIPMultiStreamGuardMasqueradingAsCUDA", API_PYTORCH),
),
        # Only the get() call needs to be transformed this way; all the other
        # CUDACachingAllocator calls can go straight to the normal hip::HIPCachingAllocator versions.
(
"cuda::CUDACachingAllocator::get",
("hip::HIPCachingAllocatorMasqueradingAsCUDA::get", API_PYTORCH),
),
(
"CUDACachingAllocator::get",
("HIPCachingAllocatorMasqueradingAsCUDA::get", API_PYTORCH),
),
(
"cuda::CUDACachingAllocator::recordStream",
(
"hip::HIPCachingAllocatorMasqueradingAsCUDA::recordStreamMasqueradingAsCUDA",
API_PYTORCH,
),
),
(
"CUDACachingAllocator::recordStream",
(
"HIPCachingAllocatorMasqueradingAsCUDA::recordStreamMasqueradingAsCUDA",
API_PYTORCH,
),
),
(
"cuda::CUDAAllocator::recordStream",
(
"hip::HIPCachingAllocatorMasqueradingAsCUDA::recordStreamMasqueradingAsCUDA",
API_PYTORCH,
),
),
(
"CUDAAllocator::recordStream",
(
"HIPCachingAllocatorMasqueradingAsCUDA::recordStreamMasqueradingAsCUDA",
API_PYTORCH,
),
),
("cuda::CUDAStream", ("hip::HIPStreamMasqueradingAsCUDA", API_PYTORCH)),
("CUDAStream", ("HIPStreamMasqueradingAsCUDA", API_PYTORCH)),
(
"cuda::getStreamFromPool",
("hip::getStreamFromPoolMasqueradingAsCUDA", API_PYTORCH),
),
("getStreamFromPool", ("getStreamFromPoolMasqueradingAsCUDA", API_PYTORCH)),
(
"cuda::getDefaultCUDAStream",
("hip::getDefaultHIPStreamMasqueradingAsCUDA", API_PYTORCH),
),
(
"cuda::getStreamFromExternal",
("hip::getStreamFromExternalMasqueradingAsCUDA", API_PYTORCH),
),
("getStreamFromExternal", ("getStreamFromExternalMasqueradingAsCUDA", API_PYTORCH)),
(
"getDefaultCUDAStream",
("getDefaultHIPStreamMasqueradingAsCUDA", API_PYTORCH),
),
(
"cuda::getCurrentCUDAStream",
("hip::getCurrentHIPStreamMasqueradingAsCUDA", API_PYTORCH),
),
(
"getCurrentCUDAStream",
("getCurrentHIPStreamMasqueradingAsCUDA", API_PYTORCH),
),
(
"cuda::setCurrentCUDAStream",
("hip::setCurrentHIPStreamMasqueradingAsCUDA", API_PYTORCH),
),
(
"setCurrentCUDAStream",
("setCurrentHIPStreamMasqueradingAsCUDA", API_PYTORCH),
),
(
"ATen/cudnn/Handle.h",
("ATen/miopen/Handle.h", API_PYTORCH),
),
# TODO: Undo this special-case; see the header for motivation behind this
# hack. It's VERY important this is only applied to PyTorch HIPify.
(
"c10/cuda/CUDAGuard.h",
("ATen/hip/impl/HIPGuardImplMasqueradingAsCUDA.h", API_PYTORCH),
),
(
"c10/cuda/CUDACachingAllocator.h",
("ATen/hip/impl/HIPCachingAllocatorMasqueradingAsCUDA.h", API_PYTORCH),
),
(
"c10/cuda/CUDAStream.h",
("ATen/hip/impl/HIPStreamMasqueradingAsCUDA.h", API_PYTORCH),
),
("gloo/cuda.h", ("gloo/hip.h", API_PYTORCH)),
(
"gloo/cuda_allreduce_halving_doubling.h",
("gloo/hip_allreduce_halving_doubling.h", API_PYTORCH),
),
(
"gloo/cuda_allreduce_halving_doubling_pipelined.h",
("gloo/hip_allreduce_halving_doubling_pipelined.h", API_PYTORCH),
),
("gloo/cuda_allreduce_ring.h", ("gloo/hip_allreduce_ring.h", API_PYTORCH)),
(
"gloo/cuda_broadcast_one_to_all.h",
("gloo/hip_broadcast_one_to_all.h", API_PYTORCH),
),
(
"gloo::CudaAllreduceHalvingDoublingPipelined",
("gloo::HipAllreduceHalvingDoublingPipelined", API_PYTORCH),
),
("gloo::CudaBroadcastOneToAll", ("gloo::HipBroadcastOneToAll", API_PYTORCH)),
("gloo::CudaHostWorkspace", ("gloo::HipHostWorkspace", API_PYTORCH)),
("gloo::CudaDeviceWorkspace", ("gloo::HipDeviceWorkspace", API_PYTORCH)),
("CUDNN_RNN_RELU", ("miopenRNNRELU", API_PYTORCH)),
("CUDNN_RNN_TANH", ("miopenRNNTANH", API_PYTORCH)),
("CUDNN_LSTM", ("miopenLSTM", API_PYTORCH)),
("CUDNN_GRU", ("miopenGRU", API_PYTORCH)),
("cudnnRNNMode_t", ("miopenRNNMode_t", API_PYTORCH)),
("magma_queue_create_from_cuda", ("magma_queue_create_from_hip", API_PYTORCH)),
]
)
CAFFE2_SPECIFIC_MAPPINGS = collections.OrderedDict(
[
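        # Editor-added note: this identity mapping keeps the env var name unchanged;
        # the blanket "CUDA" -> "HIP" rule further down would otherwise rename it.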
("PYTORCH_NO_CUDA_MEMORY_CACHING", ("PYTORCH_NO_CUDA_MEMORY_CACHING", API_CAFFE2)),
("cuda_stream", ("hip_stream", API_CAFFE2)),
        # if the header is already under a native hip directory, there is no need
        # to add a hip path to it; the trie in the hipify script relies on this
        # mapping order to prevent further replacement
("/hip/", ("/hip/", API_CAFFE2)),
("/context_gpu", ("/hip/context_gpu", API_CAFFE2)),
("/common_gpu", ("/hip/common_gpu", API_CAFFE2)),
("/cuda_nccl_gpu", ("/hip/hip_nccl_gpu", API_CAFFE2)),
("/mixed_utils", ("/hip/mixed_utils", API_CAFFE2)),
("/operator_fallback_gpu", ("/hip/operator_fallback_gpu", API_CAFFE2)),
(
"/spatial_batch_norm_op_impl",
("/hip/spatial_batch_norm_op_impl", API_CAFFE2),
),
(
"/recurrent_network_executor_gpu",
("/hip/recurrent_network_executor_gpu", API_CAFFE2),
),
(
"/generate_proposals_op_util_nms_gpu",
("/hip/generate_proposals_op_util_nms_gpu", API_CAFFE2),
),
("/max_pool_with_index_gpu", ("/hip/max_pool_with_index_gpu", API_CAFFE2)),
("/THCCachingAllocator_gpu", ("/hip/THCCachingAllocator_gpu", API_CAFFE2)),
("/top_k_heap_selection", ("/hip/top_k_heap_selection", API_CAFFE2)),
("/top_k_radix_selection", ("/hip/top_k_radix_selection", API_CAFFE2)),
("/GpuAtomics", ("/hip/GpuAtomics", API_CAFFE2)),
("/GpuDefs", ("/hip/GpuDefs", API_CAFFE2)),
("/GpuScanUtils", ("/hip/GpuScanUtils", API_CAFFE2)),
("/GpuBitonicSort", ("/hip/GpuBitonicSort", API_CAFFE2)),
("/math/reduce.cuh", ("/math/hip/reduce.cuh", API_CAFFE2)),
("/sgd/adagrad_fused_op_gpu.cuh", ("/sgd/hip/adagrad_fused_op_gpu.cuh", API_CAFFE2)),
("/operators/segment_reduction_op_gpu.cuh", ("/operators/hip/segment_reduction_op_gpu.cuh", API_CAFFE2)),
("/gather_op.cuh", ("/hip/gather_op.cuh", API_CAFFE2)),
("caffe2/core/common_cudnn.h", ("caffe2/core/hip/common_miopen.h", API_CAFFE2)),
("REGISTER_CUDA_OPERATOR", ("REGISTER_HIP_OPERATOR", API_CAFFE2)),
("CUDA_1D_KERNEL_LOOP", ("HIP_1D_KERNEL_LOOP", API_CAFFE2)),
("CUDAContext", ("HIPContext", API_CAFFE2)),
("CAFFE_CUDA_NUM_THREADS", ("CAFFE_HIP_NUM_THREADS", API_CAFFE2)),
("HasCudaGPU", ("HasHipGPU", API_CAFFE2)),
("__expf", ("expf", API_CAFFE2)),
("CUBLAS_ENFORCE", ("HIPBLAS_ENFORCE", API_CAFFE2)),
("CUBLAS_CHECK", ("HIPBLAS_CHECK", API_CAFFE2)),
("cublas_handle", ("hipblas_handle", API_CAFFE2)),
("CURAND_ENFORCE", ("HIPRAND_ENFORCE", API_CAFFE2)),
("CURAND_CHECK", ("HIPRAND_CHECK", API_CAFFE2)),
("curandGenerateUniform", ("hiprandGenerateUniform", API_CAFFE2)),
("curand_generator", ("hiprand_generator", API_CAFFE2)),
("CaffeCudaGetDevice", ("CaffeHipGetDevice", API_CAFFE2)),
# do not rename CUDA_KERNEL_ASSERT, lazyInitCUDA in caffe2 sources
# the ordered dict guarantees this pattern will match first, before "CUDA"
("CUDA_KERNEL_ASSERT", ("CUDA_KERNEL_ASSERT", API_CAFFE2)),
("lazyInitCUDA", ("lazyInitCUDA", API_CAFFE2)),
("CUDA_VERSION", ("TORCH_HIP_VERSION", API_CAFFE2)),
("CUDA", ("HIP", API_CAFFE2)),
("Cuda", ("Hip", API_CAFFE2)),
("cuda_", ("hip_", API_CAFFE2)),
("_cuda", ("_hip", API_CAFFE2)),
("CUDNN", ("MIOPEN", API_CAFFE2)),
("CuDNN", ("MIOPEN", API_CAFFE2)),
("cudnn", ("miopen", API_CAFFE2)),
("namespace cuda", ("namespace hip", API_CAFFE2)),
("cuda::CUDAGuard", ("hip::HIPGuard", API_CAFFE2)),
("cuda::OptionalCUDAGuard", ("hip::OptionalHIPGuard", API_CAFFE2)),
("cuda::CUDAStreamGuard", ("hip::HIPStreamGuard", API_CAFFE2)),
("cuda::OptionalCUDAStreamGuard", ("hip::OptionalHIPStreamGuard", API_CAFFE2)),
("c10/cuda/CUDAGuard.h", ("c10/hip/HIPGuard.h", API_CAFFE2)),
("gloo/cuda", ("gloo/hip", API_CAFFE2)),
]
)
# We must tread very carefully here. Blanket conversions like those done
# in CAFFE2_SPECIFIC_MAPPINGS are not presently supported for PyTorch,
# because a regex for CUDA will also match a filename like CUDAGuard.h,
# but the HIPIFY script doesn't presently move the file, so the substitution
# would be invalid. Instead, we specifically list out every identifier
# and file from c10/cuda which may be used externally, and do substitutions
# this way.
#
# NB: if you want a transformation to ONLY apply to the c10/ directory,
# put it as API_CAFFE2
C10_MAPPINGS = collections.OrderedDict(
[
("CUDA_VERSION", ("TORCH_HIP_VERSION", API_PYTORCH)),
("CUDA_LAUNCH_BLOCKING=1", ("AMD_SERIALIZE_KERNEL=3", API_C10)),
("CUDA_LAUNCH_BLOCKING", ("AMD_SERIALIZE_KERNEL", API_C10)),
("cuda::compat::", ("hip::compat::", API_C10)),
("c10/cuda/CUDAAlgorithm.h", ("c10/hip/HIPAlgorithm.h", API_C10)),
("c10/cuda/CUDADeviceAssertion.h", ("c10/hip/HIPDeviceAssertion.h", API_C10)),
("c10/cuda/CUDADeviceAssertionHost.h", ("c10/hip/HIPDeviceAssertionHost.h", API_C10)),
("c10/cuda/CUDAException.h", ("c10/hip/HIPException.h", API_C10)),
("c10/cuda/CUDAMacros.h", ("c10/hip/HIPMacros.h", API_C10)),
("c10/cuda/CUDAMathCompat.h", ("c10/hip/HIPMathCompat.h", API_C10)),
("c10/cuda/CUDAFunctions.h", ("c10/hip/HIPFunctions.h", API_C10)),
("c10/cuda/CUDAMiscFunctions.h", ("c10/hip/HIPMiscFunctions.h", API_C10)),
("c10/cuda/CUDAStream.h", ("c10/hip/HIPStream.h", API_C10)),
("c10/cuda/CUDAGraphsC10Utils.h", ("c10/hip/HIPGraphsC10Utils.h", API_C10)),
("c10/cuda/CUDAAllocatorConfig.h", ("c10/hip/HIPAllocatorConfig.h", API_C10)),
("c10/cuda/CUDACachingAllocator.h", ("c10/hip/HIPCachingAllocator.h", API_C10)),
("c10/cuda/impl/CUDATest.h", ("c10/hip/impl/HIPTest.h", API_C10)),
("c10/cuda/impl/CUDAGuardImpl.h", ("c10/hip/impl/HIPGuardImpl.h", API_C10)),
(
"c10/cuda/impl/cuda_cmake_macros.h",
("c10/hip/impl/hip_cmake_macros.h", API_C10),
),
("C10_CUDA_CHECK", ("C10_HIP_CHECK", API_C10)),
("C10_CUDA_CHECK_WARN", ("C10_HIP_CHECK_WARN", API_C10)),
("C10_CUDA_ERROR_HANDLED", ("C10_HIP_ERROR_HANDLED", API_C10)),
("C10_CUDA_IGNORE_ERROR", ("C10_HIP_IGNORE_ERROR", API_C10)),
("C10_CUDA_CLEAR_ERROR", ("C10_HIP_CLEAR_ERROR", API_C10)),
("c10::cuda", ("c10::hip", API_C10)),
("cuda::CUDAStream", ("hip::HIPStream", API_C10)),
("CUDAStream", ("HIPStream", API_C10)),
# This substitution is not permissible, because there's another copy of this
# function in torch/cuda.h
# ("cuda::device_count", ("hip::device_count", API_C10)),
("cuda::current_device", ("hip::current_device", API_C10)),
("cuda::set_device", ("hip::set_device", API_C10)),
("cuda::device_synchronize", ("hip::device_synchronize", API_C10)),
("cuda::getStreamFromPool", ("hip::getStreamFromPool", API_C10)),
("getStreamFromPool", ("getStreamFromPool", API_C10)),
("cuda::getDefaultCUDAStream", ("hip::getDefaultHIPStream", API_C10)),
("getDefaultCUDAStream", ("getDefaultHIPStream", API_C10)),
("cuda::getCurrentCUDAStream", ("hip::getCurrentHIPStream", API_C10)),
("getCurrentCUDAStream", ("getCurrentHIPStream", API_C10)),
("cuda::get_cuda_check_prefix", ("hip::get_cuda_check_prefix", API_C10)),
("cuda::setCurrentCUDAStream", ("hip::setCurrentHIPStream", API_C10)),
("setCurrentCUDAStream", ("setCurrentHIPStream", API_C10)),
("cuda::CUDACachingAllocator", ("hip::HIPCachingAllocator", API_C10)),
("CUDACachingAllocator", ("HIPCachingAllocator", API_C10)),
("cuda::CUDAAllocatorConfig", ("hip::HIPAllocatorConfig", API_C10)),
("CUDAAllocatorConfig", ("HIPAllocatorConfig", API_C10)),
("pinned_use_cuda_host_register", ("pinned_use_hip_host_register", API_C10)),
("c10::cuda::CUDAAllocator", ("c10::hip::HIPAllocator", API_C10)),
("cuda::CUDAAllocator", ("hip::HIPAllocator", API_C10)),
("CUDAStreamCaptureModeGuard", ("HIPStreamCaptureModeGuard", API_C10)),
("cuda::CUDAStreamCaptureModeGuard", ("cuda::HIPStreamCaptureModeGuard", API_C10)),
("CUDAAllocator", ("HIPAllocator", API_C10)),
("C10_CUDA_KERNEL_LAUNCH_CHECK", ("C10_HIP_KERNEL_LAUNCH_CHECK", API_C10))
]
)
# NB: C10 mappings are more specific than Caffe2 mappings, so run them
# first
CUDA_TO_HIP_MAPPINGS = [
CUDA_IDENTIFIER_MAP,
CUDA_TYPE_NAME_MAP,
CUDA_INCLUDE_MAP,
CUDA_SPECIAL_MAP,
C10_MAPPINGS,
PYTORCH_SPECIFIC_MAPPINGS,
CAFFE2_SPECIFIC_MAPPINGS,
]
```
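The tables above are consumed by hipify_python.py (the next file), which compiles them into trie-backed regexes. As a rough, editor-added sketch of the data layout only, the snippet below flattens the ordered tables into a single src -> dst dict; the printed values are examples, and real consumers should go through the hipify script rather than this shortcut.
```py
# Editor-added sketch: flatten the ordered mapping tables into one dict.
# Earlier tables win ties, mirroring the "run C10 mappings first" note above.
from torch.utils.hipify.cuda_to_hip_mappings import CUDA_TO_HIP_MAPPINGS

flat = {}
for table in CUDA_TO_HIP_MAPPINGS:
    for src, (dst, *_meta) in table.items():
        flat.setdefault(src, dst)

print(flat.get("cudaMalloc"))             # e.g. "hipMalloc"
print(flat.get("c10/cuda/CUDAStream.h"))  # e.g. "c10/hip/HIPStream.h"
```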
|
===========================================================================================================================
SOURCE CODE FILE: hipify_python.py
LINES: 10
SIZE: 47.06 KB
PATH: scripts\freecad_env\Lib\site-packages\torch\utils\hipify\hipify_python.py
ENCODING: utf-8
```py
#!/usr/bin/env python3
# mypy: allow-untyped-defs
""" The Python Hipify script.
##
# Copyright (c) 2015-2016 Advanced Micro Devices, Inc. All rights reserved.
# 2017-2018 Advanced Micro Devices, Inc. and
# Facebook Inc. All rights reserved.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
"""
import argparse
import fnmatch
import re
import shutil
import sys
import os
from . import constants
from .cuda_to_hip_mappings import CUDA_TO_HIP_MAPPINGS
from .cuda_to_hip_mappings import MATH_TRANSPILATIONS
from typing import Optional
from collections.abc import Iterator
from collections.abc import Mapping, Iterable
from enum import Enum
import functools
import hashlib
class CurrentState(Enum):
INITIALIZED = 1
DONE = 2
class HipifyResult:
def __init__(self, current_state, hipified_path):
self.current_state = current_state
self.hipified_path = hipified_path
self.status = ""
def __str__(self):
return (f"HipifyResult:: current_state: {self.current_state}, hipified_path : {self.hipified_path}, status: {self.status}")
HipifyFinalResult = dict[str, HipifyResult]
HIPIFY_C_BREADCRUMB = "// !!! This is a file automatically generated by hipify!!!\n"
HIPIFY_FINAL_RESULT: HipifyFinalResult = {}
# Hardcode the PyTorch template map
"""This dictionary provides the mapping from PyTorch kernel template types
to their actual types."""
PYTORCH_TEMPLATE_MAP = {"Dtype": "scalar_t", "T": "scalar_t"}
__all__ = ['InputError', 'openf', 'bcolors', 'GeneratedFileCleaner', 'match_extensions', 'matched_files_iter',
'preprocess_file_and_save_result', 'compute_stats', 'add_dim3', 'processKernelLaunches', 'find_closure_group',
'find_bracket_group', 'find_parentheses_group', 'replace_math_functions', 'hip_header_magic', 'replace_extern_shared',
           'get_hip_file_path', 'is_out_of_place', 'is_pytorch_file', 'is_cusparse_file', 'is_special_file', 'is_caffe2_gpu_file',
           'Trie', 'preprocessor', 'file_specific_replacement', 'file_add_header',
'fix_static_global_kernels', 'extract_arguments', 'str2bool', 'CurrentState', 'HipifyResult', 'hipify']
class InputError(Exception):
# Exception raised for errors in the input.
def __init__(self, message):
super().__init__(message)
self.message = message
def __str__(self):
return f"Input error: {self.message}"
def openf(filename, mode):
return open(filename, mode, errors='ignore')
# Color coding for printing
class bcolors:
HEADER = '\033[95m'
OKBLUE = '\033[94m'
OKGREEN = '\033[92m'
WARNING = '\033[93m'
FAIL = '\033[91m'
ENDC = '\033[0m'
BOLD = '\033[1m'
UNDERLINE = '\033[4m'
# To the programmer, the outputs of hipify are usually intermediate artifacts.
# This class lets users of hipify request a cleanup by running hipify and the
# compilation inside a `with` block that instantiates this context manager
# with keep_intermediates=False.
# The main use case is cpp_extensions, specifically the load method.
# It is a good idea to keep intermediates (in case of errors, or to avoid
# recompiling unchanged files), but in cases where you don't want to
# keep them (e.g. in CI), this can be used to remove the files.
class GeneratedFileCleaner:
"""Context Manager to clean up generated files"""
def __init__(self, keep_intermediates=False):
self.keep_intermediates = keep_intermediates
self.files_to_clean = set()
self.dirs_to_clean = []
def __enter__(self):
return self
def open(self, fn, *args, **kwargs):
if not os.path.exists(fn):
self.files_to_clean.add(os.path.abspath(fn))
return open(fn, *args, **kwargs)
def makedirs(self, dn, exist_ok=False):
parent, n = os.path.split(dn)
if not n:
parent, n = os.path.split(parent)
if parent and n and not os.path.exists(parent):
self.makedirs(parent, exist_ok=True)
if not os.path.isdir(dn) or not exist_ok:
os.mkdir(dn)
self.dirs_to_clean.append(os.path.abspath(dn))
def __exit__(self, type, value, traceback):
if not self.keep_intermediates:
for f in self.files_to_clean:
os.unlink(f)
for d in self.dirs_to_clean[::-1]:
os.rmdir(d)
# Follow UNIX convention for paths to use '/' instead of '\\' on Windows
def _to_unix_path(path: str) -> str:
return path.replace(os.sep, '/')
def match_extensions(filename: str, extensions: Iterable) -> bool:
"""Helper method to see if filename ends with certain extension"""
return any(filename.endswith(e) for e in extensions)
def _fnmatch(filepath, patterns):
return any(fnmatch.fnmatch(filepath, pattern) for pattern in patterns)
def matched_files_iter(
root_path: str,
includes: Iterable = (),
ignores: Iterable = (),
extensions: Iterable = (),
out_of_place_only: bool = False,
is_pytorch_extension: bool = False) -> Iterator[str]:
exact_matches = set(includes)
# This is a very rough heuristic; really, we want to avoid scanning
# any file which is not checked into source control, but this script
# needs to work even if you're in a Git or Hg checkout, so easier to
# just block the biggest time sinks that won't matter in the
# end.
for (abs_dirpath, dirs, filenames) in os.walk(root_path, topdown=True):
rel_dirpath = os.path.relpath(abs_dirpath, root_path)
if rel_dirpath == '.':
            # These dirs.remove() calls are O(n) in the number of entries, but we
            # only prune at the top-level directory, so the cost is negligible.
if ".git" in dirs:
dirs.remove(".git")
if "build" in dirs:
dirs.remove("build")
if "third_party" in dirs:
dirs.remove("third_party")
dirs.append("third_party/nvfuser")
for filename in filenames:
filepath = _to_unix_path(os.path.join(abs_dirpath, filename))
rel_filepath = _to_unix_path(os.path.join(rel_dirpath, filename))
# We respect extensions, UNLESS you wrote the entire
# filename verbatim, in which case we always accept it
if (
_fnmatch(filepath, includes)
and (not _fnmatch(filepath, ignores))
and (match_extensions(filepath, extensions) or filepath in exact_matches)
):
if not is_pytorch_extension: # for pytorch extensions, consider all files
if not is_pytorch_file(rel_filepath) and not is_caffe2_gpu_file(rel_filepath):
continue
if out_of_place_only and not is_out_of_place(rel_filepath):
continue
yield filepath
def preprocess_file_and_save_result(
output_directory: str,
filepath: str,
all_files: Iterable,
header_include_dirs: Iterable,
stats: dict[str, list],
hip_clang_launch: bool,
is_pytorch_extension: bool,
clean_ctx: GeneratedFileCleaner,
show_progress: bool) -> None:
fin_path = os.path.abspath(os.path.join(output_directory, filepath))
hipify_result = HipifyResult(current_state=CurrentState.INITIALIZED, hipified_path=fin_path)
HIPIFY_FINAL_RESULT[fin_path] = hipify_result
result = preprocessor(output_directory, filepath, all_files, header_include_dirs, stats,
hip_clang_launch, is_pytorch_extension, clean_ctx, show_progress)
# Show what happened
if show_progress and "ignored" not in result.status:
print(
fin_path, "->",
result.hipified_path, result.status, flush=True)
HIPIFY_FINAL_RESULT[fin_path] = result
def compute_stats(stats):
unsupported_calls = {cuda_call for (cuda_call, _filepath) in stats["unsupported_calls"]}
# Print the number of unsupported calls
print(f"Total number of unsupported CUDA function calls: {len(unsupported_calls):d}")
# Print the list of unsupported calls
print(", ".join(unsupported_calls))
# Print the number of kernel launches
print(f"\nTotal number of replaced kernel launches: {len(stats['kernel_launches']):d}")
def add_dim3(kernel_string, cuda_kernel):
'''adds dim3() to the second and third arguments in the kernel launch'''
count = 0
closure = 0
kernel_string = kernel_string.replace("<<<", "").replace(">>>", "")
arg_locs: list[dict[str, int]] = [{} for _ in range(2)]
arg_locs[count]['start'] = 0
for ind, c in enumerate(kernel_string):
if count > 1:
break
if c == "(":
closure += 1
elif c == ")":
closure -= 1
if (c == "," or ind == len(kernel_string) - 1) and closure == 0:
arg_locs[count]['end'] = ind + (c != ",")
count += 1
if count < 2:
arg_locs[count]['start'] = ind + 1
first_arg_raw = kernel_string[arg_locs[0]['start']:arg_locs[0]['end'] + 1]
second_arg_raw = kernel_string[arg_locs[1]['start']:arg_locs[1]['end']]
first_arg_clean = kernel_string[arg_locs[0]['start']:arg_locs[0]['end']].replace("\n", "").strip(" ")
second_arg_clean = kernel_string[arg_locs[1]['start']:arg_locs[1]['end']].replace("\n", "").strip(" ")
first_arg_dim3 = f"dim3({first_arg_clean})"
second_arg_dim3 = f"dim3({second_arg_clean})"
first_arg_raw_dim3 = first_arg_raw.replace(first_arg_clean, first_arg_dim3)
second_arg_raw_dim3 = second_arg_raw.replace(second_arg_clean, second_arg_dim3)
cuda_kernel = cuda_kernel.replace(first_arg_raw + second_arg_raw, first_arg_raw_dim3 + second_arg_raw_dim3)
return cuda_kernel
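# Editor-added illustration of add_dim3 (hypothetical launch string):
#   add_dim3("<<<grid, block, 0, stream>>>", "kernel<<<grid, block, 0, stream>>>(")
#   returns "kernel<<<dim3(grid), dim3(block), 0, stream>>>("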
RE_KERNEL_LAUNCH = re.compile(r'([ ]+)(detail?)::[ ]+\\\n[ ]+')
def processKernelLaunches(string, stats):
""" Replace the CUDA style Kernel launches with the HIP style kernel launches."""
# Concat the namespace with the kernel names. (Find cleaner way of doing this later).
string = RE_KERNEL_LAUNCH.sub(lambda inp: f"{inp.group(1)}{inp.group(2)}::", string)
def grab_method_and_template(in_kernel):
# The positions for relevant kernel components.
pos = {
"kernel_launch": {"start": in_kernel["start"], "end": in_kernel["end"]},
"kernel_name": {"start": -1, "end": -1},
"template": {"start": -1, "end": -1}
}
# Count for balancing template
count = {"<>": 0}
# Status for whether we are parsing a certain item.
START = 0
AT_TEMPLATE = 1
AFTER_TEMPLATE = 2
AT_KERNEL_NAME = 3
status = START
# Parse the string character by character
for i in range(pos["kernel_launch"]["start"] - 1, -1, -1):
char = string[i]
# Handle Templating Arguments
if status in (START, AT_TEMPLATE):
if char == ">":
if status == START:
status = AT_TEMPLATE
pos["template"]["end"] = i
count["<>"] += 1
if char == "<":
count["<>"] -= 1
if count["<>"] == 0 and (status == AT_TEMPLATE):
pos["template"]["start"] = i
status = AFTER_TEMPLATE
# Handle Kernel Name
if status != AT_TEMPLATE:
if string[i].isalnum() or string[i] in {'(', ')', '_', ':', '#'}:
if status != AT_KERNEL_NAME:
status = AT_KERNEL_NAME
pos["kernel_name"]["end"] = i
# Case: Kernel name starts the string.
if i == 0:
pos["kernel_name"]["start"] = 0
# Finished
return [(pos["kernel_name"]), (pos["template"]), (pos["kernel_launch"])]
else:
# Potential ending point if we're already traversing a kernel's name.
if status == AT_KERNEL_NAME:
pos["kernel_name"]["start"] = i
# Finished
return [(pos["kernel_name"]), (pos["template"]), (pos["kernel_launch"])]
def find_kernel_bounds(string):
"""Finds the starting and ending points for all kernel launches in the string."""
kernel_end = 0
kernel_positions = []
        # Continue until we cannot find any more kernels.
while string.find("<<<", kernel_end) != -1:
# Get kernel starting position (starting from the previous ending point)
kernel_start = string.find("<<<", kernel_end)
# Get kernel ending position (adjust end point past the >>>)
kernel_end = string.find(">>>", kernel_start) + 3
if kernel_end <= 0:
raise InputError("no kernel end found")
# Add to list of traversed kernels
kernel_positions.append({"start": kernel_start, "end": kernel_end,
"group": string[kernel_start: kernel_end]})
return kernel_positions
# Replace comments and string literals from the code so that find_kernel_bounds does not
# wrongly capture kernels in comments and string literals.
# This function replaces them with "x" to keep positions.
def mask_comments(string):
in_comment = ''
prev_c = ''
new_string = ''
for c in string:
if in_comment == '':
# Outside comments
if c == '/' and prev_c == '/':
in_comment = '//'
elif c == '*' and prev_c == '/':
in_comment = '/*'
elif c == '"' and prev_c != '\\' and prev_c != "'":
in_comment = '"'
elif in_comment == '//':
# In // xxx
if c == '\r' or c == '\n':
in_comment = ''
elif in_comment == '/*':
# In /* xxx */
if c == '/' and prev_c == '*':
in_comment = ''
elif in_comment == '"':
# In ""
if c == '"' and prev_c != '\\':
in_comment = ''
prev_c = c
if in_comment == '':
new_string += c
else:
new_string += 'x'
return new_string
# Grab positional ranges of all kernel launches
get_kernel_positions = list(find_kernel_bounds(mask_comments(string)))
output_string = string
# Replace each CUDA kernel with a HIP kernel.
for kernel in get_kernel_positions:
# Get kernel components
params = grab_method_and_template(kernel)
# Find parenthesis after kernel launch
parenthesis = string.find("(", kernel["end"])
# Extract cuda kernel
cuda_kernel = string[params[0]["start"]:parenthesis + 1]
kernel_string = string[kernel['start']:kernel['end']]
end_param_index = 0 if params[1]['end'] == -1 else 1
kernel_name_with_template = string[params[0]['start']:params[end_param_index]['end'] + 1]
cuda_kernel_dim3 = add_dim3(kernel_string, cuda_kernel)
# Keep number of kernel launch params consistent (grid dims, group dims, stream, dynamic shared size)
num_klp = len(extract_arguments(0, kernel["group"].replace("<<<", "(").replace(">>>", ")")))
hip_kernel = "hipLaunchKernelGGL(" + cuda_kernel_dim3[0:-1].replace(
">>>", ", 0" * (4 - num_klp) + ">>>").replace("<<<", ", ").replace(
">>>", ", ").replace(kernel_name_with_template, "(" + kernel_name_with_template + ")")
# Replace cuda kernel with hip kernel
output_string = output_string.replace(cuda_kernel, hip_kernel)
# Update the statistics
stats["kernel_launches"].append(hip_kernel)
return output_string
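# Editor-added illustration of the launch rewrite performed above (hypothetical kernel):
#   my_kernel<<<grid, block, 0, stream>>>(a, b);
#     becomes
#   hipLaunchKernelGGL((my_kernel), dim3(grid), dim3(block), 0, stream, a, b);
# Missing launch parameters are padded with ", 0" so that four are always passed.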
def find_closure_group(input_string, start, group):
"""Generalization for finding a balancing closure group
if group = ["(", ")"], then finds the first balanced parentheses.
if group = ["{", "}"], then finds the first balanced bracket.
Given an input string, a starting position in the input string, and the group type,
find_closure_group returns the positions of group[0] and group[1] as a tuple.
Example:
>>> find_closure_group("(hi)", 0, ["(", ")"])
(0, 3)
"""
inside_parenthesis = False
parens = 0
pos = start
p_start, p_end = -1, -1
while pos < len(input_string):
if input_string[pos] == group[0]:
if inside_parenthesis is False:
inside_parenthesis = True
parens = 1
p_start = pos
else:
parens += 1
elif input_string[pos] == group[1] and inside_parenthesis:
parens -= 1
if parens == 0:
p_end = pos
return p_start, p_end
pos += 1
return None, None
def find_bracket_group(input_string, start):
    """Finds the first balanced group of curly braces."""
    return find_closure_group(input_string, start, group=["{", "}"])
def find_parentheses_group(input_string, start):
    """Finds the first balanced group of parentheses."""
    return find_closure_group(input_string, start, group=["(", ")"])
RE_ASSERT = re.compile(r"\bassert[ ]*\(")
def replace_math_functions(input_string):
"""FIXME: Temporarily replace std:: invocations of math functions
with non-std:: versions to prevent linker errors NOTE: This
can lead to correctness issues when running tests, since the
correct version of the math function (exp/expf) might not get
called. Plan is to remove this function once HIP supports
std:: math function calls inside device code
"""
output_string = input_string
for func in MATH_TRANSPILATIONS:
output_string = output_string.replace(fr'{func}(', f'{MATH_TRANSPILATIONS[func]}(')
return output_string
RE_SYNCTHREADS = re.compile(r":?:?\b(__syncthreads)\b(\w*\()")
def hip_header_magic(input_string):
"""If the file makes kernel builtin calls and does not include the cuda_runtime.h header,
then automatically add an #include to match the "magic" includes provided by NVCC.
TODO:
Update logic to ignore cases where the cuda_runtime.h is included by another file.
"""
# Copy the input.
output_string = input_string
# Check if one of the following headers is already included.
headers = ["hip/hip_runtime.h", "hip/hip_runtime_api.h"]
if any(re.search(fr'#include ("{ext}"|<{ext}>)', output_string) for ext in headers):
return output_string
# Rough logic to detect if we're inside device code
hasDeviceLogic: int
hasDeviceLogic = "hipLaunchKernelGGL" in output_string
hasDeviceLogic += "__global__" in output_string
hasDeviceLogic += "__shared__" in output_string
hasDeviceLogic += RE_SYNCTHREADS.search(output_string) is not None
# If device logic found, provide the necessary header.
if hasDeviceLogic:
output_string = '#include "hip/hip_runtime.h"\n' + input_string
return output_string
RE_EXTERN_SHARED = re.compile(r"extern\s+([\w\(\)]+)?\s*__shared__\s+([\w:<>\s]+)\s+(\w+)\s*\[\s*\]\s*;")
def replace_extern_shared(input_string):
"""Match extern __shared__ type foo[]; syntax and use HIP_DYNAMIC_SHARED() MACRO instead.
https://github.com/ROCm-Developer-Tools/HIP/blob/master/docs/markdown/hip_kernel_language.md#__shared__
Example:
"extern __shared__ char smemChar[];" => "HIP_DYNAMIC_SHARED( char, smemChar)"
"extern __shared__ unsigned char smem[];" => "HIP_DYNAMIC_SHARED( unsigned char, my_smem)"
"""
output_string = input_string
output_string = RE_EXTERN_SHARED.sub(
lambda inp: f"HIP_DYNAMIC_SHARED({inp.group(1) or ''} {inp.group(2)}, {inp.group(3)})", output_string)
return output_string
def get_hip_file_path(rel_filepath, is_pytorch_extension=False):
"""
Returns the new name of the hipified file
"""
# At the moment, some PyTorch source files are HIPified in place. The predicate
# is_out_of_place tells us if this is the case or not.
assert not os.path.isabs(rel_filepath)
if not is_pytorch_extension and not is_out_of_place(rel_filepath):
return rel_filepath
dirpath, filename = os.path.split(rel_filepath)
root, ext = os.path.splitext(filename)
# Here's the plan:
#
# In general, we need to disambiguate the HIPified filename so that
# it gets a different name from the original filename, so
# that we don't overwrite the original file
#
# There's a lot of different naming conventions across PyTorch
# and Caffe2, but the general recipe is to convert occurrences
# of cuda/gpu to hip, and add hip if there are no occurrences
# of cuda/gpu anywhere.
#
# Concretely, we do the following:
#
# - If there is a directory component named "cuda", replace
# it with "hip", AND
#
# - If the file name contains "CUDA", replace it with "HIP", AND
#
# - ALWAYS replace '.cu' with '.hip', because those files
# contain CUDA kernels that needs to be hipified and processed with
# hip compiler
#
# - If we are not hipifying a PyTorch extension, and the parent
# directory name did not change as a result of the above
# transformations, insert "hip" in the file path
# as the direct parent folder of the file
#
# - If we are hipifying a PyTorch extension, and the parent directory
# name as well as the filename (incl. extension) did not change as
# a result of the above transformations, insert "_hip" in the filename
#
# This isn't set in stone; we might adjust this to support other
# naming conventions.
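    # Editor-added examples of the rules above (hypothetical inputs):
    #   aten/src/ATen/cuda/CUDAContext.cu  -> aten/src/ATen/hip/HIPContext.hip
    #   caffe2/operators/softmax_op.cu     -> caffe2/operators/hip/softmax_op.hip
    #   my_kernel.cu  (PyTorch extension)  -> my_kernel.hip
    #   my_kernel.cpp (PyTorch extension)  -> my_kernel_hip.cpp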
if ext == '.cu':
ext = '.hip'
orig_filename = filename
orig_dirpath = dirpath
dirpath = dirpath.replace('cuda', 'hip')
dirpath = dirpath.replace('CUDA', 'HIP')
dirpath = dirpath.replace('THC', 'THH')
root = root.replace('cuda', 'hip')
root = root.replace('CUDA', 'HIP')
# Special case to handle caffe2/core/THCCachingAllocator
if dirpath != "caffe2/core":
root = root.replace('THC', 'THH')
if not is_pytorch_extension and dirpath == orig_dirpath:
dirpath = os.path.join(dirpath, 'hip')
if is_pytorch_extension and dirpath == orig_dirpath and (root + ext) == orig_filename:
root = root + "_hip"
return os.path.join(dirpath, root + ext)
def is_out_of_place(rel_filepath):
assert not os.path.isabs(rel_filepath)
if rel_filepath.startswith("torch/"):
return False
if rel_filepath.startswith("third_party/nvfuser/"):
return False
if rel_filepath.startswith("tools/autograd/templates/"):
return False
return True
# Keep this synchronized with includes/ignores in build_amd.py
def is_pytorch_file(rel_filepath):
assert not os.path.isabs(rel_filepath)
if rel_filepath.startswith("aten/"):
if rel_filepath.startswith("aten/src/ATen/core/"):
return False
return True
if rel_filepath.startswith("torch/"):
return True
if rel_filepath.startswith("third_party/nvfuser/"):
return True
if rel_filepath.startswith("tools/autograd/templates/"):
return True
return False
def is_cusparse_file(rel_filepath):
if is_pytorch_file(rel_filepath):
return "sparse" in rel_filepath.lower()
return False
def is_special_file(rel_filepath):
if is_pytorch_file(rel_filepath):
if "sparse" in rel_filepath.lower():
return True
elif "linalg" in rel_filepath.lower():
if "batchlinearalgebralibblas" in rel_filepath.lower():
return False # don't use "special" mappings for this specific linalg cublas file
return True
return False
def is_caffe2_gpu_file(rel_filepath):
assert not os.path.isabs(rel_filepath)
if rel_filepath.startswith("c10/cuda"):
return True
filename = os.path.basename(rel_filepath)
_, ext = os.path.splitext(filename)
return ('gpu' in filename or ext in ['.cu', '.cuh']) and ('cudnn' not in filename)
class TrieNode:
"""A Trie node whose children are represented as a directory of char: TrieNode.
A special char '' represents end of word
"""
def __init__(self):
self.children = {}
class Trie:
"""Creates a Trie out of a list of words. The trie can be exported to a Regex pattern.
The corresponding Regex should match much faster than a simple Regex union."""
def __init__(self):
"""Initialize the trie with an empty root node."""
self.root = TrieNode()
self._hash = hashlib.md5(usedforsecurity=False)
self._digest = self._hash.digest()
def add(self, word):
"""Add a word to the Trie. """
self._hash.update(word.encode())
self._digest = self._hash.digest()
node = self.root
for char in word:
node.children.setdefault(char, TrieNode())
node = node.children[char]
node.children[''] = True # Mark the end of the word
def dump(self):
"""Return the root node of Trie. """
return self.root
def quote(self, char):
""" Escape a char for regex. """
return re.escape(char)
def search(self, word):
"""Search whether word is present in the Trie.
Returns True if yes, else return False"""
node = self.root
for char in word:
if char in node.children:
node = node.children[char]
else:
return False
# make sure to check the end-of-word marker present
return '' in node.children
@functools.lru_cache # noqa: B019
def _pattern(self, root, digest):
"""Convert a Trie into a regular expression pattern
Memoized on the hash digest of the trie, which is built incrementally
during add().
"""
node = root
if "" in node.children and len(node.children.keys()) == 1:
return None
alt = [] # store alternative patterns
cc = [] # store char to char classes
q = 0 # for node representing the end of word
for char in sorted(node.children.keys()):
if isinstance(node.children[char], TrieNode):
try:
recurse = self._pattern(node.children[char], self._digest)
alt.append(self.quote(char) + recurse)
except Exception:
cc.append(self.quote(char))
else:
q = 1
cconly = not len(alt) > 0
if len(cc) > 0:
if len(cc) == 1:
alt.append(cc[0])
else:
alt.append('[' + ''.join(cc) + ']')
if len(alt) == 1:
result = alt[0]
else:
result = "(?:" + "|".join(alt) + ")"
if q:
if cconly:
result += "?"
else:
result = f"(?:{result})?"
return result
def pattern(self):
"""Export the Trie to a regex pattern."""
return self._pattern(self.root, self._digest)
def export_to_regex(self):
"""Export the Trie to a regex pattern."""
return self._pattern(self.root, self._digest)
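# Editor-added illustration of the trie export (hypothetical words):
#   t = Trie(); t.add("cudaMalloc"); t.add("cudaMallocHost")
#   t.export_to_regex()  ->  "cudaMalloc(?:Host)?"
# which matches either identifier in one pass instead of a plain "A|B" union.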
CAFFE2_TRIE = Trie()
CAFFE2_MAP = {}
PYTORCH_TRIE = Trie()
PYTORCH_MAP: dict[str, object] = {}
# In PyTorch, we map cuBLAS->rocBLAS and cuSPARSE->hipSPARSE. Note the prefix, roc versus hip.
# The 'hip' APIs offer a more direct CUDA-friendly mapping, but calling rocBLAS directly has better performance.
# Unfortunately, the roc* types and hip* types differ, i.e., rocblas_float_complex versus hipComplex.
# In the case of SPARSE, we must use the hip types for complex instead of the roc types,
# but the pytorch mappings assume roc. Therefore, we create a new SPARSE mapping that has a higher priority.
# Its mappings will trigger first, and only when a miss occurs will the lower-priority pytorch mapping take place.
# When a file contains "sparse" in the filename, a mapping marked with API_SPARSE is preferred over other choices.
# Similarly, "linalg" files require rocBLAS -> hipSOLVER so they also need special handling.
PYTORCH_SPECIAL_MAP = {}
for mapping in CUDA_TO_HIP_MAPPINGS:
assert isinstance(mapping, Mapping)
for src, value in mapping.items():
dst = value[0]
meta_data = value[1:]
if constants.API_CAFFE2 not in meta_data:
PYTORCH_TRIE.add(src)
# if src is already in PYTORCH_MAP and dst belongs to API_SPECIAL
# do not overwrite PYTORCH_MAP, store dst separately
if constants.API_SPECIAL in meta_data and PYTORCH_MAP.get(src, ""):
PYTORCH_SPECIAL_MAP[src] = dst
else:
PYTORCH_MAP[src] = dst
if constants.API_PYTORCH not in meta_data and constants.API_SPECIAL not in meta_data:
CAFFE2_TRIE.add(src)
CAFFE2_MAP[src] = dst
RE_CAFFE2_PREPROCESSOR = re.compile(CAFFE2_TRIE.export_to_regex())
RE_PYTORCH_PREPROCESSOR = re.compile(fr'(?<=\W)({PYTORCH_TRIE.export_to_regex()})(?=\W)')
RE_QUOTE_HEADER = re.compile(r'#include "([^"]+)"')
RE_ANGLE_HEADER = re.compile(r'#include <([^>]+)>')
RE_THC_GENERIC_FILE = re.compile(r'#define THC_GENERIC_FILE "([^"]+)"')
RE_CU_SUFFIX = re.compile(r'\.cu\b') # be careful not to pick up .cuh
"""
Returns a HipifyResult object with the following details:
"hipified_path" : absolute path of hipified source file
"status" : "ok" if hipified file was written out
"skipped" if an identical hipified file already existed or hipified file couldn't be written out
"ignored" if the source file was a hipified file itself or not meant to be hipified
"current_state" : CurrentState.INITIALIZED if source file is first ready to be hipified
CurrentState.DONE if source file is done with hipification process
"""
def preprocessor(
output_directory: str,
filepath: str,
all_files: Iterable,
header_include_dirs: Iterable,
stats: dict[str, list],
hip_clang_launch: bool,
is_pytorch_extension: bool,
clean_ctx: GeneratedFileCleaner,
show_progress: bool) -> HipifyResult:
""" Executes the CUDA -> HIP conversion on the specified file. """
fin_path = os.path.abspath(os.path.join(output_directory, filepath))
hipify_result = HIPIFY_FINAL_RESULT[fin_path]
if filepath not in all_files:
hipify_result.hipified_path = None
hipify_result.status = "[ignored, not to be hipified]"
hipify_result.current_state = CurrentState.DONE
return hipify_result
rel_filepath = _to_unix_path(os.path.relpath(filepath, output_directory))
with open(fin_path, encoding='utf-8') as fin:
if fin.readline() == HIPIFY_C_BREADCRUMB:
hipify_result.hipified_path = None
hipify_result.status = "[ignored, input is hipified output]"
hipify_result.current_state = CurrentState.DONE
return hipify_result
fin.seek(0)
output_source = fin.read()
orig_output_source = output_source
# get_hip_file_path needs a relative path to work correctly
fout_path = os.path.abspath(os.path.join(output_directory, get_hip_file_path(rel_filepath, is_pytorch_extension)))
if not os.path.exists(os.path.dirname(fout_path)):
clean_ctx.makedirs(os.path.dirname(fout_path))
# unsupported_calls statistics reporting is broken atm
def pt_repl(m):
return PYTORCH_MAP[m.group(0)]
def pt_special_repl(m):
# checks SPECIAL map first, and if a miss occurs, falls back to pytorch mappings
return PYTORCH_SPECIAL_MAP.get(m.group(0), pt_repl(m))
if is_pytorch_extension:
output_source = RE_PYTORCH_PREPROCESSOR.sub(pt_repl, output_source)
else:
if is_special_file(rel_filepath):
output_source = RE_PYTORCH_PREPROCESSOR.sub(pt_special_repl, output_source)
elif is_pytorch_file(rel_filepath):
output_source = RE_PYTORCH_PREPROCESSOR.sub(pt_repl, output_source)
else:
def c2_repl(m):
return CAFFE2_MAP[m.group(0)]
output_source = RE_CAFFE2_PREPROCESSOR.sub(c2_repl, output_source)
# Header rewrites
def mk_repl(templ, include_current_dir=True):
def repl(m):
f = m.group(1)
filename = os.path.basename(f)
if (
f.startswith(("ATen/cuda",
"ATen/native/cuda",
"ATen/native/nested/cuda",
"ATen/native/quantized/cuda",
"ATen/native/sparse/cuda",
"ATen/native/transformers/cuda",
"THC/")) or
(f.startswith("THC") and not f.startswith("THCP"))
):
return templ.format(get_hip_file_path(m.group(1), is_pytorch_extension))
# if filename is one of the files being hipified for this extension
if (is_pytorch_extension and any(s.endswith(filename) for s in all_files)):
header_dir = None
header_filepath = None
# If include_current_dir True, look first in same dir as the including source file
if include_current_dir:
header_dir_to_check = os.path.dirname(fin_path)
header_path_to_check = os.path.abspath(os.path.join(header_dir_to_check, f))
if os.path.exists(header_path_to_check):
header_dir = header_dir_to_check
header_filepath = header_path_to_check
# If not found, look in include dirs one by one and first match wins
if header_filepath is None:
for header_include_dir in header_include_dirs:
header_dir_to_check = os.path.join(output_directory, header_include_dir)
header_path_to_check = os.path.abspath(os.path.join(header_dir_to_check, f))
if os.path.exists(header_path_to_check):
header_dir = header_dir_to_check
header_filepath = header_path_to_check
# If header file not found, keep as is
if header_filepath is None:
return m.group(0)
# Hipify header file first if needed
if header_filepath not in HIPIFY_FINAL_RESULT:
preprocess_file_and_save_result(output_directory,
header_filepath,
all_files, header_include_dirs, stats, hip_clang_launch,
is_pytorch_extension, clean_ctx, show_progress)
elif header_filepath in HIPIFY_FINAL_RESULT:
header_result = HIPIFY_FINAL_RESULT[header_filepath]
if header_result.current_state == CurrentState.INITIALIZED:
# get_hip_file_path needs a relative path to work correctly
header_rel_path = os.path.relpath(header_filepath, output_directory)
header_fout_path = os.path.abspath(os.path.join(output_directory,
get_hip_file_path(header_rel_path, is_pytorch_extension)))
header_result.hipified_path = header_fout_path
HIPIFY_FINAL_RESULT[header_filepath] = header_result
return templ.format(os.path.relpath(header_fout_path if header_fout_path is not None
else header_filepath, header_dir))
hipified_header_filepath = HIPIFY_FINAL_RESULT[header_filepath].hipified_path
return templ.format(os.path.relpath(hipified_header_filepath if hipified_header_filepath is not None
else header_filepath, header_dir))
return m.group(0)
return repl
output_source = RE_QUOTE_HEADER.sub(mk_repl('#include "{0}"', True), output_source)
output_source = RE_ANGLE_HEADER.sub(mk_repl('#include <{0}>', False), output_source)
output_source = RE_THC_GENERIC_FILE.sub(mk_repl('#define THC_GENERIC_FILE "{0}"'), output_source)
# CMakeLists.txt rewrites
if filepath.endswith('CMakeLists.txt'):
output_source = output_source.replace('CUDA', 'HIP')
output_source = output_source.replace('THC', 'THH')
output_source = RE_CU_SUFFIX.sub('.hip', output_source)
# Perform Kernel Launch Replacements
if not hip_clang_launch:
output_source = processKernelLaunches(output_source, stats)
# Replace std:: with non-std:: versions
if (filepath.endswith((".cu", ".cuh"))) and "PowKernel" not in filepath:
output_source = replace_math_functions(output_source)
# Include header if device code is contained.
output_source = hip_header_magic(output_source)
# Replace the extern __shared__
# NOTE: No longer needed after transition from hcc to hipclang.
# output_source = replace_extern_shared(output_source)
# Don't write out identical hipified files for extensions if dirpath has not changed
if (
is_pytorch_extension
and orig_output_source == output_source
and os.path.dirname(fin_path) == os.path.dirname(fout_path)
):
hipify_result.hipified_path = fin_path
hipify_result.status = "[skipped, no changes]"
hipify_result.current_state = CurrentState.DONE
return hipify_result
# Add hipify breadcrumb for C-style files to avoid re-hipification
if fin_path != fout_path and match_extensions(fin_path, (".cu", ".cuh", ".c", ".cc", ".cpp", ".h", ".hpp")):
output_source = HIPIFY_C_BREADCRUMB + output_source
do_write = True
if os.path.exists(fout_path):
with open(fout_path, encoding='utf-8') as fout_old:
do_write = fout_old.read() != output_source
if do_write:
try:
with clean_ctx.open(fout_path, 'w', encoding='utf-8') as fout:
fout.write(output_source)
hipify_result.hipified_path = fout_path
hipify_result.status = "[ok]"
hipify_result.current_state = CurrentState.DONE
return hipify_result
except PermissionError as e:
print(f'{bcolors.WARNING}Failed to save {fout_path} with "{e.strerror}", leaving {fin_path} unchanged.{bcolors.ENDC}',
file=sys.stderr)
hipify_result.hipified_path = fin_path
hipify_result.status = "[skipped, no permissions]"
hipify_result.current_state = CurrentState.DONE
return hipify_result
else:
hipify_result.hipified_path = fout_path
hipify_result.status = "[skipped, already hipified]"
hipify_result.current_state = CurrentState.DONE
return hipify_result
def file_specific_replacement(filepath, search_string, replace_string, strict=False):
with openf(filepath, "r+") as f:
contents = f.read()
if strict:
contents = re.sub(fr'\b({re.escape(search_string)})\b', lambda x: replace_string, contents)
else:
contents = contents.replace(search_string, replace_string)
f.seek(0)
f.write(contents)
f.truncate()
def file_add_header(filepath, header):
with openf(filepath, "r+") as f:
contents = f.read()
if header[0] != "<" and header[-1] != ">":
header = f'"{header}"'
contents = (f'#include {header} \n') + contents
f.seek(0)
f.write(contents)
f.truncate()
def fix_static_global_kernels(in_txt):
"""Static global kernels in HIP results in a compilation error."""
in_txt = in_txt.replace(" __global__ static", "__global__")
return in_txt
RE_INCLUDE = re.compile(r"#include .*\n")
def extract_arguments(start, string):
""" Return the list of arguments in the upcoming function parameter closure.
Example:
string (input): '(blocks, threads, 0, THCState_getCurrentStream(state))'
arguments (output):
'[{'start': 1, 'end': 7},
{'start': 8, 'end': 16},
{'start': 17, 'end': 19},
{'start': 20, 'end': 53}]'
"""
arguments = []
closures = {
"<": 0,
"(": 0
}
current_position = start
argument_start_pos = current_position + 1
# Search for final parenthesis
while current_position < len(string):
if string[current_position] == "(":
closures["("] += 1
elif string[current_position] == ")":
closures["("] -= 1
elif string[current_position] == "<":
closures["<"] += 1
elif string[current_position] == ">" and string[current_position - 1] != "-" and closures["<"] > 0:
closures["<"] -= 1
# Finished all arguments
if closures["("] == 0 and closures["<"] == 0:
# Add final argument
arguments.append({"start": argument_start_pos, "end": current_position})
break
# Finished current argument
if closures["("] == 1 and closures["<"] == 0 and string[current_position] == ",":
arguments.append({"start": argument_start_pos, "end": current_position})
argument_start_pos = current_position + 1
current_position += 1
return arguments
def str2bool(v):
"""ArgumentParser doesn't support type=bool. Thus, this helper method will convert
from possible string types to True / False."""
if v.lower() in ('yes', 'true', 't', 'y', '1'):
return True
elif v.lower() in ('no', 'false', 'f', 'n', '0'):
return False
else:
raise argparse.ArgumentTypeError('Boolean value expected.')
def hipify(
project_directory: str,
show_detailed: bool = False,
extensions: Iterable = (".cu", ".cuh", ".c", ".cc", ".cpp", ".h", ".in", ".hpp"),
header_extensions: Iterable = (".cuh", ".h", ".hpp"),
output_directory: str = "",
header_include_dirs: Iterable = (),
includes: Iterable = ('*',),
extra_files: Iterable = (),
out_of_place_only: bool = False,
ignores: Iterable = (),
show_progress: bool = True,
hip_clang_launch: bool = False,
is_pytorch_extension: bool = False,
hipify_extra_files_only: bool = False,
clean_ctx: Optional[GeneratedFileCleaner] = None
) -> HipifyFinalResult:
if project_directory == "":
project_directory = os.getcwd()
# Verify the project directory exists.
if not os.path.exists(project_directory):
print("The project folder specified does not exist.")
sys.exit(1)
# If no output directory, provide a default one.
if not output_directory:
project_directory.rstrip("/")
output_directory = project_directory + "_amd"
if project_directory != output_directory:
includes = [include.replace(project_directory, output_directory) for include in includes]
ignores = [ignore.replace(project_directory, output_directory) for ignore in ignores]
# Copy from project directory to output directory if not done already.
if not os.path.exists(output_directory):
shutil.copytree(project_directory, output_directory)
includes = list(map(_to_unix_path, includes))
ignores = list(map(_to_unix_path, ignores))
all_files = list(matched_files_iter(output_directory, includes=includes,
ignores=ignores, extensions=extensions,
out_of_place_only=out_of_place_only,
is_pytorch_extension=is_pytorch_extension))
all_files_set = set(all_files)
for f in extra_files:
if not os.path.isabs(f):
f = os.path.join(output_directory, f)
if f not in all_files_set:
all_files.append(f)
# List all files in header_include_paths to ensure they are hipified
from pathlib import Path
for header_include_dir in header_include_dirs:
if os.path.isabs(header_include_dir):
header_include_dir_path = Path(header_include_dir)
else:
header_include_dir_path = Path(os.path.join(output_directory, header_include_dir))
all_files.extend(
str(path) for path in header_include_dir_path.rglob('*') if path.is_file()
and _fnmatch(str(path), includes)
and (not _fnmatch(str(path), ignores))
and match_extensions(path.name, header_extensions)
)
if clean_ctx is None:
clean_ctx = GeneratedFileCleaner(keep_intermediates=True)
# Preprocessing statistics.
stats: dict[str, list] = {"unsupported_calls": [], "kernel_launches": []}
for filepath in (all_files if not hipify_extra_files_only else extra_files):
preprocess_file_and_save_result(output_directory, filepath, all_files, header_include_dirs,
stats, hip_clang_launch, is_pytorch_extension, clean_ctx, show_progress)
print(bcolors.OKGREEN + "Successfully preprocessed all matching files." + bcolors.ENDC, file=sys.stderr)
# Show detailed summary
if show_detailed:
compute_stats(stats)
return HIPIFY_FINAL_RESULT
```
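For orientation, here is a rough, editor-added sketch of calling hipify() on a standalone extension, similar in spirit to what torch.utils.cpp_extension.load() does on ROCm builds. The directory and file names are hypothetical; only keyword arguments from the signature above are used.
```py
# Editor-added sketch; paths are hypothetical.
import os
from torch.utils.hipify import hipify_python

project_dir = "/tmp/my_ext"                      # hypothetical extension source dir
sources = [os.path.join(project_dir, "my_kernel.cu")]

result = hipify_python.hipify(
    project_directory=project_dir,
    output_directory=project_dir,                # hipify next to the originals
    header_include_dirs=[],
    includes=[os.path.join(project_dir, "*")],
    extra_files=sources,
    show_detailed=False,
    is_pytorch_extension=True,
    hipify_extra_files_only=True,                # only touch the listed sources
    clean_ctx=hipify_python.GeneratedFileCleaner(keep_intermediates=True),
)
for src in sources:
    print(src, "->", result[os.path.abspath(src)].hipified_path)
```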
|
=====================================================================================================================
SOURCE CODE FILE: version.py
LINES: 1
SIZE: 0.02 KB
PATH: scripts\freecad_env\Lib\site-packages\torch\utils\hipify\version.py
ENCODING: utf-8
```py
__version__ = '1.0.0'
```
|
============================================================================================================
SOURCE CODE FILE: hooks.py
LINES: 1
SIZE: 9.54 KB
PATH: scripts\freecad_env\Lib\site-packages\torch\utils\hooks.py
ENCODING: utf-8
```py
# mypy: allow-untyped-defs
import torch
from collections import OrderedDict
import weakref
import warnings
from typing import Any
__all__ = ["RemovableHandle", "unserializable_hook", "warn_if_has_hooks", "BackwardHook"]
class RemovableHandle:
r"""
A handle which provides the capability to remove a hook.
Args:
hooks_dict (dict): A dictionary of hooks, indexed by hook ``id``.
extra_dict (Union[dict, List[dict]]): An additional dictionary or list of
dictionaries whose keys will be deleted when the same keys are
removed from ``hooks_dict``.
"""
id: int
next_id: int = 0
def __init__(self, hooks_dict: Any, *, extra_dict: Any = None) -> None:
self.hooks_dict_ref = weakref.ref(hooks_dict)
self.id = RemovableHandle.next_id
RemovableHandle.next_id += 1
self.extra_dict_ref: tuple = ()
if isinstance(extra_dict, dict):
self.extra_dict_ref = (weakref.ref(extra_dict),)
elif isinstance(extra_dict, list):
self.extra_dict_ref = tuple(weakref.ref(d) for d in extra_dict)
def remove(self) -> None:
hooks_dict = self.hooks_dict_ref()
if hooks_dict is not None and self.id in hooks_dict:
del hooks_dict[self.id]
for ref in self.extra_dict_ref:
extra_dict = ref()
if extra_dict is not None and self.id in extra_dict:
del extra_dict[self.id]
def __getstate__(self):
if self.extra_dict_ref is None:
return (self.hooks_dict_ref(), self.id)
else:
return (self.hooks_dict_ref(), self.id, tuple(ref() for ref in self.extra_dict_ref))
def __setstate__(self, state) -> None:
if state[0] is None:
# create a dead reference
self.hooks_dict_ref = weakref.ref(OrderedDict())
else:
self.hooks_dict_ref = weakref.ref(state[0])
self.id = state[1]
RemovableHandle.next_id = max(RemovableHandle.next_id, self.id + 1)
if len(state) < 3 or state[2] is None:
self.extra_dict_ref = ()
else:
self.extra_dict_ref = tuple(weakref.ref(d) for d in state[2])
def __enter__(self) -> "RemovableHandle":
return self
def __exit__(self, type: Any, value: Any, tb: Any) -> None:
self.remove()
def unserializable_hook(f):
"""
Mark a function as an unserializable hook with this decorator.
This suppresses warnings that would otherwise arise if you attempt
to serialize a tensor that has a hook.
"""
f.__torch_unserializable__ = True
return f
def warn_if_has_hooks(tensor):
if tensor._backward_hooks:
for k in tensor._backward_hooks:
hook = tensor._backward_hooks[k]
if not hasattr(hook, "__torch_unserializable__"):
warnings.warn(f"backward hook {repr(hook)} on tensor will not be "
"serialized. If this is expected, you can "
"decorate the function with @torch.utils.hooks.unserializable_hook "
"to suppress this warning")
class BackwardHook:
"""
A wrapper class to implement nn.Module backward hooks.
It handles:
- Ignoring non-Tensor inputs and replacing them by None before calling the user hook
- Generating the proper Node to capture a set of Tensor's gradients
    - Linking the gradients captured for the outputs with the gradients captured for the inputs
- Calling the user hook once both output and input gradients are available
"""
def __init__(self, module, user_hooks, user_pre_hooks):
self.user_hooks = user_hooks
self.user_pre_hooks = user_pre_hooks
self.module = module
self.grad_outputs = None
self.n_outputs = -1
self.output_tensors_index = None
self.n_inputs = -1
self.input_tensors_index = None
def _pack_with_none(self, indices, values, size):
res = [None] * size
for idx, val in zip(indices, values):
res[idx] = val
return tuple(res)
def _unpack_none(self, indices, values):
res = [values[idx] for idx in indices]
return tuple(res)
def _set_user_hook(self, grad_fn):
def hook(grad_input, _):
if self.grad_outputs is None:
# This happens because the gradient in your nn.Module flows to
                # the Module's input without passing through the Module's
# output, e.g. when you're doing double backward.
return
res = self._pack_with_none(self.input_tensors_index, grad_input, self.n_inputs)
for hook in self.user_hooks:
out = hook(self.module, res, self.grad_outputs)
if out is None:
continue
if len(out) != len(res):
raise RuntimeError("Backward hook returned an invalid number of grad_input, "
f"got {len(out)}, but expected {len(res)}")
res = out
self.grad_outputs = None
return self._unpack_none(self.input_tensors_index, res)
grad_fn.register_hook(hook)
def _apply_on_tensors(self, fn, args):
# Can be used to apply the given function to the tensors contained in the
# args. Will return updated args and the tensors indices
tensors_idx = []
tensors = []
requires_grad = False
for i, arg in enumerate(args):
if isinstance(arg, torch.Tensor):
tensors_idx.append(i)
tensors.append(arg)
requires_grad |= arg.requires_grad
if not (requires_grad and torch.is_grad_enabled()):
return args, None
new_tensors = torch.nn.modules._functions.BackwardHookFunction.apply(*tensors)
if len(new_tensors) == 0:
raise RuntimeError("Cannot set Module backward hook for a Module with no input Tensors.")
grad_fns = [t.grad_fn for t in new_tensors if t.grad_fn is not None and t.grad_fn.name() == "BackwardHookFunctionBackward"]
if len(grad_fns) == 0:
raise RuntimeError("Error while setting up backward hooks. Please open "
"an issue with a code sample to reproduce this.")
fn(grad_fns[0])
arg_list = list(args)
for idx, val in zip(tensors_idx, new_tensors):
arg_list[idx] = val
if type(args) is tuple:
out = tuple(arg_list)
else:
out = type(args)(*arg_list)
return out, tensors_idx
def setup_input_hook(self, args):
def fn(grad_fn):
self._set_user_hook(grad_fn)
res, input_idx = self._apply_on_tensors(fn, args)
self.n_inputs = len(args)
self.input_tensors_index = input_idx
return res
def setup_output_hook(self, args):
def fn(grad_fn):
def hook(_, grad_output):
self.grad_outputs = self._pack_with_none(self.output_tensors_index,
grad_output,
self.n_outputs)
if self.user_pre_hooks:
expected_len = len(self.grad_outputs)
for user_pre_hook in self.user_pre_hooks:
hook_grad_outputs = user_pre_hook(self.module, self.grad_outputs)
if hook_grad_outputs is None:
continue
actual_len = len(hook_grad_outputs)
if actual_len != expected_len:
raise RuntimeError("Backward pre hook returned an invalid number of grad_output, "
f"got {actual_len}, but expected {expected_len}")
self.grad_outputs = hook_grad_outputs
# We need to be able to clear self.grad_outputs but also return it
local_grad_outputs = self.grad_outputs
# Special case if no input required gradients, this hook should call the user
# hook directly
if self.input_tensors_index is None:
grad_inputs = self._pack_with_none([], [], self.n_inputs)
for user_hook in self.user_hooks:
res = user_hook(self.module, grad_inputs, self.grad_outputs)
if res is not None and not (isinstance(res, tuple) and all(el is None for el in res)):
raise RuntimeError("Backward hook for Modules where no input requires "
"gradient should always return None or None for all gradients.")
self.grad_outputs = None
if local_grad_outputs is not None:
assert self.output_tensors_index is not None # mypy
return tuple(local_grad_outputs[i] for i in self.output_tensors_index)
grad_fn.register_hook(hook)
is_tuple = True
if not isinstance(args, tuple):
args = (args,)
is_tuple = False
res, output_idx = self._apply_on_tensors(fn, args)
self.n_outputs = len(args)
self.output_tensors_index = output_idx
if not is_tuple:
res = res[0]
return res
```
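As a quick, editor-added illustration of the weak-reference behaviour described in the RemovableHandle docstring, the sketch below uses the class directly with a plain OrderedDict, outside of nn.Module (its usual home). In nn.Module, this same handle type is what register_forward_hook and friends return, so handle.remove() unregisters the hook.
```py
# Editor-added sketch: RemovableHandle against a standalone OrderedDict.
from collections import OrderedDict
from torch.utils.hooks import RemovableHandle

hooks: OrderedDict = OrderedDict()
handle = RemovableHandle(hooks)           # only stores a weak reference to `hooks`
hooks[handle.id] = lambda *args: None     # register a dummy hook under the handle id

print(handle.id in hooks)                 # True
handle.remove()                           # drops the entry keyed by handle.id
print(handle.id in hooks)                 # False
```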
|
===================================================================================================================
SOURCE CODE FILE: __init__.py
LINES: 1
SIZE: 0.00 KB
PATH: scripts\freecad_env\Lib\site-packages\torch\utils\jit\__init__.py
ENCODING: utf-8
```py
```
|
======================================================================================================================
SOURCE CODE FILE: log_extract.py
LINES: 1
SIZE: 3.77 KB
PATH: scripts\freecad_env\Lib\site-packages\torch\utils\jit\log_extract.py
ENCODING: utf-8
```py
# mypy: allow-untyped-defs
from contextlib import contextmanager
from typing import Any, cast
import random
import torch
import time
from torch.utils.benchmark import Timer
def extract_ir(filename: str) -> list[str]:
BEGIN = "<GRAPH_EXPORT>"
END = "</GRAPH_EXPORT>"
pfx = None
graphs = []
with open(filename) as f:
split_strs = f.read().split(BEGIN)
for i, split_str in enumerate(split_strs):
if i == 0:
continue
end_loc = split_str.find(END)
if end_loc == -1:
continue
s = split_str[:end_loc]
pfx = split_strs[i - 1].splitlines()[-1]
lines = [x[len(pfx):] for x in s.splitlines(keepends=True)]
graphs.append(''.join(lines))
return graphs
def make_tensor_from_type(inp_type: torch._C.TensorType):
size = inp_type.sizes()
stride = inp_type.strides()
device = inp_type.device()
dtype = inp_type.dtype()
assert size is not None
assert stride is not None
assert device is not None
assert dtype is not None
return torch.empty_strided(size=size, stride=stride, device=device, dtype=dtype)
def load_graph_and_inputs(ir: str) -> tuple[Any, list[Any]]:
graph = torch._C.parse_ir(ir, parse_tensor_constants=True)
graph.makeMultiOutputIntoTuple()
inputs = []
for inp in graph.inputs():
if isinstance(inp.type(), torch._C.FloatType):
inputs.append(random.uniform(.1, 100))
elif isinstance(inp.type(), torch._C.IntType):
inputs.append(random.randint(1, 100))
elif isinstance(inp.type(), torch._C.TensorType):
tensorType = cast(torch._C.TensorType, inp.type())
inputs.append(make_tensor_from_type(tensorType))
elif isinstance(inp.type(), torch._C.BoolType):
inputs.append(random.randint(0, 1) == 1)
else:
raise NotImplementedError(f"A default value is not implemented for type {inp.type()}")
func = torch._C._create_function_from_graph("forward", graph)
torch._C._jit_pass_erase_shape_information(func.graph)
return (func, inputs)
def time_cuda(fn, inputs, test_runs):
t = Timer(stmt="fn(*inputs)", globals={"fn": fn, "inputs" : inputs})
times = t.blocked_autorange()
return times.median * 1000 # time in ms
def time_cpu(fn, inputs, test_runs):
s = time.perf_counter()
for _ in range(test_runs):
fn(*inputs)
e = time.perf_counter()
return (e - s) / test_runs * 1000 # time in ms
def run_test(ir, inputs, *, warmup_runs=10, test_runs=20) -> float:
graph, _ = load_graph_and_inputs(ir)
for _ in range(warmup_runs):
graph(*inputs)
is_cpu = None
for input in inputs:
if isinstance(input, torch.Tensor):
is_cpu = input.device.type == "cpu"
break
assert is_cpu is not None
out = time_cpu(graph, inputs, test_runs) if is_cpu else time_cuda(graph, inputs, test_runs)
return out
@contextmanager
def no_fuser(*args, **kwargs):
old_optimize = torch._C._get_graph_executor_optimize(False)
try:
yield
finally:
torch._C._get_graph_executor_optimize(old_optimize)
def run_baseline_no_fusion(ir, inputs) -> float:
with no_fuser():
return run_test(ir, inputs)
def run_nnc(ir, inputs, dynamic) -> float:
try:
strat = [("DYNAMIC", 10)] if dynamic else [("STATIC", 10)]
old_strat = torch.jit.set_fusion_strategy(strat)
with torch.jit.fuser("fuser1"):
return run_test(ir, inputs)
finally:
torch.jit.set_fusion_strategy(old_strat)
def run_nvfuser(ir, inputs) -> float:
with torch.jit.fuser("fuser2"):
return run_test(ir, inputs)
```
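A short, editor-added sketch of the intended workflow for these helpers: pull each IR graph out of a log containing <GRAPH_EXPORT> ... </GRAPH_EXPORT> blocks, rebuild a callable with random inputs, and time it. The log filename is hypothetical.
```py
# Editor-added sketch; "jit_graphs.log" is a hypothetical log file containing
# <GRAPH_EXPORT> ... </GRAPH_EXPORT> sections.
from torch.utils.jit.log_extract import (
    extract_ir,
    load_graph_and_inputs,
    run_baseline_no_fusion,
)

for ir in extract_ir("jit_graphs.log"):
    _, inputs = load_graph_and_inputs(ir)      # random inputs matching the IR types
    ms = run_baseline_no_fusion(ir, inputs)    # median runtime with fusion disabled
    print(f"{ms:.3f} ms")
```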
|