diff --git a/llmeval-env/lib/python3.10/site-packages/torch/cuda/__init__.py b/llmeval-env/lib/python3.10/site-packages/torch/cuda/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..b042126d99e217b789853d4a52b871918f9162f6 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/cuda/__init__.py @@ -0,0 +1,1412 @@ +r""" +This package adds support for CUDA tensor types. + +It implements the same function as CPU tensors, but they utilize +GPUs for computation. + +It is lazily initialized, so you can always import it, and use +:func:`is_available()` to determine if your system supports CUDA. + +:ref:`cuda-semantics` has more details about working with CUDA. +""" + + +import contextlib +import importlib +import os +import sys +import threading +import traceback +import warnings +from functools import lru_cache +from typing import Any, Callable, cast, List, Optional, Tuple, Union + +import torch +import torch._C +from torch.types import Device +from .. import device as _device +from .._utils import _dummy_type, _LazySeedTracker, classproperty +from ._utils import _get_device_index +from .graphs import ( + CUDAGraph, + graph, + graph_pool_handle, + is_current_stream_capturing, + make_graphed_callables, +) +from .streams import Event, ExternalStream, Stream + +try: + from torch._C import _cudart # type: ignore[attr-defined] +except ImportError: + _cudart = None + +_initialized = False +_tls = threading.local() +_initialization_lock = threading.Lock() +_queued_calls: List[ + Tuple[Callable[[], None], List[str]] +] = [] # don't invoke these until initialization occurs +_is_in_bad_fork = getattr(torch._C, "_cuda_isInBadFork", lambda: False) +_device_t = Union[_device, str, int, None] + +_HAS_PYNVML = False +_PYNVML_ERR = None +try: + import pynvml # type: ignore[import] + + _HAS_PYNVML = True +except ImportError as err: + _PYNVML_ERR = err # sometimes a lib is installed but the import fails for some other reason, so we log the error for later + +_lazy_seed_tracker = _LazySeedTracker() + +# Define dummy _CudaDeviceProperties type if PyTorch was compiled without CUDA +if hasattr(torch._C, "_CudaDeviceProperties"): + _CudaDeviceProperties = torch._C._CudaDeviceProperties +else: + _CudaDeviceProperties = _dummy_type("_CudaDeviceProperties") # type: ignore[assignment, misc] + +if hasattr(torch._C, "_cuda_exchangeDevice"): + _exchange_device = torch._C._cuda_exchangeDevice +else: + + def _exchange_device(device: int) -> int: + if device < 0: + return -1 + raise RuntimeError("PyTorch was compiled without CUDA support") + + +if hasattr(torch._C, "_cuda_maybeExchangeDevice"): + _maybe_exchange_device = torch._C._cuda_maybeExchangeDevice +else: + + def _maybe_exchange_device(device: int) -> int: + if device < 0: + return -1 + raise RuntimeError("PyTorch was compiled without CUDA support") + + +has_half: bool = True +has_magma: bool = torch._C._has_magma + +default_generators: Tuple[torch._C.Generator] = () # type: ignore[assignment] + + +def _is_compiled() -> bool: + r"""Return true if compile with CUDA support.""" + return hasattr(torch._C, "_cuda_getDeviceCount") + + +def _nvml_based_avail() -> bool: + return os.getenv("PYTORCH_NVML_BASED_CUDA_CHECK") == "1" + + +def is_available() -> bool: + r"""Return a bool indicating if CUDA is currently available.""" + if not _is_compiled(): + return False + if _nvml_based_avail(): + # The user has set an env variable to request this availability check that attempts to avoid fork poisoning by + # using NVML at the cost of a 
weaker CUDA availability assessment. Note that if NVML discovery/initialization + # fails, this assessment falls back to the default CUDA Runtime API assessment (`cudaGetDeviceCount`) + return device_count() > 0 + else: + # The default availability inspection never throws and returns 0 if the driver is missing or can't + # be initialized. This uses the CUDA Runtime API `cudaGetDeviceCount` which in turn initializes the CUDA Driver + # API via `cuInit` + return torch._C._cuda_getDeviceCount() > 0 + + +def is_bf16_supported(): + r"""Return a bool indicating if the current CUDA/ROCm device supports dtype bfloat16.""" + # Check for ROCm, if true return true, no ROCM_VERSION check required, + # since it is supported on AMD GPU archs. + if torch.version.hip: + return True + + device = torch.cuda.current_device() + + # Check for CUDA version and device compute capability. + # This is a fast way to check for it. + cuda_version = torch.version.cuda + if ( + cuda_version is not None + and int(cuda_version.split(".")[0]) >= 11 + and torch.cuda.get_device_properties(device).major >= 8 + ): + return True + + # Finally try to create a bfloat16 device. + return _check_bf16_tensor_supported(device) + + +@lru_cache(maxsize=16) +def _check_bf16_tensor_supported(device: _device_t): + try: + torch.tensor([1.0], dtype=torch.bfloat16, device=device) + return True + except Exception: + return False + + +def _sleep(cycles): + torch._C._cuda_sleep(cycles) + + +def _check_capability(): + incorrect_binary_warn = """ + Found GPU%d %s which requires CUDA_VERSION >= %d to + work properly, but your PyTorch was compiled + with CUDA_VERSION %d. Please install the correct PyTorch binary + using instructions from https://pytorch.org + """ + + old_gpu_warn = """ + Found GPU%d %s which is of cuda capability %d.%d. + PyTorch no longer supports this GPU because it is too old. + The minimum cuda capability supported by this library is %d.%d. + """ + + if torch.version.cuda is not None: # on ROCm we don't want this check + CUDA_VERSION = torch._C._cuda_getCompiledVersion() + for d in range(device_count()): + capability = get_device_capability(d) + major = capability[0] + minor = capability[1] + name = get_device_name(d) + current_arch = major * 10 + minor + min_arch = min( + (int(arch.split("_")[1]) for arch in torch.cuda.get_arch_list()), + default=35, + ) + if current_arch < min_arch: + warnings.warn( + old_gpu_warn + % (d, name, major, minor, min_arch // 10, min_arch % 10) + ) + + +def _check_cubins(): + incompatible_device_warn = """ +{} with CUDA capability sm_{} is not compatible with the current PyTorch installation. +The current PyTorch install supports CUDA capabilities {}. 
+If you want to use the {} GPU with PyTorch, please check the instructions at https://pytorch.org/get-started/locally/ +""" + if torch.version.cuda is None: # on ROCm we don't want this check + return + arch_list = get_arch_list() + if len(arch_list) == 0: + return + supported_sm = [int(arch.split("_")[1]) for arch in arch_list if "sm_" in arch] + for idx in range(device_count()): + cap_major, cap_minor = get_device_capability(idx) + # NVIDIA GPU compute architectures are backward compatible within major version + supported = any(sm // 10 == cap_major for sm in supported_sm) + if not supported: + device_name = get_device_name(idx) + capability = cap_major * 10 + cap_minor + warnings.warn( + incompatible_device_warn.format( + device_name, capability, " ".join(arch_list), device_name + ) + ) + + +def is_initialized(): + r"""Return whether PyTorch's CUDA state has been initialized.""" + return _initialized and not _is_in_bad_fork() + + +def _lazy_call(callable, **kwargs): + if is_initialized(): + callable() + else: + # TODO(torch_deploy): this accesses linecache, which attempts to read the + # file system to get traceback info. Patch linecache or do something + # else here if this ends up being important. + global _lazy_seed_tracker + if kwargs.get("seed_all", False): + _lazy_seed_tracker.queue_seed_all(callable, traceback.format_stack()) + elif kwargs.get("seed", False): + _lazy_seed_tracker.queue_seed(callable, traceback.format_stack()) + else: + # Don't store the actual traceback to avoid memory cycle + _queued_calls.append((callable, traceback.format_stack())) + + +_lazy_call(_check_capability) +_lazy_call(_check_cubins) + + +class DeferredCudaCallError(Exception): + pass + + +OutOfMemoryError = torch._C._OutOfMemoryError + + +def init(): + r"""Initialize PyTorch's CUDA state. + + You may need to call this explicitly if you are interacting with + PyTorch via its C API, as Python bindings for CUDA functionality + will not be available until this initialization takes place. + Ordinary users should not need this, as all of PyTorch's CUDA methods + automatically initialize CUDA state on-demand. + + Does nothing if the CUDA state is already initialized. + """ + _lazy_init() + + +def _lazy_init(): + global _initialized, _queued_calls + if is_initialized() or hasattr(_tls, "is_initializing"): + return + with _initialization_lock: + # We be double-checked locking, boys! This is OK because + # the above test was GIL protected anyway. The inner test + # is for when a thread blocked on some other thread which was + # doing the initialization; when they get the lock, they will + # find there is nothing left to do. + if is_initialized(): + return + # It is important to prevent other threads from entering _lazy_init + # immediately, while we are still guaranteed to have the GIL, because some + # of the C calls we make below will release the GIL + if _is_in_bad_fork(): + raise RuntimeError( + "Cannot re-initialize CUDA in forked subprocess. To use CUDA with " + "multiprocessing, you must use the 'spawn' start method" + ) + if not hasattr(torch._C, "_cuda_getDeviceCount"): + raise AssertionError("Torch not compiled with CUDA enabled") + if _cudart is None: + raise AssertionError( + "libcudart functions unavailable. It looks like you have a broken build?" 
+ ) + # This function throws if there's a driver initialization error, no GPUs + # are found or any other error occurs + if "CUDA_MODULE_LOADING" not in os.environ: + os.environ["CUDA_MODULE_LOADING"] = "LAZY" + torch._C._cuda_init() + # Some of the queued calls may reentrantly call _lazy_init(); + # we need to just return without initializing in that case. + # However, we must not let any *other* threads in! + _tls.is_initializing = True + + for calls in _lazy_seed_tracker.get_calls(): + if calls: + _queued_calls.append(calls) + + try: + for queued_call, orig_traceback in _queued_calls: + try: + queued_call() + except Exception as e: + msg = ( + f"CUDA call failed lazily at initialization with error: {str(e)}\n\n" + f"CUDA call was originally invoked at:\n\n{''.join(orig_traceback)}" + ) + raise DeferredCudaCallError(msg) from e + finally: + delattr(_tls, "is_initializing") + _initialized = True + + +def cudart(): + _lazy_init() + return _cudart + + +class cudaStatus: + SUCCESS: int = 0 + ERROR_NOT_READY: int = 34 + + +class CudaError(RuntimeError): + def __init__(self, code: int) -> None: + msg = _cudart.cudaGetErrorString(_cudart.cudaError(code)) + super().__init__(f"{msg} ({code})") + + +def check_error(res: int) -> None: + if res != _cudart.cudaError.success: + raise CudaError(res) + + +class _DeviceGuard: + def __init__(self, index: int): + self.idx = index + self.prev_idx = -1 + + def __enter__(self): + self.prev_idx = torch.cuda._exchange_device(self.idx) + + def __exit__(self, type: Any, value: Any, traceback: Any): + self.idx = torch.cuda._maybe_exchange_device(self.prev_idx) + return False + + +class device: + r"""Context-manager that changes the selected device. + + Args: + device (torch.device or int): device index to select. It's a no-op if + this argument is a negative integer or ``None``. + """ + + def __init__(self, device: Any): + self.idx = _get_device_index(device, optional=True) + self.prev_idx = -1 + + def __enter__(self): + self.prev_idx = torch.cuda._exchange_device(self.idx) + + def __exit__(self, type: Any, value: Any, traceback: Any): + self.idx = torch.cuda._maybe_exchange_device(self.prev_idx) + return False + + +class device_of(device): + r"""Context-manager that changes the current device to that of given object. + + You can use both tensors and storages as arguments. If a given object is + not allocated on a GPU, this is a no-op. + + Args: + obj (Tensor or Storage): object allocated on the selected device. + """ + + def __init__(self, obj): + idx = obj.get_device() if obj.is_cuda else -1 + super().__init__(idx) + + +def set_device(device: _device_t) -> None: + r"""Set the current device. + + Usage of this function is discouraged in favor of :any:`device`. In most + cases it's better to use ``CUDA_VISIBLE_DEVICES`` environmental variable. + + Args: + device (torch.device or int): selected device. This function is a no-op + if this argument is negative. + """ + device = _get_device_index(device) + if device >= 0: + torch._C._cuda_setDevice(device) + + +def get_device_name(device: Optional[_device_t] = None) -> str: + r"""Get the name of a device. + + Args: + device (torch.device or int, optional): device for which to return the + name. This function is a no-op if this argument is a negative + integer. It uses the current device, given by :func:`~torch.cuda.current_device`, + if :attr:`device` is ``None`` (default). 
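+
+ Example (illustrative; the exact string depends on the installed GPU)::
+
+ >>> torch.cuda.get_device_name(0)
+ 'NVIDIA GeForce RTX 3090'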
+ + Returns: + str: the name of the device + """ + return get_device_properties(device).name + + +def get_device_capability(device: Optional[_device_t] = None) -> Tuple[int, int]: + r"""Get the cuda capability of a device. + + Args: + device (torch.device or int, optional): device for which to return the + device capability. This function is a no-op if this argument is + a negative integer. It uses the current device, given by + :func:`~torch.cuda.current_device`, if :attr:`device` is ``None`` + (default). + + Returns: + tuple(int, int): the major and minor cuda capability of the device + """ + prop = get_device_properties(device) + return prop.major, prop.minor + + +def get_device_properties(device: _device_t) -> _CudaDeviceProperties: + r"""Get the properties of a device. + + Args: + device (torch.device or int or str): device for which to return the + properties of the device. + + Returns: + _CudaDeviceProperties: the properties of the device + """ + _lazy_init() # will define _get_device_properties + device = _get_device_index(device, optional=True) + if device < 0 or device >= device_count(): + raise AssertionError("Invalid device id") + return _get_device_properties(device) # type: ignore[name-defined] + + +def can_device_access_peer(device: _device_t, peer_device: _device_t) -> bool: + r"""Check if peer access between two devices is possible.""" + _lazy_init() + device = _get_device_index(device, optional=True) + peer_device = _get_device_index(peer_device) + if device < 0 or device >= device_count(): + raise AssertionError("Invalid device id") + if peer_device < 0 or peer_device >= device_count(): + raise AssertionError("Invalid peer device id") + return torch._C._cuda_canDeviceAccessPeer(device, peer_device) + + +class StreamContext: + r"""Context-manager that selects a given stream. + + All CUDA kernels queued within its context will be enqueued on a selected + stream. + + Args: + Stream (Stream): selected stream. This manager is a no-op if it's + ``None``. + .. note:: Streams are per-device. 
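+
+ Example (an illustrative sketch; assumes at least one CUDA device is available)::
+
+ >>> s = torch.cuda.Stream()
+ >>> a = torch.empty(8, device="cuda")
+ >>> with torch.cuda.stream(s): # the usual entry point, which wraps StreamContext
+ ... b = a + 1 # kernels inside the block are enqueued on ``s``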
+ """ + cur_stream: Optional["torch.cuda.Stream"] + + def __init__(self, stream: Optional["torch.cuda.Stream"]): + self.stream = stream + self.idx = _get_device_index(None, True) + if not torch.jit.is_scripting(): + if self.idx is None: + self.idx = -1 + + self.src_prev_stream = ( + None if not torch.jit.is_scripting() else torch.cuda.default_stream(None) + ) + self.dst_prev_stream = ( + None if not torch.jit.is_scripting() else torch.cuda.default_stream(None) + ) + + def __enter__(self): + # Local cur_stream variable for type refinement + cur_stream = self.stream + # Return if stream is None or CUDA device not available + if cur_stream is None or self.idx == -1: + return + self.src_prev_stream = torch.cuda.current_stream(None) + + # If the stream is not on the current device, then + # set the current stream on the device + if self.src_prev_stream.device != cur_stream.device: + with device(cur_stream.device): + self.dst_prev_stream = torch.cuda.current_stream(cur_stream.device) + torch.cuda.set_stream(cur_stream) + + def __exit__(self, type: Any, value: Any, traceback: Any): + # Local cur_stream variable for type refinement + cur_stream = self.stream + # If stream is None or no CUDA device available, return + if cur_stream is None or self.idx == -1: + return + + # Reset the stream on the original device + # and destination device + if self.src_prev_stream.device != cur_stream.device: # type: ignore[union-attr] + torch.cuda.set_stream(self.dst_prev_stream) # type: ignore[arg-type] + torch.cuda.set_stream(self.src_prev_stream) # type: ignore[arg-type] + + +def stream(stream: Optional["torch.cuda.Stream"]) -> StreamContext: + r"""Wrap around the Context-manager StreamContext that selects a given stream. + + Arguments: + stream (Stream): selected stream. This manager is a no-op if it's + ``None``. + ..Note:: In eager mode stream is of type Stream class while in JIT it is + an object of the custom class ``torch.classes.cuda.Stream``. + """ + return StreamContext(stream) + + +def _set_stream_by_id(stream_id, device_index, device_type): + r"""set stream specified by the stream id, device index and + device type + + Args: stream_id (int): stream id in stream pool + device_index (int): device index in topo + device_type (int): enum device type + """ + torch._C._cuda_setStream( + stream_id=stream_id, + device_index=device_index, + device_type=device_type, + ) + + +def set_stream(stream: Stream): + r"""Set the current stream.This is a wrapper API to set the stream. + Usage of this function is discouraged in favor of the ``stream`` + context manager. + + Args: + stream (Stream): selected stream. This function is a no-op + if this argument is ``None``. 
+ """ + if stream is None: + return + _set_stream_by_id( + stream_id=stream.stream_id, + device_index=stream.device_index, + device_type=stream.device_type, + ) + + +def _parse_visible_devices() -> Union[List[int], List[str]]: + r"""Parse CUDA_VISIBLE_DEVICES environment variable.""" + var = os.getenv("CUDA_VISIBLE_DEVICES") + if var is None: + return list(range(64)) + + def _strtoul(s: str) -> int: + """Return -1 or positive integer sequence string starts with.""" + if not s: + return -1 + for idx, c in enumerate(s): + if not (c.isdigit() or (idx == 0 and c in "+-")): + break + if idx + 1 == len(s): + idx += 1 + return int(s[:idx]) if idx > 0 else -1 + + def parse_list_with_prefix(lst: str, prefix: str) -> List[str]: + rcs: List[str] = [] + for elem in lst.split(","): + # Repeated id results in empty set + if elem in rcs: + return cast(List[str], []) + # Anything other but prefix is ignored + if not elem.startswith(prefix): + break + rcs.append(elem) + return rcs + + if var.startswith("GPU-"): + return parse_list_with_prefix(var, "GPU-") + if var.startswith("MIG-"): + return parse_list_with_prefix(var, "MIG-") + # CUDA_VISIBLE_DEVICES uses something like strtoul + # which makes `1gpu2,2ampere` is equivalent to `1,2` + rc: List[int] = [] + for elem in var.split(","): + x = _strtoul(elem.strip()) + # Repeated ordinal results in empty set + if x in rc: + return cast(List[int], []) + # Negative value aborts the sequence + if x < 0: + break + rc.append(x) + return rc + + +def _raw_device_count_nvml() -> int: + r"""Return number of devices as reported by NVML or negative value if NVML discovery/initialization failed.""" + from ctypes import byref, c_int, CDLL + + nvml_h = CDLL("libnvidia-ml.so.1") + rc = nvml_h.nvmlInit() + if rc != 0: + warnings.warn("Can't initialize NVML") + return -1 + dev_count = c_int(-1) + rc = nvml_h.nvmlDeviceGetCount_v2(byref(dev_count)) + if rc != 0: + warnings.warn("Can't get nvml device count") + return -1 + del nvml_h + return dev_count.value + + +def _raw_device_uuid_nvml() -> Optional[List[str]]: + r"""Return list of device UUID as reported by NVML or None if NVM discovery/initialization failed.""" + from ctypes import byref, c_int, c_void_p, CDLL, create_string_buffer + + nvml_h = CDLL("libnvidia-ml.so.1") + rc = nvml_h.nvmlInit() + if rc != 0: + warnings.warn("Can't initialize NVML") + return None + dev_count = c_int(-1) + rc = nvml_h.nvmlDeviceGetCount_v2(byref(dev_count)) + if rc != 0: + warnings.warn("Can't get nvml device count") + return None + uuids: List[str] = [] + for idx in range(dev_count.value): + dev_id = c_void_p() + rc = nvml_h.nvmlDeviceGetHandleByIndex_v2(idx, byref(dev_id)) + if rc != 0: + warnings.warn("Can't get device handle") + return None + buf_len = 96 + buf = create_string_buffer(buf_len) + rc = nvml_h.nvmlDeviceGetUUID(dev_id, buf, buf_len) + if rc != 0: + warnings.warn("Can't get device UUID") + return None + uuids.append(buf.raw.decode("ascii").strip("\0")) + del nvml_h + return uuids + + +def _transform_uuid_to_ordinals(candidates: List[str], uuids: List[str]) -> List[int]: + r"""Given the set of partial uuids and list of known uuids builds a set of ordinals excluding ambiguous partials IDs.""" + + def uuid_to_orinal(candidate: str, uuids: List[str]) -> int: + best_match = -1 + for idx, uuid in enumerate(uuids): + if not uuid.startswith(candidate): + continue + # Ambiguous candidate + if best_match != -1: + return -1 + best_match = idx + return best_match + + rc: List[int] = [] + for candidate in candidates: + idx = 
uuid_to_orinal(candidate, uuids) + # First invalid ordinal stops parsing + if idx < 0: + break + # Duplicates result in empty set + if idx in rc: + return cast(List[int], []) + rc.append(idx) + return rc + + +def _device_count_nvml() -> int: + r"""Return number of devices as reported by NVML taking CUDA_VISIBLE_DEVICES into account. + + Negative value is returned if NVML discovery or initialization has failed. + """ + visible_devices = _parse_visible_devices() + if not visible_devices: + return 0 + try: + if type(visible_devices[0]) is str: + # Skip MIG parsing + if visible_devices[0].startswith("MIG-"): + return -1 + uuids = _raw_device_uuid_nvml() + if uuids is None: + return -1 + visible_devices = _transform_uuid_to_ordinals( + cast(List[str], visible_devices), uuids + ) + else: + raw_cnt = _raw_device_count_nvml() + if raw_cnt <= 0: + return raw_cnt + # Trim the list up to a maximum available device + for idx, val in enumerate(visible_devices): + if cast(int, val) >= raw_cnt: + return idx + except OSError: + return -1 + except AttributeError: + return -1 + return len(visible_devices) + + +def _get_nvml_device_index(device: Optional[Union[int, Device]]) -> int: + r"""Return the NVML index of the device, taking CUDA_VISIBLE_DEVICES into account.""" + idx = _get_device_index(device, optional=True) + visible_devices = _parse_visible_devices() + if type(visible_devices[0]) is str: + uuids = _raw_device_uuid_nvml() + if uuids is None: + raise RuntimeError("Can't get device UUIDs") + visible_devices = _transform_uuid_to_ordinals( + cast(List[str], visible_devices), uuids + ) + visible_devices = cast(List[int], visible_devices) + if idx < 0 or idx >= len(visible_devices): + raise RuntimeError( + f"device {idx} is not visible (CUDA_VISIBLE_DEVICES={visible_devices})" + ) + return visible_devices[idx] + + +@lru_cache(maxsize=1) +def device_count() -> int: + r"""Return the number of GPUs available.""" + if not _is_compiled(): + return 0 + # bypass _device_count_nvml() if rocm (not supported) + nvml_count = -1 if torch.version.hip else _device_count_nvml() + return torch._C._cuda_getDeviceCount() if nvml_count < 0 else nvml_count + + +def get_arch_list() -> List[str]: + r"""Return list CUDA architectures this library was compiled for.""" + if not is_available(): + return [] + arch_flags = torch._C._cuda_getArchFlags() + if arch_flags is None: + return [] + return arch_flags.split() + + +def get_gencode_flags() -> str: + r"""Return NVCC gencode flags this library was compiled with.""" + arch_list = get_arch_list() + if len(arch_list) == 0: + return "" + arch_list_ = [arch.split("_") for arch in arch_list] + return " ".join( + [ + f"-gencode compute=compute_{arch},code={kind}_{arch}" + for (kind, arch) in arch_list_ + ] + ) + + +def current_device() -> int: + r"""Return the index of a currently selected device.""" + _lazy_init() + return torch._C._cuda_getDevice() + + +def synchronize(device: _device_t = None) -> None: + r"""Wait for all kernels in all streams on a CUDA device to complete. + + Args: + device (torch.device or int, optional): device for which to synchronize. + It uses the current device, given by :func:`~torch.cuda.current_device`, + if :attr:`device` is ``None`` (default). + """ + _lazy_init() + with torch.cuda.device(device): + return torch._C._cuda_synchronize() + + +def ipc_collect(): + r"""Force collects GPU memory after it has been released by CUDA IPC. + + .. note:: + Checks if any sent CUDA tensors could be cleaned from the memory. 
Force + closes shared memory file used for reference counting if there is no + active counters. Useful when the producer process stopped actively sending + tensors and want to release unused memory. + """ + _lazy_init() + return torch._C._cuda_ipc_collect() + + +def current_stream(device: Optional[_device_t] = None) -> Stream: + r"""Return the currently selected :class:`Stream` for a given device. + + Args: + device (torch.device or int, optional): selected device. Returns + the currently selected :class:`Stream` for the current device, given + by :func:`~torch.cuda.current_device`, if :attr:`device` is ``None`` + (default). + """ + _lazy_init() + streamdata = torch._C._cuda_getCurrentStream( + _get_device_index(device, optional=True) + ) + return Stream( + stream_id=streamdata[0], device_index=streamdata[1], device_type=streamdata[2] + ) + + +def default_stream(device: Optional[_device_t] = None) -> Stream: + r"""Return the default :class:`Stream` for a given device. + + Args: + device (torch.device or int, optional): selected device. Returns + the default :class:`Stream` for the current device, given by + :func:`~torch.cuda.current_device`, if :attr:`device` is ``None`` + (default). + """ + _lazy_init() + streamdata = torch._C._cuda_getDefaultStream( + _get_device_index(device, optional=True) + ) + return Stream( + stream_id=streamdata[0], device_index=streamdata[1], device_type=streamdata[2] + ) + + +def current_blas_handle(): + r"""Return cublasHandle_t pointer to current cuBLAS handle""" + _lazy_init() + return torch._C._cuda_getCurrentBlasHandle() + + +def set_sync_debug_mode(debug_mode: Union[int, str]) -> None: + r"""Set the debug mode for cuda synchronizing operations. + + Args: + debug_mode(str or int): if "default" or 0, don't error or warn on synchronizing operations, + if "warn" or 1, warn on synchronizing operations, if "error" or 2, error out synchronizing operations. + + Warning: + This is an experimental feature, and not all synchronizing operations will trigger warning or error. In + particular, operations in torch.distributed and torch.sparse namespaces are not covered yet. + """ + _lazy_init() + if isinstance(debug_mode, str): + if debug_mode == "default": + debug_mode = 0 + elif debug_mode == "warn": + debug_mode = 1 + elif debug_mode == "error": + debug_mode = 2 + else: + raise RuntimeError( + "invalid value of debug_mode, expected one of `default`, `warn`, `error`" + ) + + torch._C._cuda_set_sync_debug_mode(debug_mode) + + +def get_sync_debug_mode() -> int: + r"""Return current value of debug mode for cuda synchronizing operations.""" + _lazy_init() + return torch._C._cuda_get_sync_debug_mode() + + +def _get_pynvml_handler(device: Optional[Union[Device, int]] = None): + if not _HAS_PYNVML: + raise ModuleNotFoundError( + "pynvml does not seem to be installed or it can't be imported." + ) from _PYNVML_ERR + from pynvml import NVMLError_DriverNotLoaded + + try: + pynvml.nvmlInit() + except NVMLError_DriverNotLoaded as e: + raise RuntimeError("cuda driver can't be loaded, is cuda enabled?") from e + + device = _get_nvml_device_index(device) + handle = pynvml.nvmlDeviceGetHandleByIndex(device) + return handle + + +def memory_usage(device: Optional[Union[Device, int]] = None) -> int: + r"""Return the percent of time over the past sample period during which global (device) + memory was being read or written as given by `nvidia-smi`. + + Args: + device (torch.device or int, optional): selected device. 
Returns + statistic for the current device, given by :func:`~torch.cuda.current_device`, + if :attr:`device` is ``None`` (default). + + Warning: Each sample period may be between 1 second and 1/6 second, + depending on the product being queried. + """ + handle = _get_pynvml_handler() + + device = _get_nvml_device_index(device) + handle = pynvml.nvmlDeviceGetHandleByIndex(device) + return pynvml.nvmlDeviceGetUtilizationRates(handle).memory + + +def utilization(device: Optional[Union[Device, int]] = None) -> int: + r"""Return the percent of time over the past sample period during which one or + more kernels was executing on the GPU as given by `nvidia-smi`. + + Args: + device (torch.device or int, optional): selected device. Returns + statistic for the current device, given by :func:`~torch.cuda.current_device`, + if :attr:`device` is ``None`` (default). + + Warning: Each sample period may be between 1 second and 1/6 second, + depending on the product being queried. + """ + handle = _get_pynvml_handler(device) + device = _get_nvml_device_index(device) + handle = pynvml.nvmlDeviceGetHandleByIndex(device) + return pynvml.nvmlDeviceGetUtilizationRates(handle).gpu + + +def temperature(device: Optional[Union[Device, int]] = None) -> int: + r"""Return the average temperature of the GPU sensor in Degrees C (Centigrades). + + The average temperature is computed based on past sample period as given by `nvidia-smi`. + + Args: + device (torch.device or int, optional): selected device. Returns + statistic for the current device, given by :func:`~torch.cuda.current_device`, + if :attr:`device` is ``None`` (default). + + Warning: Each sample period may be between 1 second and 1/6 second, + depending on the product being queried. + """ + handle = _get_pynvml_handler(device) + # 0 refers to the temperature sensor for the GPU die. + return pynvml.nvmlDeviceGetTemperature(handle, 0) + + +def power_draw(device: Optional[Union[Device, int]] = None) -> int: + r"""Return the average power draw of the GPU sensor in mW (MilliWatts) + over the past sample period as given by `nvidia-smi` for Fermi or newer fully supported devices. + + Args: + device (torch.device or int, optional): selected device. Returns + statistic for the current device, given by :func:`~torch.cuda.current_device`, + if :attr:`device` is ``None`` (default). + + Warning: Each sample period may be between 1 second and 1/6 second, + depending on the product being queried. + """ + handle = _get_pynvml_handler(device) + return pynvml.nvmlDeviceGetPowerUsage(handle) + + +def clock_rate(device: Optional[Union[Device, int]] = None) -> int: + r"""Return the clock speed of the GPU SM in Hz Hertz over the past sample period as given by `nvidia-smi`. + + Args: + device (torch.device or int, optional): selected device. Returns + statistic for the current device, given by :func:`~torch.cuda.current_device`, + if :attr:`device` is ``None`` (default). + + Warning: Each sample period may be between 1 second and 1/6 second, + depending on the product being queried. + """ + handle = _get_pynvml_handler(device) + return pynvml.nvmlDeviceGetClockInfo(handle, 1) + + +def _get_device(device: Union[int, str, torch.device]) -> torch.device: + r"""Return the torch.device type object from the passed in device. + + Args: + device (torch.device or int): selected device. 
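+
+ Example (illustrative sketch of the normalization performed below)::
+
+ >>> _get_device("cuda:1")
+ device(type='cuda', index=1)
+ >>> _get_device(0)
+ device(type='cuda', index=0)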
+ """ + if isinstance(device, str): + device = torch.device(device) + elif isinstance(device, int): + device = torch.device("cuda", device) + return device + + +def _get_generator(device: torch.device) -> torch._C.Generator: + r"""Return the CUDA Generator object for the given device. + + Args: + device (torch.device): selected device. + """ + idx = device.index + if idx is None: + idx = current_device() + return torch.cuda.default_generators[idx] + + +def _set_rng_state_offset( + offset: int, device: Union[int, str, torch.device] = "cuda" +) -> None: + r"""Set the random number generator state offset of the specified GPU. + + Args: + offset (int): The desired offset + device (torch.device or int, optional): The device to set the RNG state. + Default: ``'cuda'`` (i.e., ``torch.device('cuda')``, the current CUDA device). + """ + final_device = _get_device(device) + + def cb(): + default_generator = _get_generator(final_device) + default_generator.set_offset(offset) + + _lazy_call(cb) + + +def _get_rng_state_offset(device: Union[int, str, torch.device] = "cuda") -> int: + r"""Return the random number generator state offset of the specified GPU. + + Args: + device (torch.device or int, optional): The device to return the RNG state offset of. + Default: ``'cuda'`` (i.e., ``torch.device('cuda')``, the current CUDA device). + + .. warning:: + This function eagerly initializes CUDA. + """ + _lazy_init() + final_device = _get_device(device) + default_generator = _get_generator(final_device) + return default_generator.get_offset() + + +from .memory import * # noqa: F403 + + +from .random import * # noqa: F403 + +################################################################################ +# Define Storage and Tensor classes +################################################################################ + + +@staticmethod # type: ignore[misc] +def _lazy_new(cls, *args, **kwargs): + _lazy_init() + # We may need to call lazy init again if we are a forked child + # del _CudaBase.__new__ + return super(_CudaBase, cls).__new__(cls, *args, **kwargs) + + +class _CudaBase: + is_cuda = True + is_sparse = False + + def type(self, *args, **kwargs): + # We could use a Protocol here to tell mypy that self has `get_device` method + # but it is only available in the typing module on Python >= 3.8 + # or on typing_extensions module on Python >= 3.6 + with device(self.get_device()): # type: ignore[attr-defined] + return super().type(*args, **kwargs) # type: ignore[misc] + + __new__ = _lazy_new + + +from torch.storage import _LegacyStorage, _warn_typed_storage_removal + + +class _CudaLegacyStorage(_LegacyStorage): + @classmethod + def from_buffer(cls, *args, **kwargs): + _warn_typed_storage_removal() + raise RuntimeError("from_buffer: Not available for CUDA storage") + + @classmethod + def _new_with_weak_ptr(cls, *args, **kwargs): + raise RuntimeError("_new_with_weak_ptr: Not available for CUDA storage") + + @classmethod + def _new_shared_filename(cls, manager, obj, size, *, device=None, dtype=None): + raise RuntimeError("_new_shared_filename: Not available for CUDA storage") + + +class ByteStorage(_CudaLegacyStorage): + @classproperty + def dtype(self): + _warn_typed_storage_removal() + return self._dtype + + @classproperty + def _dtype(self): + return torch.uint8 + + +class DoubleStorage(_CudaLegacyStorage): + @classproperty + def dtype(self): + _warn_typed_storage_removal() + return self._dtype + + @classproperty + def _dtype(self): + return torch.double + + +class FloatStorage(_CudaLegacyStorage): + 
@classproperty + def dtype(self): + _warn_typed_storage_removal() + return self._dtype + + @classproperty + def _dtype(self): + return torch.float + + +class HalfStorage(_CudaLegacyStorage): + @classproperty + def dtype(self): + _warn_typed_storage_removal() + return self._dtype + + @classproperty + def _dtype(self): + return torch.half + + +class LongStorage(_CudaLegacyStorage): + @classproperty + def dtype(self): + _warn_typed_storage_removal() + return self._dtype + + @classproperty + def _dtype(self): + return torch.long + + +class IntStorage(_CudaLegacyStorage): + @classproperty + def dtype(self): + _warn_typed_storage_removal() + return self._dtype + + @classproperty + def _dtype(self): + return torch.int + + +class ShortStorage(_CudaLegacyStorage): + @classproperty + def dtype(self): + _warn_typed_storage_removal() + return self._dtype + + @classproperty + def _dtype(self): + return torch.short + + +class CharStorage(_CudaLegacyStorage): + @classproperty + def dtype(self): + _warn_typed_storage_removal() + return self._dtype + + @classproperty + def _dtype(self): + return torch.int8 + + +class BoolStorage(_CudaLegacyStorage): + @classproperty + def dtype(self): + _warn_typed_storage_removal() + return self._dtype + + @classproperty + def _dtype(self): + return torch.bool + + +class BFloat16Storage(_CudaLegacyStorage): + @classproperty + def dtype(self): + _warn_typed_storage_removal() + return self._dtype + + @classproperty + def _dtype(self): + return torch.bfloat16 + + +class ComplexDoubleStorage(_CudaLegacyStorage): + @classproperty + def dtype(self): + _warn_typed_storage_removal() + return self._dtype + + @classproperty + def _dtype(self): + return torch.cdouble + + +class ComplexFloatStorage(_CudaLegacyStorage): + @classproperty + def dtype(self): + _warn_typed_storage_removal() + return self._dtype + + @classproperty + def _dtype(self): + return torch.cfloat + + +del _LegacyStorage +del _CudaLegacyStorage + +torch._storage_classes.add(DoubleStorage) +torch._storage_classes.add(FloatStorage) +torch._storage_classes.add(LongStorage) +torch._storage_classes.add(IntStorage) +torch._storage_classes.add(ShortStorage) +torch._storage_classes.add(CharStorage) +torch._storage_classes.add(ByteStorage) +torch._storage_classes.add(HalfStorage) +torch._storage_classes.add(BoolStorage) +torch._storage_classes.add(BFloat16Storage) +torch._storage_classes.add(ComplexDoubleStorage) +torch._storage_classes.add(ComplexFloatStorage) + + +class _WrappedTritonKernel: + """Just a simple wrapper to store some metadata for testing purposes.""" + + def __init__(self, kernel): + self.kernel = kernel + self.kernel_invoked = False + + def __call__(self, *args, **kwargs): + res = self.kernel(*args, **kwargs) + self.kernel_invoked = True + return res + + +def _register_triton_kernels(): + if torch._running_with_deploy(): + return + + @_WrappedTritonKernel + def kernel_impl(*args, **kwargs): + from torch.sparse._triton_ops import bsr_dense_mm + + return bsr_dense_mm(*args, skip_checks=True, **kwargs) + + @_WrappedTritonKernel + def addmm_kernel_impl(*args, **kwargs): + from torch.sparse._triton_ops import bsr_dense_addmm + + return bsr_dense_addmm(*args, skip_checks=True, **kwargs) + + has_triton = importlib.util.find_spec("triton") is not None + if has_triton: + torch._TritonLibrary.registerOp( + "_triton_bsr_dense_mm_out", + "_triton_bsr_dense_mm_out(Tensor bsr, Tensor dense, *, Tensor(a!) 
out) -> Tensor(a!)", + kernel_impl, + "SparseCsrCUDA", + ) + + torch._TritonLibrary.registerOp( + "_triton_bsr_dense_addmm_out", + ( + "_triton_bsr_dense_addmm_out(Tensor input, Tensor bsr, Tensor dense," + " *, Scalar beta, Scalar alpha, Tensor(a!) out) -> Tensor(a!)" + ), + addmm_kernel_impl, + "SparseCsrCUDA", + ) + + +_lazy_call(_register_triton_kernels) + + +from . import amp, jiterator, nvtx, profiler, sparse + +__all__ = [ + # Typed storage and tensors + "BFloat16Storage", + "BFloat16Tensor", + "BoolStorage", + "BoolTensor", + "ByteStorage", + "ByteTensor", + "CharStorage", + "CharTensor", + "ComplexDoubleStorage", + "ComplexFloatStorage", + "DoubleStorage", + "DoubleTensor", + "FloatStorage", + "FloatTensor", + "HalfStorage", + "HalfTensor", + "IntStorage", + "IntTensor", + "LongStorage", + "LongTensor", + "ShortStorage", + "ShortTensor", + "CUDAGraph", + "CudaError", + "DeferredCudaCallError", + "Event", + "ExternalStream", + "OutOfMemoryError", + "Stream", + "StreamContext", + "amp", + "caching_allocator_alloc", + "caching_allocator_delete", + "can_device_access_peer", + "check_error", + "cudaStatus", + "cudart", + "current_blas_handle", + "current_device", + "current_stream", + "default_generators", + "default_stream", + "device", + "device_count", + "device_of", + "empty_cache", + "get_allocator_backend", + "CUDAPluggableAllocator", + "change_current_allocator", + "get_arch_list", + "get_device_capability", + "get_device_name", + "get_device_properties", + "get_gencode_flags", + "get_rng_state", + "get_rng_state_all", + "get_sync_debug_mode", + "graph", + "graph_pool_handle", + "graphs", + "has_half", + "has_magma", + "init", + "initial_seed", + "ipc_collect", + "is_available", + "is_bf16_supported", + "is_current_stream_capturing", + "is_initialized", + "jiterator", + "list_gpu_processes", + "make_graphed_callables", + "manual_seed", + "manual_seed_all", + "max_memory_allocated", + "max_memory_cached", + "max_memory_reserved", + "mem_get_info", + "memory", + "memory_allocated", + "memory_cached", + "memory_reserved", + "memory_snapshot", + "memory_stats", + "memory_stats_as_nested_dict", + "memory_summary", + "memory_usage", + "temperature", + "power_draw", + "clock_rate", + "nccl", + "nvtx", + "profiler", + "random", + "reset_accumulated_memory_stats", + "reset_max_memory_allocated", + "reset_max_memory_cached", + "reset_peak_memory_stats", + "seed", + "seed_all", + "set_device", + "set_per_process_memory_fraction", + "set_rng_state", + "set_rng_state_all", + "set_stream", + "set_sync_debug_mode", + "sparse", + "stream", + "streams", + "synchronize", + "utilization", +] diff --git a/llmeval-env/lib/python3.10/site-packages/torch/cuda/__pycache__/__init__.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/torch/cuda/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..98cd876d9d46f524d2a9100a20b862c0eb881e48 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/torch/cuda/__pycache__/__init__.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/torch/cuda/__pycache__/_memory_viz.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/torch/cuda/__pycache__/_memory_viz.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..16ef82ecf3e289481689ebec0ac8982f41531605 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/torch/cuda/__pycache__/_memory_viz.cpython-310.pyc differ diff --git 
a/llmeval-env/lib/python3.10/site-packages/torch/cuda/__pycache__/_sanitizer.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/torch/cuda/__pycache__/_sanitizer.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..99972f7111fda4113b44dcaa3a2808cbbcf32d91 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/torch/cuda/__pycache__/_sanitizer.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/torch/cuda/__pycache__/_utils.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/torch/cuda/__pycache__/_utils.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..ed7793dbaa9f829230319ec6016ff9086b42ec0c Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/torch/cuda/__pycache__/_utils.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/torch/cuda/__pycache__/comm.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/torch/cuda/__pycache__/comm.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..80a6e7781344124629866ce8c408e60af15c3561 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/torch/cuda/__pycache__/comm.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/torch/cuda/__pycache__/error.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/torch/cuda/__pycache__/error.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..0e0c384d934cbb19adfb07438288075bc20d90ea Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/torch/cuda/__pycache__/error.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/torch/cuda/__pycache__/graphs.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/torch/cuda/__pycache__/graphs.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..af5f13751437e318727258ab8577a864aaae3a0f Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/torch/cuda/__pycache__/graphs.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/torch/cuda/__pycache__/jiterator.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/torch/cuda/__pycache__/jiterator.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..cc3e4aeb08a765b64aae72df889db9414978ff55 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/torch/cuda/__pycache__/jiterator.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/torch/cuda/__pycache__/memory.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/torch/cuda/__pycache__/memory.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..204ca149f7254a2e6e0962f2491603178883b2cf Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/torch/cuda/__pycache__/memory.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/torch/cuda/__pycache__/nccl.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/torch/cuda/__pycache__/nccl.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..98b1d18f5934012acd250ce3960297c23953721b Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/torch/cuda/__pycache__/nccl.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/torch/cuda/__pycache__/nvtx.cpython-310.pyc 
b/llmeval-env/lib/python3.10/site-packages/torch/cuda/__pycache__/nvtx.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..f61b8c41e98e722516f85212608a4f77a9d1906a Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/torch/cuda/__pycache__/nvtx.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/torch/cuda/__pycache__/profiler.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/torch/cuda/__pycache__/profiler.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..89ab4c0f326fa947703e1f495814b427ae63052e Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/torch/cuda/__pycache__/profiler.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/torch/cuda/__pycache__/random.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/torch/cuda/__pycache__/random.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..c1380458c5cfb89404a07a26c1ff99fa4e887bf1 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/torch/cuda/__pycache__/random.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/torch/cuda/__pycache__/sparse.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/torch/cuda/__pycache__/sparse.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..4992c1f4f8ae2887c1c397f9afad26450ccb707f Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/torch/cuda/__pycache__/sparse.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/torch/cuda/__pycache__/streams.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/torch/cuda/__pycache__/streams.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..00c0be8c8d2009f10f1adb0495a3afbe53426411 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/torch/cuda/__pycache__/streams.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/torch/cuda/_memory_viz.py b/llmeval-env/lib/python3.10/site-packages/torch/cuda/_memory_viz.py new file mode 100644 index 0000000000000000000000000000000000000000..a862acd73184733dca0c811204456adc21394200 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/cuda/_memory_viz.py @@ -0,0 +1,626 @@ +import pickle +import sys +import os +import io +import subprocess +import json +from functools import lru_cache +from typing import Any +from itertools import groupby +import base64 +import warnings + +cache = lru_cache(None) + +__all__ = ["format_flamegraph", "segments", "memory", "compare"] + +def _frame_fmt(f, full_filename=False): + i = f['line'] + fname = f['filename'] + if not full_filename: + fname = fname.split('/')[-1] + func = f['name'] + return f'{fname}:{i}:{func}' + +@cache +def _frame_filter(name, filename): + omit_functions = [ + "unwind::unwind", + "CapturedTraceback::gather", + "gather_with_cpp", + "_start", + "__libc_start_main", + "PyEval_", + "PyObject_", + "PyFunction_", + ] + omit_filenames = [ + "core/boxing", + "/Register", + "/Redispatch", + "pythonrun.c", + "Modules/main.c", + "Objects/call.c", + "Objects/methodobject.c", + "pycore_ceval.h", + "ceval.c", + "cpython/abstract.h", + ] + for of in omit_functions: + if of in name: + return False + for of in omit_filenames: + if of in filename: + return False + return True + +def _frames_fmt(frames, full_filename=False, reverse=False): + if reverse: + frames = reversed(frames) + return 
[_frame_fmt(f, full_filename) for f in frames if _frame_filter(f['name'], f['filename'])] + +def _block_extra_legacy(b): + if 'history' in b: + frames = b['history'][0].get('frames', []) + real_size = b['history'][0]['real_size'] + else: + real_size = b.get('requested_size', b['size']) + frames = [] + return frames, real_size + +def _block_extra(b): + if 'frames' not in b: + # old snapshot format made it more complicated to get frames/allocated size + return _block_extra_legacy(b) + return b['frames'], b['requested_size'] + +def format_flamegraph(flamegraph_lines, flamegraph_script=None): + if flamegraph_script is None: + flamegraph_script = f'/tmp/{os.getuid()}_flamegraph.pl' + if not os.path.exists(flamegraph_script): + import urllib.request + print(f"Downloading flamegraph.pl to: {flamegraph_script}") + urllib.request.urlretrieve( + 'https://raw.githubusercontent.com/brendangregg/FlameGraph/master/flamegraph.pl', flamegraph_script) + subprocess.check_call(['chmod', '+x', flamegraph_script]) + args = [flamegraph_script, '--countname', 'bytes'] + p = subprocess.Popen(args, stdin=subprocess.PIPE, stdout=subprocess.PIPE, encoding='utf-8') + assert p.stdin is not None + assert p.stdout is not None + p.stdin.write(flamegraph_lines) + p.stdin.close() + result = p.stdout.read() + p.stdout.close() + p.wait() + assert p.wait() == 0 + return result + +def _write_blocks(f, prefix, blocks): + def frames_fragment(frames): + if not frames: + return "" + return ';'.join(_frames_fmt(frames, reverse=True)) + for b in blocks: + if 'history' not in b: + frames, accounted_for_size = _block_extra(b) + f.write(f'{prefix};{b["state"]};{frames_fragment(frames)} {accounted_for_size}\n') + else: + accounted_for_size = 0 + for h in b['history']: + sz = h['real_size'] + accounted_for_size += sz + if 'frames' in h: + frames = h['frames'] + f.write(f'{prefix};{b["state"]};{frames_fragment(frames)} {sz}\n') + else: + f.write(f'{prefix};{b["state"]}; {sz}\n') + gaps = b['size'] - accounted_for_size + if gaps: + f.write(f'{prefix};{b["state"]}; {gaps}\n') + +def segments(snapshot, format_flamegraph=format_flamegraph): + f = io.StringIO() + for seg in snapshot['segments']: + prefix = f'stream_{seg["stream"]};seg_{seg["address"]}' + _write_blocks(f, prefix, seg['blocks']) + return format_flamegraph(f.getvalue()) + +def memory(snapshot, format_flamegraph=format_flamegraph): + f = io.StringIO() + for seg in snapshot['segments']: + prefix = f'stream_{seg["stream"]}' + _write_blocks(f, prefix, seg['blocks']) + return format_flamegraph(f.getvalue()) + +def compare(before, after, format_flamegraph=format_flamegraph): + def _seg_key(seg): + return (seg['address'], seg['total_size']) + + def _seg_info(seg): + return f'stream_{seg["stream"]};seg_{seg["address"]}' + + f = io.StringIO() + + before_segs = {_seg_key(seg) for seg in before} + after_segs = {_seg_key(seg) for seg in after} + + print(f'only_before = {[a for a,_ in (before_segs - after_segs)]}') + print(f'only_after = {[a for a,_ in (after_segs - before_segs)]}') + + for seg in before: + if _seg_key(seg) not in after_segs: + _write_blocks(f, f'only_before;{_seg_info(seg)}', seg['blocks']) + + for seg in after: + if _seg_key(seg) not in before_segs: + _write_blocks(f, f'only_after;{_seg_info(seg)}', seg['blocks']) + + return format_flamegraph(f.getvalue()) + +def _format_size(num): + # https://stackoverflow.com/questions/1094841/get-human-readable-version-of-file-size + for unit in ["", "Ki", "Mi", "Gi", "Ti", "Pi", "Ei", "Zi"]: + if abs(num) < 1024.0: + return 
f"{num:3.1f}{unit}B" + num /= 1024.0 + return f"{num:.1f}YiB" + +class Bytes: + def __init__(self, value): + self.value = value + + def __add__(self, rhs): + return Bytes(self.value + rhs) + + def __repr__(self): + return _format_size(self.value) + +def calc_active(seg): + return sum(b['size'] for b in seg['blocks'] if b['state'] == 'active_allocated') + +def _report_free(free_external, free_internal): + total = free_external + free_internal + suffix = '' + if total != 0: + pct = (free_internal / total) * 100 + suffix = f' ({pct:.1f}% internal)' + return f'{Bytes(total)}{suffix}' + +PAGE_SIZE = 1024 * 1024 * 20 +legend = f"""\ + +Legend: + [a ] - a segment in the allocator + ^-- a page {Bytes(PAGE_SIZE)} of memory in the segment + a-z: pages filled with a single block's content + ' ': page is completely free + *: page if completely full with multiple blocks + 0-9: page is partially full with tensors of multiple blocks (9 == 90% full) + (X% internal) - of the free memory, X% is free because we rounded the size of the allocation. +""" + +def segsum(data): + r"""Visually reports how the allocator has filled its segments. + + This printout can help debug fragmentation issues since free fragments + will appear as gaps in this printout. The amount of free space is reported + for each segment. + We distinguish between internal free memory which occurs because the + allocator rounds the allocation size, and external free memory, which are + the gaps between allocations in a segment. + Args: + data: snapshot dictionary created from _snapshot() + """ + segments = [] + out = io.StringIO() + out.write(f"Summary of segments >= {Bytes(PAGE_SIZE)} in size\n") + total_reserved = 0 + total_allocated = 0 + free_external = 0 + free_internal = 0 + for seg in sorted(data['segments'], key=lambda x: (x['total_size'], calc_active(x))): + total_reserved += seg['total_size'] + + seg_free_external = 0 + seg_free_internal = 0 + seg_allocated = 0 + all_ranges = [] + boffset = 0 + for b in seg['blocks']: + active = b['state'] == 'active_allocated' + if active: + _, allocated_size = _block_extra(b) + all_ranges.append((boffset, allocated_size, True)) + seg_allocated += allocated_size + seg_free_internal += b['size'] - allocated_size + else: + seg_free_external += b['size'] + + boffset += b['size'] + + total_allocated += seg_allocated + free_external += seg_free_external + free_internal += seg_free_internal + + nseg = (seg['total_size'] - 1) // PAGE_SIZE + 1 + occupied = [' ' for _ in range(nseg)] + frac = [0.0 for _ in range(nseg)] + active_size = 0 + for i, (start_, size, active) in enumerate(all_ranges): + active_size += size + finish_ = (start_ + size) + start = start_ // PAGE_SIZE + finish = (finish_ - 1) // PAGE_SIZE + 1 + m = chr(ord('a' if active else 'A') + (i % 26)) + for j in range(start, finish): + s = max(start_, j * PAGE_SIZE) + e = min(finish_, (j + 1) * PAGE_SIZE) + frac[j] += (e - s) / PAGE_SIZE + if occupied[j] != ' ': + occupied[j] = '0123456789*'[int(frac[j] * 10)] + else: + occupied[j] = m + stream = '' if seg['stream'] == 0 else f', stream_{seg["stream"]}' + body = ''.join(occupied) + assert seg_free_external + seg_free_internal + seg_allocated == seg['total_size'] + stream = f' stream_{seg["stream"]}' if seg['stream'] != 0 else '' + if seg['total_size'] >= PAGE_SIZE: + out.write(f'[{body}] {Bytes(seg["total_size"])} allocated, ' + f'{_report_free(seg_free_external, seg_free_internal)} free{stream}\n') + out.write(f'segments: {len(data["segments"])}\n') + out.write(f'total_reserved: 
{Bytes(total_reserved)}\n') + out.write(f'total_allocated: {Bytes(total_allocated)}\n') + internal_external = f' ({Bytes(free_internal)} internal + {Bytes(free_external)} external)' if free_internal else '' + out.write(f'total_free: {_report_free(free_external, free_internal)}\n') + out.write(legend) + assert free_internal + free_external + total_allocated == total_reserved + return out.getvalue() + +def trace(data): + out = io.StringIO() + + def format(entries): + segment_intervals : list = [] + segment_addr_to_name = {} + allocation_addr_to_name = {} + + free_names : list = [] + next_name = 0 + + def _name(): + nonlocal next_name + if free_names: + return free_names.pop() + r, m = next_name // 26, next_name % 26 + next_name += 1 + return f'{chr(ord("a") + m)}{"" if r == 0 else r}' + + def find_segment(addr): + for name, saddr, size in segment_intervals: + if addr >= saddr and addr < saddr + size: + return name, saddr + for i, seg in enumerate(data['segments']): + saddr = seg['address'] + size = seg['allocated_size'] + if addr >= saddr and addr < saddr + size: + return f'seg_{i}', saddr + return None, None + count = 0 + out.write(f'{len(entries)} entries\n') + + + total_reserved = 0 + for seg in data['segments']: + total_reserved += seg['total_size'] + + for count, e in enumerate(entries): + if e['action'] == 'alloc': + addr, size = e['addr'], e['size'] + n = _name() + seg_name, seg_addr = find_segment(addr) + if seg_name is None: + seg_name = "MEM" + offset = addr + else: + offset = addr - seg_addr + out.write(f'{n} = {seg_name}[{offset}:{Bytes(size)}]\n') + allocation_addr_to_name[addr] = (n, size, count) + count += size + elif e['action'] == 'free_requested': + addr, size = e['addr'], e['size'] + name, _, _ = allocation_addr_to_name.get(addr, (addr, None, None)) + out.write(f'del {name} # {Bytes(size)}\n') + elif e['action'] == 'free_completed': + addr, size = e['addr'], e['size'] + count -= size + name, _, _ = allocation_addr_to_name.get(addr, (addr, None, None)) + out.write(f'# free completed for {name} {Bytes(size)}\n') + if name in allocation_addr_to_name: + free_names.append(name) + del allocation_addr_to_name[name] + elif e['action'] == 'segment_alloc': + addr, size = e['addr'], e['size'] + name = _name() + out.write(f'{name} = cudaMalloc({addr}, {Bytes(size)})\n') + segment_intervals.append((name, addr, size)) + segment_addr_to_name[addr] = name + elif e['action'] == 'segment_free': + addr, size = e['addr'], e['size'] + name = segment_addr_to_name.get(addr, addr) + out.write(f'cudaFree({name}) # {Bytes(size)}\n') + if name in segment_addr_to_name: + free_names.append(name) + del segment_addr_to_name[name] + elif e['action'] == 'oom': + size = e['size'] + free = e['device_free'] + out.write(f'raise OutOfMemoryError() # {Bytes(size)} requested, {Bytes(free)} free in CUDA\n') + else: + out.write(f'{e}\n') + out.write(f"TOTAL MEM: {Bytes(count)}") + for i, d in enumerate(data['device_traces']): + if d: + out.write(f'Device {i} ----------------\n') + format(d) + return out.getvalue() + + +_memory_viz_template = r""" + + + + + + + +""" + +def _format_viz(data, viz_kind, device): + if device is not None: + warnings.warn('device argument is deprecated, plots now contain all device') + buffer = pickle.dumps(data) + buffer += b'\x00' * (3 - len(buffer) % 3) + # Encode the buffer with base64 + encoded_buffer = base64.b64encode(buffer).decode('utf-8') + + json_format = json.dumps([{"name": 'snapshot.pickle', "base64": encoded_buffer}]) + return _memory_viz_template.replace('$VIZ_KIND', 
repr(viz_kind)) \ + .replace('$SNAPSHOT', json_format) + +def trace_plot(data, device=None, plot_segments=False): + """Generate a visualization over time of the memory usage recorded by the trace as an html file. + + Args: + data: Memory snapshot as generated from torch.cuda.memory._snapshot() + device (torch.device, optional): Generate the trace for this device, needed if multiple devices have allocations. + plot_segments (bool, optional): Plots memory returned from cudaMalloc, rather than individual allocations. + Defaults to False. + + Returns: + str: HTML of visualization + """ + return _format_viz(data, 'Active Memory Timeline' if not plot_segments else 'Active Cached Memory Timeline', device) + + +def _profile_to_snapshot(profile): + import torch + from torch.profiler._memory_profiler import Action, TensorKey + from torch._C._profiler import _EventType + memory_profile = profile._memory_profile() + + allocation_stacks = {} + for event in memory_profile._op_tree.sorted_nodes: + if event.tag == _EventType.Allocation: + parent = event.parent + python_parents = [] + while parent: + if parent.tag in (_EventType.PyCall, _EventType.PyCCall): + python_parents.append(parent) + parent = parent.parent + key = TensorKey.from_allocation(event.extra_fields) + + # Corner case: If allocation doesn't have an ID (can't prove it was used as a Tensor) + # key will be None. I should add some way to identify these, I just haven't yet. + if key and event.extra_fields.alloc_size > 0: + allocation_stacks[key] = python_parents + + + device_count = torch.cuda.device_count() + snapshot = { + 'device_traces': [[] for _ in range(device_count + 1)], + 'segments': [{'device': device, + 'address': None, + 'total_size': 0, + 'stream': 0, + 'blocks': []} for device in range(device_count + 1)] + } + + def to_device(device): + if device.type == 'cuda': + return device.index + else: + return device_count + + def allocate(size, tensor_key, version, during_trace=True): + device = to_device(tensor_key.device) + addr = tensor_key.storage.ptr + + seg = snapshot['segments'][device] # type: ignore[index] + if seg['address'] is None or seg['address'] > addr: + seg['address'] = addr + seg['total_size'] = max(seg['total_size'], addr + size) # record max addr for now, we will make it the size later + category = memory_profile._categories.get(tensor_key, version) + category = category.name.lower() if category is not None else "unknown" + stack = allocation_stacks.get(tensor_key, ()) + stack = [{'filename': 'none', 'line': 0, 'name': p.name} for p in stack] + r = {'action': 'alloc', 'addr': addr, 'size': size, 'stream': 0, 'frames': stack, 'category': category} + if during_trace: + snapshot['device_traces'][device].append(r) # type: ignore[index] + return r + + def free(alloc, device): + for e in ('free_requested', 'free_completed'): + snapshot['device_traces'][device].append({'action': e, # type: ignore[index] + 'addr': alloc['addr'], + 'size': alloc['size'], + 'stream': 0, + 'frames': alloc['frames']}) + + kv_to_elem = {} + + + + # create the device trace + for time, action, (tensor_key, version), size in memory_profile.timeline: + if not isinstance(tensor_key, TensorKey): + continue + if action == Action.CREATE: + kv_to_elem[(tensor_key, version)] = allocate(size, tensor_key, version) + elif action == Action.DESTROY: + free(kv_to_elem.pop((tensor_key, version)), to_device(tensor_key.device)) + elif action == Action.INCREMENT_VERSION: + free(kv_to_elem.pop((tensor_key, version)), to_device(tensor_key.device)) + 
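+            # An in-place mutation bumps the tensor's version; the synthetic trace
+            # models this as freeing the old version and immediately re-allocating
+            # the new version at the same storage address.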
kv_to_elem[(tensor_key, version + 1)] = allocate(size, tensor_key, version + 1) + elif action == Action.PREEXISTING: + kv_to_elem[(tensor_key, version)] = allocate(size, tensor_key, version, during_trace=False) + + + # create the final snapshot state + blocks_at_end = [(to_device(tensor_key.device), event['addr'], event['size'], event['frames']) + for (tensor_key, version), event in kv_to_elem.items()] + for device, blocks in groupby(sorted(blocks_at_end), key=lambda x: x[0]): + seg = snapshot['segments'][device] # type: ignore[index] + last_addr = seg['address'] + for _, addr, size, frames in blocks: + if last_addr < addr: + seg['blocks'].append({'size': addr - last_addr, 'state': 'inactive'}) + seg['blocks'].append({'size': size, 'state': 'active_allocated', 'requested_size': size, 'frames': frames}) + last_addr = addr + size + if last_addr < seg['total_size']: + seg['blocks'].append({'size': seg['total_size'] - last_addr, 'state': 'inactive'}) + + snapshot['segments'] = [seg for seg in snapshot['segments'] if seg['blocks']] # type: ignore[attr-defined] + for seg in snapshot['segments']: # type: ignore[attr-defined, name-defined, no-redef] + seg['total_size'] -= seg['address'] + if not seg['blocks']: + seg['blocks'].append({'size': seg['total_size'], 'state': 'inactive'}) + + return snapshot + +def profile_plot(profile, device=None): + """Generate a visualization over time of the memory usage recorded by kineto memory profiling as an html file. + + Args: + profile: profile as generated by `torch.profiler.profile(profile_memory=True)` + device (torch.device, optional): Generate the trace for this device, needed if multiple devices have allocations. + + Returns: + str: HTML of visualization + """ + snapshot = _profile_to_snapshot(profile) + return _format_viz(snapshot, 'Active Memory Timeline', device) + + +def segment_plot(data: Any, device=None): + return _format_viz(data, 'Allocator State History', device) + +if __name__ == "__main__": + import os.path + thedir = os.path.realpath(os.path.dirname(__file__)) + if thedir in sys.path: + # otherwise we find cuda/random.py as random... + sys.path.remove(thedir) + import argparse + + fn_name = 'torch.cuda.memory._snapshot()' + pickled = f'pickled memory statistics from {fn_name}' + parser = argparse.ArgumentParser(description=f'Visualize memory dumps produced by {fn_name}') + + subparsers = parser.add_subparsers(dest='action') + + def _output(p): + p.add_argument('-o', '--output', default='output.svg', help='flamegraph svg (default: output.svg)') + + description = 'Prints overall allocation statistics and a visualization of how the allocators segments are currently filled.' + stats_a = subparsers.add_parser('stats', description=description) + stats_a.add_argument('input', help=pickled) + + description = 'Prints buffer of the most recent allocation events embedded in the snapshot in a Pythonic style.' + trace_a = subparsers.add_parser('trace', description=description) + trace_a.add_argument('input', help=pickled) + + description = 'Generate a flamegraph that visualizes what memory is stored in each allocator segment (aka block)' + segments_a = subparsers.add_parser('segments', description=description) + segments_a.add_argument('input', help=pickled) + _output(segments_a) + + description = "Generate a flamegraph the program locations contributing to CUDA memory usage." 
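+    # Illustrative invocations of this tool (the positional argument is the pickle
+    # produced by torch.cuda.memory._snapshot(); '-' reads the pickle from stdin):
+    #   python <this_script> stats snapshot.pickle
+    #   python <this_script> memory snapshot.pickle -o memory.svg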
+ memory_a = subparsers.add_parser('memory', description=description) + memory_a.add_argument('input', help=pickled) + _output(memory_a) + + description = 'Generate a flamegraph that shows segments (aka blocks) that have been added ' \ + 'or removed between two different memorys snapshots.' + compare_a = subparsers.add_parser('compare', description=description) + compare_a.add_argument('before', help=pickled) + compare_a.add_argument('after', help=pickled) + _output(compare_a) + + plots = ( + ("trace_plot", "Generate a visualization over time of the memory usage recorded by the trace as an html file."), + ("segment_plot", "Visualize how allocations are packed into allocator segments at each point in a trace as an html file.") + ) + for cmd, description in plots: + trace_plot_a = subparsers.add_parser(cmd, description=description) + trace_plot_a.add_argument('input', help=pickled) + help = 'visualize trace from this device (default: chooses the only device with trace info or errors)' + trace_plot_a.add_argument('-d', '--device', type=int, default=None, help=help) + help = 'path to save the visualization(default: output.html)' + trace_plot_a.add_argument('-o', '--output', default='output.html', help=help) + if cmd == "trace_plot": + help = 'visualize change to segments rather than individual allocations' + trace_plot_a.add_argument('-s', '--segments', action='store_true', help=help) + + + args = parser.parse_args() + + def _read(name): + if name == '-': + f = sys.stdin.buffer + else: + f = open(name, 'rb') + data = pickle.load(f) + if isinstance(data, list): # segments only... + data = {'segments': data, 'traces': []} + return data + + def _write(name, data): + with open(name, 'w') as f: + f.write(data) + + if args.action == 'segments': + data = _read(args.input) + _write(args.output, segments(data)) + elif args.action == 'memory': + data = _read(args.input) + _write(args.output, memory(data)) + elif args.action == 'stats': + data = _read(args.input) + print(segsum(data)) + elif args.action == 'trace': + data = _read(args.input) + print(trace(data)) + elif args.action == 'compare': + before = _read(args.before) + after = _read(args.after) + _write(args.output, compare(before, after)) + elif args.action == 'trace_plot': + data = _read(args.input) + _write(args.output, trace_plot(data, device=args.device, plot_segments=args.segments)) + elif args.action == 'segment_plot': + data = _read(args.input) + _write(args.output, segment_plot(data, device=args.device)) diff --git a/llmeval-env/lib/python3.10/site-packages/torch/cuda/_sanitizer.py b/llmeval-env/lib/python3.10/site-packages/torch/cuda/_sanitizer.py new file mode 100644 index 0000000000000000000000000000000000000000..c0b0297366db73b31bd15a5ba7b30d86164b3f31 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/cuda/_sanitizer.py @@ -0,0 +1,622 @@ +r""" +This module introduces CUDA Sanitizer, a tool for detecting synchronization errors between kernels ran on different streams. + +It stores information on accesses to tensors to determine if they are synchronized +or not. When enabled in a python program and a possible data race is detected, a +detailed warning will be printed and the program will exit. + +It can be enabled either by importing this module and calling +:func:`enable_cuda_sanitizer()` or by exporting the ``TORCH_CUDA_SANITIZER`` +environment variable. 
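+
+A minimal usage sketch (illustrative)::
+
+    import torch.cuda._sanitizer as csan
+
+    csan.enable_cuda_sanitizer()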
+""" + +import enum +import functools +import inspect +import io +import logging +import sys +import textwrap +import traceback +from dataclasses import dataclass, field +from typing import Any, Dict, Iterator, List, Optional, Set, Tuple, TypeVar + +import torch +import torch.utils._cuda_trace as cuda_trace +from torch.utils import _pytree as pytree +from torch.utils._python_dispatch import TorchDispatchMode + + +DEFAULT_STREAM_ID = 0 + +TK = TypeVar("TK") +TVa = TypeVar("TVa") +TVb = TypeVar("TVb") + +DataPtr = int +StreamId = int +EventId = int +SeqNum = int + +logger = logging.getLogger(__name__) + + +class AccessType(enum.Enum): + READ = enum.auto() + WRITE = enum.auto() + + def __str__(self): + return "reading from" if self is AccessType.READ else "writing to" + + +@dataclass +class Access: + r"""Stores information about a single access to a tensor by a kernel. + + Args: + type: either AccessType.READ or AccessType.Write. + seq_num: the sequential number of the kernel performing the access. + stream: the stream id of the stream executing the kernel. + operator: the schema of the launched kernel, which lists the + arguments and return type. + aliases: the arguments in the schema this access corresponds to. + is_output: Whether the tensor was an output of the kernel. + stack_trace: the stack summary object captured during access. + """ + + type: AccessType + seq_num: SeqNum + stream: StreamId + operator: str + aliases: List[str] + is_output: bool + stack_trace: traceback.StackSummary + + +class SynchronizationError(Exception): + """Base class for errors detected by CUDA Sanitizer.""" + + pass + + +class UnsynchronizedAccessError(SynchronizationError): + """Stores information about two unsynchronized accesses to one data pointer.""" + + def __init__( + self, + data_ptr: DataPtr, + allocation_stack_trace: Optional[traceback.StackSummary], + current_access: Access, + previous_access: Access, + ): + self.data_ptr = data_ptr + self.allocation_stack_trace = allocation_stack_trace + self.current_access = current_access + self.previous_access = previous_access + + def __str__(self): + def format_access(access: Access): + message.write(f"{access.operator}\n{access.type}") + if access.aliases: + message.write(" argument(s) " + ", ".join(access.aliases)) + if access.is_output: + message.write(", and to") + if access.is_output: + message.write(" the output") + message.write( + f"\nWith stack trace:\n{''.join(access.stack_trace.format())}\n" + ) + + with io.StringIO() as message: + message.write( + textwrap.dedent( + f"""\ + ============================ + CSAN detected a possible data race on tensor with data pointer {self.data_ptr} + Access by stream {self.current_access.stream} during kernel: + """ + ) + ) + format_access(self.current_access) + + message.write( + f"Previous access by stream {self.previous_access.stream} during kernel:\n" + ) + format_access(self.previous_access) + + if self.allocation_stack_trace: + message.write( + "Tensor was allocated with stack trace:\n" + f"{''.join(self.allocation_stack_trace.format())}" + ) + else: + message.write("Trace for tensor allocation not found.") + return message.getvalue() + + +class CUDASanitizerErrors(Exception): + """Wrapper class for errors reported by CUDA Sanitizer.""" + + def __init__(self, errors: List[SynchronizationError]): + self.errors = errors + + def __str__(self): + return f"detected {len(self.errors)} errors" + + +@dataclass +class TensorInfo: + r"""Stores information about a single tensor and recent accesses to it. 
+ + Args: + allocation_stack_trace: the stack summary object captured during tensor + allocation. Can be ``None`` if the allocation wasn't caught by CSAN. + reads: list of read accesses to the tensor that were performed since + the last write. + write: the last write access to the tensor. + """ + + allocation_stack_trace: Optional[traceback.StackSummary] + reads: List[Access] = field(default_factory=list) + write: Optional[Access] = None + + +class _TensorsAccessed: + def __init__(self): + self.accesses: Dict[DataPtr, TensorInfo] = {} + + def ensure_tensor_exists(self, data_ptr: DataPtr) -> None: + if data_ptr not in self.accesses: + logger.info( + "Found tensor with pointer: %s, but no matching tensor " + "allocation in the trace. Backfilling the trace now. " + "Perhaps the sanitizer was enabled after some torch operations?", + data_ptr, + ) + self.create_tensor(data_ptr, None) + + def ensure_tensor_does_not_exist(self, data_ptr: DataPtr) -> None: + if data_ptr in self.accesses: + logger.info( + "Found duplicate tensor allocation in the trace for tensor with " + "pointer: %s. Assuming the trace for tensor deallocation " + "wasn't caught and backfilling it now. " + "Perhaps the sanitizer was enabled after some torch operations?", + data_ptr, + ) + self.delete_tensor(data_ptr) + + def create_tensor( + self, data_ptr: DataPtr, stack_trace: Optional[traceback.StackSummary] + ) -> None: + self.accesses[data_ptr] = TensorInfo(stack_trace) + + def delete_tensor(self, data_ptr: DataPtr) -> None: + del self.accesses[data_ptr] + + def were_there_reads_since_last_write(self, data_ptr: DataPtr) -> bool: + return True if self.accesses[data_ptr].reads else False + + def get_allocation_stack_trace( + self, data_ptr: DataPtr + ) -> Optional[traceback.StackSummary]: + return self.accesses[data_ptr].allocation_stack_trace + + def get_write(self, data_ptr: DataPtr) -> Optional[Access]: + return self.accesses[data_ptr].write + + def get_reads(self, data_ptr: DataPtr) -> List[Access]: + return self.accesses[data_ptr].reads + + def add_read(self, data_ptr: DataPtr, access: Access) -> None: + self.accesses[data_ptr].reads.append(access) + + def set_write(self, data_ptr: DataPtr, access: Access) -> None: + self.accesses[data_ptr].write = access + self.accesses[data_ptr].reads = [] + + +class StreamSynchronizations: + def __init__(self): + self.current_sync_states: Dict[StreamId, Dict[StreamId, SeqNum]] = {} + self.recorded_sync_states: Dict[EventId, Dict[StreamId, SeqNum]] = {} + self.host_sync_state: Dict[StreamId, SeqNum] = {} + self.create_stream(DEFAULT_STREAM_ID) + + def _ensure_stream_exists(self, stream: StreamId) -> None: + if stream not in self.current_sync_states: + logger.info( + "Found Stream with id: %s, but no matching stream " + "creation in the trace. Backfilling the trace now. " + "Perhaps the sanitizer was enabled after some torch operations?", + stream, + ) + self.create_stream(stream) + + def _ensure_event_exists(self, event: EventId) -> None: + if event not in self.recorded_sync_states: + logger.info( + "Found Event with id: %s, but no matching event " + "creation in the trace. Backfilling the trace now. " + "Perhaps the sanitizer was enabled after some torch operations?", + event, + ) + self.create_event(event) + + def _ensure_event_does_not_exist(self, event: EventId) -> None: + if event in self.recorded_sync_states: + logger.info( + "Found duplicate event creation in the trace for event with " + "id: %s. Assuming the trace for event deletion wasn't caught " + "and backfilling it now. 
" + "Perhaps the sanitizer was enabled after some torch operations?", + event, + ) + self.delete_event(event) + + def create_stream(self, stream: StreamId) -> None: + if stream in self.current_sync_states: + logger.info( + "Found duplicate Stream creation in the trace for Stream with " + "id: %s. PyTorch Streams are only created once, so this " + "trace entry is ignored.", + stream, + ) + else: + self.host_sync_state[stream] = 0 + self.current_sync_states[stream] = self.host_sync_state.copy() + + def create_event(self, event: EventId) -> None: + self._ensure_event_does_not_exist(event) + self.recorded_sync_states[event] = {} + + def delete_event(self, event: EventId) -> None: + self._ensure_event_exists(event) + del self.recorded_sync_states[event] + + def update_seq_num(self, stream: StreamId, seq_num: SeqNum) -> None: + self._ensure_stream_exists(stream) + self.current_sync_states[stream][stream] = seq_num + + def record_state(self, event: EventId, stream: StreamId) -> None: + self._ensure_event_exists(event) + self._ensure_stream_exists(stream) + self.recorded_sync_states[event] = self.current_sync_states[stream].copy() + + def _state_wait_for_other( + self, state: Dict[StreamId, SeqNum], other: Dict[StreamId, SeqNum] + ) -> None: + for stream, seq_num in other.items(): + state[stream] = max(state.get(stream, -1), seq_num) + + def stream_wait_for_event(self, stream: StreamId, event: EventId) -> None: + self._ensure_stream_exists(stream) + self._ensure_event_exists(event) + self._state_wait_for_other( + self.current_sync_states[stream], self.recorded_sync_states[event] + ) + + def all_streams_wait_for_event(self, event: EventId) -> None: + self._ensure_event_exists(event) + for stream in self.current_sync_states.keys(): + self.stream_wait_for_event(stream, event) + + self._state_wait_for_other( + self.host_sync_state, self.recorded_sync_states[event] + ) + + def all_streams_wait_for_stream(self, stream: StreamId) -> None: + self._ensure_stream_exists(stream) + for state in self.current_sync_states.values(): + self._state_wait_for_other(state, self.current_sync_states[stream]) + + self._state_wait_for_other( + self.host_sync_state, self.current_sync_states[stream] + ) + + def sync_all_streams(self) -> None: + for stream, state in self.current_sync_states.items(): + self.host_sync_state[stream] = state[stream] + + for state in self.current_sync_states.values(): + self._state_wait_for_other(state, self.host_sync_state) + + def is_ordered_after( + self, current_stream: StreamId, seq_num: SeqNum, other_stream: StreamId + ) -> bool: + self._ensure_stream_exists(current_stream) + self._ensure_stream_exists(other_stream) + return seq_num <= self.current_sync_states[current_stream].get(other_stream, -1) + + +class EventHandler: + """Analyzes CSAN trace for synchronization errors. + + Stores information on each stream's synchronizations with other streams as well + as tensor accesses to determine whether a given kernel launch might cause a + data race. 
+ """ + + def __init__(self): + self.tensors_accessed = _TensorsAccessed() + self.syncs = StreamSynchronizations() + self.seq_num: SeqNum = 0 + + def _handle_kernel_launch( + self, + stream: StreamId, + read_only: Set[DataPtr], + read_write: Set[DataPtr], + outputs: Set[DataPtr], + operator: str, + tensor_aliases: Dict[int, List[str]], + ) -> List[SynchronizationError]: + def check_conflict( + data_ptr: DataPtr, current_access: Access, previous_access: Optional[Access] + ) -> None: + if previous_access is None: + return + if not self.syncs.is_ordered_after( + current_access.stream, previous_access.seq_num, previous_access.stream + ): + error_list.append( + UnsynchronizedAccessError( + data_ptr, + self.tensors_accessed.get_allocation_stack_trace(data_ptr), + current_access, + previous_access, + ) + ) + + error_list: List[SynchronizationError] = [] + self.seq_num += 1 + self.syncs.update_seq_num(stream, self.seq_num) + stack_trace = traceback.StackSummary.extract( + traceback.walk_stack(inspect.currentframe()), lookup_lines=False + ) + # The stack trace generated in this way is in the inverse order, so it must be + # reversed. + stack_trace.reverse() + + for data_ptr in read_only: + self.tensors_accessed.ensure_tensor_exists(data_ptr) + current_access = Access( + AccessType.READ, + self.seq_num, + stream, + operator, + tensor_aliases[data_ptr], + data_ptr in outputs, + stack_trace, + ) + check_conflict( + data_ptr, current_access, self.tensors_accessed.get_write(data_ptr) + ) + self.tensors_accessed.add_read(data_ptr, current_access) + + for data_ptr in read_write: + self.tensors_accessed.ensure_tensor_exists(data_ptr) + current_access = Access( + AccessType.WRITE, + self.seq_num, + stream, + operator, + tensor_aliases[data_ptr], + data_ptr in outputs, + stack_trace, + ) + if self.tensors_accessed.were_there_reads_since_last_write(data_ptr): + for previous_access in self.tensors_accessed.get_reads(data_ptr): + check_conflict(data_ptr, current_access, previous_access) + else: + check_conflict( + data_ptr, current_access, self.tensors_accessed.get_write(data_ptr) + ) + self.tensors_accessed.set_write(data_ptr, current_access) + + return error_list + + def _handle_event_creation(self, event: EventId) -> None: + self.syncs.create_event(event) + + def _handle_event_deletion(self, event: EventId) -> None: + self.syncs.delete_event(event) + + def _handle_event_record(self, event: EventId, stream: StreamId) -> None: + self.syncs.record_state(event, stream) + + def _handle_event_wait(self, event: EventId, stream: StreamId) -> None: + self.syncs.stream_wait_for_event(stream, event) + + def _handle_memory_allocation(self, data_ptr: DataPtr) -> None: + self.tensors_accessed.ensure_tensor_does_not_exist(data_ptr) + stack_trace = traceback.StackSummary.extract( + traceback.walk_stack(inspect.currentframe()), lookup_lines=False + ) + # The stack trace generated in this way is in the inverse order, so it must be + # reversed. 
+ stack_trace.reverse() + self.tensors_accessed.create_tensor( + data_ptr, + stack_trace, + ) + + def _handle_memory_deallocation(self, data_ptr: DataPtr) -> None: + self.tensors_accessed.ensure_tensor_exists(data_ptr) + self.tensors_accessed.delete_tensor(data_ptr) + + def _handle_stream_creation(self, stream: StreamId) -> None: + self.syncs.create_stream(stream) + + def _handle_device_synchronization(self) -> None: + self.syncs.sync_all_streams() + + def _handle_stream_synchronization(self, stream: StreamId) -> None: + self.syncs.all_streams_wait_for_stream(stream) + + def _handle_event_synchronization(self, event: EventId) -> None: + self.syncs.all_streams_wait_for_event(event) + + +def zip_by_key(a: Dict[TK, TVa], b: Dict[TK, TVb]) -> Iterator[Tuple[TK, TVa, TVb]]: + for arg, value in a.items(): + if arg in b: + yield arg, value, b[arg] + + +def zip_arguments( + schema: torch.FunctionSchema, args: Tuple[Any, ...], kwargs: Dict[str, Any] +) -> Iterator[Tuple[torch.Argument, Any]]: + schema_args = schema.arguments[: len(args)] + schema_kwargs = {arg.name: arg for arg in schema.arguments[len(args) :]} + + yield from zip(schema_args, args) + + for _, argument, value in zip_by_key(schema_kwargs, kwargs): + yield (argument, value) + + +class ArgumentHandler: + def __init__(self): + self.dataptrs_read: Set[DataPtr] = set() + self.dataptrs_written: Set[DataPtr] = set() + self.tensor_aliases: Dict[DataPtr, List[str]] = dict() + self.outputs: Set[DataPtr] = set() + + def _handle_argument( + self, + value: Any, + is_write: bool, + name: Optional[str] = None, + is_output: bool = False, + ) -> None: + if isinstance(value, torch.Tensor) and value.is_cuda: + data_ptr = value.data_ptr() + if is_write: + self.dataptrs_written.add(data_ptr) + else: + self.dataptrs_read.add(data_ptr) + + self.tensor_aliases.setdefault(data_ptr, []) + if name is not None: + self.tensor_aliases[data_ptr].append(name) + if is_output: + self.outputs.add(data_ptr) + + def parse_inputs( + self, + schema: torch.FunctionSchema, + args: Tuple[Any, ...], + kwargs: Dict[str, Any], + ) -> None: + for argument, value in zip_arguments(schema, args, kwargs): + is_write = argument.alias_info is not None and argument.alias_info.is_write + pytree.tree_map_( + functools.partial( + self._handle_argument, is_write=is_write, name=argument.name + ), + value, + ) + + def parse_outputs(self, outputs: Any) -> None: + pytree.tree_map_( + functools.partial(self._handle_argument, is_write=True, is_output=True), + outputs, + ) + + +class CUDASanitizerDispatchMode(TorchDispatchMode): + def __init__(self): + self.event_handler = EventHandler() + torch._C._activate_cuda_trace() + cuda_trace.register_callback_for_cuda_event_creation( + self.event_handler._handle_event_creation + ) + cuda_trace.register_callback_for_cuda_event_deletion( + self.event_handler._handle_event_deletion + ) + cuda_trace.register_callback_for_cuda_event_record( + self.event_handler._handle_event_record + ) + cuda_trace.register_callback_for_cuda_event_wait( + self.event_handler._handle_event_wait + ) + cuda_trace.register_callback_for_cuda_memory_allocation( + self.event_handler._handle_memory_allocation + ) + cuda_trace.register_callback_for_cuda_memory_deallocation( + self.event_handler._handle_memory_deallocation + ) + cuda_trace.register_callback_for_cuda_stream_creation( + self.event_handler._handle_stream_creation + ) + cuda_trace.register_callback_for_cuda_device_synchronization( + self.event_handler._handle_device_synchronization + ) + 
cuda_trace.register_callback_for_cuda_stream_synchronization( + self.event_handler._handle_stream_synchronization + ) + cuda_trace.register_callback_for_cuda_event_synchronization( + self.event_handler._handle_event_synchronization + ) + + def __torch_dispatch__(self, func, types, args=(), kwargs=None): + if kwargs is None: + kwargs = {} + + argument_handler = ArgumentHandler() + argument_handler.parse_inputs(func._schema, args, kwargs) + + outputs = func(*args, **kwargs) + + argument_handler.parse_outputs(outputs) + errors = self.event_handler._handle_kernel_launch( + torch.cuda.current_stream().cuda_stream, + argument_handler.dataptrs_read - argument_handler.dataptrs_written, + argument_handler.dataptrs_written, + argument_handler.outputs, + func._schema, + argument_handler.tensor_aliases, + ) + if errors: + for error in errors: + print(error, file=sys.stderr) + raise CUDASanitizerErrors(errors) + + return outputs + + +class CUDASanitizer: + """Manages the lifetime of a CUDASanitizer dispatch mode object. + + The CUDASanitizer class wraps the entering/exiting functions of the dispatch mode + context manager in the enable function/destructor, respectively. This is to + explicitly set the lifetime of the dispatch mode object to that of the application. + This approach was deemed more elegant than using the atexit module. + """ + + def __init__(self): + self.dispatch = CUDASanitizerDispatchMode() + self.enabled = False + + def enable(self): + self.dispatch.__enter__() + self.enabled = True + + def __del__(self): + if self.enabled: + self.dispatch.__exit__(None, None, None) + + +def enable_cuda_sanitizer(): + """Enable CUDA Sanitizer. + + The sanitizer will begin to analyze low-level CUDA calls invoked by torch functions + for synchronization errors. All data races found will be printed to the standard + error output along with stack traces of suspected causes. For best results, the + sanitizer should be enabled at the very beginning of the program. + """ + cuda_sanitizer.enable() + + +cuda_sanitizer = CUDASanitizer() diff --git a/llmeval-env/lib/python3.10/site-packages/torch/cuda/_utils.py b/llmeval-env/lib/python3.10/site-packages/torch/cuda/_utils.py new file mode 100644 index 0000000000000000000000000000000000000000..1d0ee8830bd68c23115e7788bb7e1a0c220b1882 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/cuda/_utils.py @@ -0,0 +1,38 @@ +from typing import Any + +import torch + +# The _get_device_index has been moved to torch.utils._get_device_index +from torch._utils import _get_device_index as _torch_get_device_index + + +def _get_device_index( + device: Any, optional: bool = False, allow_cpu: bool = False +) -> int: + r"""Get the device index from :attr:`device`, which can be a torch.device object, a Python integer, or ``None``. + + If :attr:`device` is a torch.device object, returns the device index if it + is a CUDA device. Note that for a CUDA device without a specified index, + i.e., ``torch.device('cuda')``, this will return the current default CUDA + device if :attr:`optional` is ``True``. If :attr:`allow_cpu` is ``True``, + CPU devices will be accepted and ``-1`` will be returned in this case. + + If :attr:`device` is a Python integer, it is returned as is. + + If :attr:`device` is ``None``, this will return the current default CUDA + device if :attr:`optional` is ``True``. 
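+
+    For example (illustrative)::
+
+        _get_device_index(torch.device("cuda", 1))   # -> 1
+        _get_device_index("cpu", allow_cpu=True)     # -> -1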
+ """ + if isinstance(device, int): + return device + if isinstance(device, str): + device = torch.device(device) + if isinstance(device, torch.device): + if allow_cpu: + if device.type not in ["cuda", "cpu"]: + raise ValueError(f"Expected a cuda or cpu device, but got: {device}") + elif device.type != "cuda": + raise ValueError(f"Expected a cuda device, but got: {device}") + if not torch.jit.is_scripting(): + if isinstance(device, torch.cuda.device): + return device.idx + return _torch_get_device_index(device, optional, allow_cpu) diff --git a/llmeval-env/lib/python3.10/site-packages/torch/cuda/amp/__init__.py b/llmeval-env/lib/python3.10/site-packages/torch/cuda/amp/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..003cca1f0cd4e910d0c042cf94501aa17a1bb2f5 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/cuda/amp/__init__.py @@ -0,0 +1,11 @@ +from .autocast_mode import autocast, custom_bwd, custom_fwd +from .common import amp_definitely_not_available +from .grad_scaler import GradScaler + +__all__ = [ + "amp_definitely_not_available", + "autocast", + "custom_bwd", + "custom_fwd", + "GradScaler", +] diff --git a/llmeval-env/lib/python3.10/site-packages/torch/cuda/amp/__pycache__/__init__.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/torch/cuda/amp/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..b1aebcb830ce3d2de6fd331a66dcde7d9c6518d7 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/torch/cuda/amp/__pycache__/__init__.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/torch/cuda/amp/__pycache__/autocast_mode.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/torch/cuda/amp/__pycache__/autocast_mode.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..09f31dbe9234a4acf41d8888503d24520d90b7f0 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/torch/cuda/amp/__pycache__/autocast_mode.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/torch/cuda/amp/autocast_mode.py b/llmeval-env/lib/python3.10/site-packages/torch/cuda/amp/autocast_mode.py new file mode 100644 index 0000000000000000000000000000000000000000..88ff04d86648806a21b180ae79e6a58bf5b22685 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/cuda/amp/autocast_mode.py @@ -0,0 +1,144 @@ +import collections +import functools + +import torch + +try: + import numpy as np + + HAS_NUMPY = True +except ModuleNotFoundError: + np = None # type: ignore[assignment] +from typing import Any + +__all__ = ["autocast", "custom_fwd", "custom_bwd"] + + +class autocast(torch.amp.autocast_mode.autocast): + r"""See :class:`torch.autocast`. 
+ + ``torch.cuda.amp.autocast(args...)`` is equivalent to ``torch.autocast("cuda", args...)`` + """ + + def __init__( + self, + enabled: bool = True, + dtype: torch.dtype = torch.float16, + cache_enabled: bool = True, + ): + if torch._jit_internal.is_scripting(): + self._enabled = enabled + self.device = "cuda" + self.fast_dtype = dtype + return + super().__init__( + "cuda", enabled=enabled, dtype=dtype, cache_enabled=cache_enabled + ) + + def __enter__(self): + if torch._jit_internal.is_scripting(): + return self + return super().__enter__() + + # TODO: discuss a unified TorchScript-friendly API for autocast + def __exit__(self, exc_type: Any, exc_val: Any, exc_tb: Any): # type: ignore[override] + if torch._jit_internal.is_scripting(): + return + return super().__exit__(exc_type, exc_val, exc_tb) + + def __call__(self, func): + if torch._jit_internal.is_scripting(): + return func + return super().__call__(func) + + +# Casts Tensors and containers of Tensors. Special-cases passthroughs for strings and np.ndarrays, which +# may be falsely detected as "Iterables." +def _cast(value, dtype): + if isinstance(value, torch.Tensor): + is_eligible = ( + value.is_floating_point() + and value.is_cuda + and (value.dtype is not torch.float64) + ) + return value.to(dtype) if is_eligible else value + elif isinstance(value, (str, bytes)): + return value + elif HAS_NUMPY and isinstance(value, np.ndarray): + return value + elif isinstance(value, collections.abc.Mapping): + return {_cast(k, dtype): _cast(v, dtype) for k, v in value.items()} + elif isinstance(value, collections.abc.Iterable): + iterable = (_cast(v, dtype) for v in value) + if isinstance(value, (list, tuple)): + return type(value)(iterable) + else: + return iterable + else: + return value + + +# custom_fwd is a decorator that may or may not be used with arguments, following +# https://github.com/dabeaz/python-cookbook/tree/master/src/9/defining_a_decorator_that_takes_an_optional_argument. +# this works: +# @custom_fwd +# def forward(...): +# this also works: +# @custom_fwd(cast_inputs=torch.float) +# def forward(...): +def custom_fwd(fwd=None, *, cast_inputs=None): + """ + Create a helper decorator for ``forward`` methods of custom autograd functions. + + Autograd functions are subclasses of :class:`torch.autograd.Function`. + See the :ref:`example page` for more detail. + + Args: + cast_inputs (:class:`torch.dtype` or None, optional, default=None): If not ``None``, + when ``forward`` runs in an autocast-enabled region, casts incoming + floating-point CUDA Tensors to the target dtype (non-floating-point Tensors are not affected), + then executes ``forward`` with autocast disabled. + If ``None``, ``forward``'s internal ops execute with the current autocast state. + + .. note:: + If the decorated ``forward`` is called outside an autocast-enabled region, + :func:`custom_fwd` is a no-op and ``cast_inputs`` has no effect. 
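+
+    A minimal sketch of both decorator forms (illustrative)::
+
+        class ScaledAdd(torch.autograd.Function):
+            @staticmethod
+            @custom_fwd(cast_inputs=torch.float32)
+            def forward(ctx, a, b):
+                return a + 2 * b
+
+            @staticmethod
+            @custom_bwd
+            def backward(ctx, grad):
+                return grad, 2 * grad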
+ """ + if fwd is None: + return functools.partial(custom_fwd, cast_inputs=cast_inputs) + + @functools.wraps(fwd) + def decorate_fwd(*args, **kwargs): + args[0]._dtype = torch.get_autocast_gpu_dtype() + if cast_inputs is None: + args[0]._fwd_used_autocast = torch.is_autocast_enabled() + return fwd(*args, **kwargs) + else: + autocast_context = torch.is_autocast_enabled() + args[0]._fwd_used_autocast = False + if autocast_context: + with autocast(enabled=False): + return fwd(*_cast(args, cast_inputs), **_cast(kwargs, cast_inputs)) + else: + return fwd(*args, **kwargs) + + return decorate_fwd + + +# Autograd ensures incoming gradients are the same type as forward outputs. Allowing a separate +# cast_inputs argument on custom_bwd is unnecessary and could cause errors if it doesn't match +# cast_inputs supplied to custom_fwd. +def custom_bwd(bwd): + """Create a helper decorator for backward methods of custom autograd functions. + + Autograd functions are subclasses of :class:`torch.autograd.Function`. + Ensures that ``backward`` executes with the same autocast state as ``forward``. + See the :ref:`example page` for more detail. + """ + + @functools.wraps(bwd) + def decorate_bwd(*args, **kwargs): + with autocast(enabled=args[0]._fwd_used_autocast, dtype=args[0]._dtype): + return bwd(*args, **kwargs) + + return decorate_bwd diff --git a/llmeval-env/lib/python3.10/site-packages/torch/cuda/amp/common.py b/llmeval-env/lib/python3.10/site-packages/torch/cuda/amp/common.py new file mode 100644 index 0000000000000000000000000000000000000000..c4e8c1cc99b00d63672e12f2908a82c899076306 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/cuda/amp/common.py @@ -0,0 +1,9 @@ +from importlib.util import find_spec + +import torch + +__all__ = ["amp_definitely_not_available"] + + +def amp_definitely_not_available(): + return not (torch.cuda.is_available() or find_spec("torch_xla")) diff --git a/llmeval-env/lib/python3.10/site-packages/torch/cuda/amp/grad_scaler.py b/llmeval-env/lib/python3.10/site-packages/torch/cuda/amp/grad_scaler.py new file mode 100644 index 0000000000000000000000000000000000000000..0ebaa9bced2ca0a2758cc4211308b2ad51437833 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/cuda/amp/grad_scaler.py @@ -0,0 +1,28 @@ +import torch +from torch.amp.grad_scaler import OptState + +__all__ = ["GradScaler", "OptState"] + + +class GradScaler(torch.amp.GradScaler): + r""" + See :class:`torch.amp.GradScaler`. 
+ ``torch.cuda.amp.GradScaler(args...)`` is equivalent to ``torch.amp.GradScaler("cuda", args...)`` + """ + + def __init__( + self, + init_scale: float = 2.0**16, + growth_factor: float = 2.0, + backoff_factor: float = 0.5, + growth_interval: int = 2000, + enabled: bool = True, + ) -> None: + super().__init__( + "cuda", + init_scale=init_scale, + growth_factor=growth_factor, + backoff_factor=backoff_factor, + growth_interval=growth_interval, + enabled=enabled, + ) diff --git a/llmeval-env/lib/python3.10/site-packages/torch/cuda/comm.py b/llmeval-env/lib/python3.10/site-packages/torch/cuda/comm.py new file mode 100644 index 0000000000000000000000000000000000000000..2ea23c2072d86a61db643fcfbfb799e97267e5e9 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/cuda/comm.py @@ -0,0 +1,18 @@ +# The functions here have been moved to torch.nn.parallel.comm +from torch.nn.parallel.comm import ( + broadcast, + broadcast_coalesced, + gather, + reduce_add, + reduce_add_coalesced, + scatter, +) + +__all__ = [ + "broadcast", + "broadcast_coalesced", + "reduce_add", + "reduce_add_coalesced", + "scatter", + "gather", +] diff --git a/llmeval-env/lib/python3.10/site-packages/torch/cuda/error.py b/llmeval-env/lib/python3.10/site-packages/torch/cuda/error.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/llmeval-env/lib/python3.10/site-packages/torch/cuda/graphs.py b/llmeval-env/lib/python3.10/site-packages/torch/cuda/graphs.py new file mode 100644 index 0000000000000000000000000000000000000000..b3bfbab6ad1617386d77127c24450f46302e0ed6 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/cuda/graphs.py @@ -0,0 +1,479 @@ +import gc +from typing import Optional + +import torch +from torch.utils import _pytree +from .._utils import _dummy_type + +if not hasattr(torch._C, "_CudaStreamBase"): + # Define dummy base classes + torch._C.__dict__["_CUDAGraph"] = _dummy_type("_CUDAGraph") + torch._C.__dict__["_graph_pool_handle"] = _dummy_type("_graph_pool_handle") + torch._C.__dict__["_cuda_isCurrentStreamCapturing"] = _dummy_type( + "_cuda_isCurrentStreamCapturing" + ) + +from torch._C import ( # noqa: F401 + _cuda_isCurrentStreamCapturing, + _CUDAGraph, + _graph_pool_handle, +) + + +def is_current_stream_capturing(): + r"""Return True if CUDA graph capture is underway on the current CUDA stream, False otherwise. + + If a CUDA context does not exist on the current device, returns False without initializing the context. + """ + return _cuda_isCurrentStreamCapturing() + + +# Python shim helps Sphinx process docstrings more reliably. +def graph_pool_handle(): + r"""Return an opaque token representing the id of a graph memory pool. + + See :ref:`Graph memory management`. + + .. warning:: + This API is in beta and may change in future releases. + """ + return _graph_pool_handle() + + +# Python shim helps Sphinx process docstrings more reliably. +class CUDAGraph(torch._C._CUDAGraph): + r"""Wrapper around a CUDA graph. + + .. warning:: + This API is in beta and may change in future releases. + """ + + def __new__(cls): + return super().__new__(cls) + + def capture_begin(self, pool=None, capture_error_mode="global"): + r"""Begin capturing CUDA work on the current stream. + + Typically, you shouldn't call ``capture_begin`` yourself. + Use :class:`~torch.cuda.graph` or :func:`~torch.cuda.make_graphed_callables`, + which call ``capture_begin`` internally. 
+ + Arguments: + pool (optional): Token (returned by :func:`~torch.cuda.graph_pool_handle` or + :meth:`other_Graph_instance.pool()`) that hints this graph may share memory + with the indicated pool. See :ref:`Graph memory management`. + capture_error_mode (str, optional): specifies the cudaStreamCaptureMode for the graph capture stream. + Can be "global", "thread_local" or "relaxed". During cuda graph capture, some actions, such as cudaMalloc, + may be unsafe. "global" will error on actions in other threads, "thread_local" will only error for + actions in the current thread, and "relaxed" will not error on these actions. Do NOT change this setting + unless you're familiar with `cudaStreamCaptureMode `_ + """ # noqa: B950 + super().capture_begin(pool=pool, capture_error_mode=capture_error_mode) + + def capture_end(self): + r"""End CUDA graph capture on the current stream. + + After ``capture_end``, ``replay`` may be called on this instance. + + Typically, you shouldn't call ``capture_end`` yourself. + Use :class:`~torch.cuda.graph` or :func:`~torch.cuda.make_graphed_callables`, + which call ``capture_end`` internally. + """ + super().capture_end() + + def replay(self): + r"""Replay the CUDA work captured by this graph.""" + super().replay() + + def reset(self): + r"""Delete the graph currently held by this instance.""" + super().reset() + + def pool(self): + r"""Return an opaque token representing the id of this graph's memory pool. + + This id can optionally be passed to another graph's ``capture_begin``, + which hints the other graph may share the same memory pool. + """ + return super().pool() + + def enable_debug_mode(self): + r"""Enable debugging mode for CUDAGraph.debug_dump.""" + return super().enable_debug_mode() + + def debug_dump(self, debug_path): + r""" + Arguments: + debug_path (required): Path to dump the graph to. + + Calls a debugging function to dump the graph if the debugging is + enabled via CUDAGraph.enable_debug_mode() + """ + return super().debug_dump(debug_path) + + +class graph: + r"""Context-manager that captures CUDA work into a :class:`torch.cuda.CUDAGraph` object for later replay. + + See :ref:`CUDA Graphs ` for a general introduction, + detailed use, and constraints. + + Arguments: + cuda_graph (torch.cuda.CUDAGraph): Graph object used for capture. + pool (optional): Opaque token (returned by a call to :func:`~torch.cuda.graph_pool_handle()` or + :meth:`other_Graph_instance.pool()`) hinting this graph's capture + may share memory from the specified pool. See :ref:`Graph memory management`. + stream (torch.cuda.Stream, optional): If supplied, will be set as the current stream in the context. + If not supplied, ``graph`` sets its own internal side stream as the current stream in the context. + capture_error_mode (str, optional): specifies the cudaStreamCaptureMode for the graph capture stream. + Can be "global", "thread_local" or "relaxed". During cuda graph capture, some actions, such as cudaMalloc, + may be unsafe. "global" will error on actions in other threads, "thread_local" will only error for + actions in the current thread, and "relaxed" will not error on actions. Do NOT change this setting + unless you're familiar with `cudaStreamCaptureMode `_ + + .. note:: + For effective memory sharing, if you pass a ``pool`` used by a previous capture and the previous capture + used an explicit ``stream`` argument, you should pass the same ``stream`` argument to this capture. + + .. warning:: + This API is in beta and may change in future releases. + + .. 
_cudaStreamCaptureMode: + https://docs.nvidia.com/cuda/cuda-runtime-api/group__CUDART__STREAM.html#group__CUDART__STREAM_1g9d0535d93a214cbf126835257b16ba85 + """ # noqa: B950 + + default_capture_stream: Optional["torch.cuda.Stream"] = None + + def __init__( + self, + cuda_graph, + pool=None, + stream=None, + capture_error_mode: str = "global", + ): + # Lazy-init of default_capture_stream helps avoid circular-import errors. + # Not thread safe, but graphs already have the general (explicitly documented) + # restriction that only one capture may be underway at a time in the process. + if self.__class__.default_capture_stream is None: + self.__class__.default_capture_stream = torch.cuda.Stream() + + self.pool = () if pool is None else (pool,) + self.capture_stream = ( + stream if stream is not None else self.__class__.default_capture_stream + ) + assert self.capture_stream is not None + self.stream_ctx = torch.cuda.stream(self.capture_stream) + self.cuda_graph = cuda_graph + self.capture_error_mode = capture_error_mode + + def __enter__(self): + # Free as much memory as we can for the graph + torch.cuda.synchronize() + gc.collect() + torch.cuda.empty_cache() + + # Stackoverflow seems comfortable with this pattern + # https://stackoverflow.com/questions/26635684/calling-enter-and-exit-manually#39172487 + self.stream_ctx.__enter__() + + self.cuda_graph.capture_begin( + *self.pool, capture_error_mode=self.capture_error_mode + ) + + def __exit__(self, exc_type, exc_value, traceback): + self.cuda_graph.capture_end() + self.stream_ctx.__exit__(exc_type, exc_value, traceback) + # returning None should propagate exceptions from either capture_end or stream_ctx.__exit__() + + +def make_graphed_callables( + callables, sample_args, num_warmup_iters=3, allow_unused_input=False, pool=None +): + r"""Accept callables (functions or :class:`nn.Module`\ s) and returns graphed versions. + + Each graphed callable's forward pass runs its source callable's + forward CUDA work as a CUDA graph inside a single autograd node. + + The graphed callable's forward pass also appends + a backward node to the autograd graph. During backward, this node runs the + callable's backward work as a CUDA graph. + + Therefore, each graphed callable should be a drop-in replacement for its source callable + in an autograd-enabled training loop. + + See :ref:`Partial-network capture` for detailed use and constraints. + + If you pass a tuple of several callables, their captures will use the same memory pool. + See :ref:`Graph memory management` for when this is appropriate. + + Arguments: + callables (torch.nn.Module or Python function, or tuple of these): Callable or callables to graph. + See :ref:`Graph memory management` for when passing a tuple of callables + is appropriate. If you pass a tuple of callables, their order in the tuple must be the same order + they'll run in the live workload. + sample_args (tuple of Tensors, or tuple of tuples of Tensors): Samples args for each callable. + If a single callable was passed, ``sample_args`` must be a single tuple of argument Tensors. + If a tuple of callables was passed, ``sample_args`` must be tuple of tuples of argument Tensors. + num_warmup_iters (int): The number of warmup iterations. Currently, ``DataDistributedParallel`` needs + 11 iterations for warm up. Default: ``3``. + allow_unused_input (bool): If False, specifying inputs that were not used when computing outputs + (and therefore their grad is always zero) is an error. Defaults to False. 
+ pool (optional): Token (returned by :func:`~torch.cuda.graph_pool_handle` or + :meth:`other_Graph_instance.pool()`) that hints this graph may share memory + with the indicated pool. See :ref:`Graph memory management`. + .. note:: + The ``requires_grad`` state of each Tensor in ``sample_args`` must match the state + that's expected for the corresponding real input in the training loop. + + .. warning:: + This API is in beta and may change in future releases. + + .. warning:: + ``sample_args`` for each callable must contain only Tensors. Other types are not allowed. + + .. warning:: + Returned callables do not support higher order differentiation (e.g., double backward). + + .. warning:: + In any :class:`~torch.nn.Module` passed to :func:`~make_graphed_callables`, only parameters + may be trainable. Buffers must have ``requires_grad=False``. + + .. warning:: + After you pass a :class:`torch.nn.Module` through :func:`~make_graphed_callables`, + you may not add or remove any of that Module's parameters or buffers. + + .. warning:: + :class:`torch.nn.Module`\s passed to :func:`~torch.cuda.make_graphed_callables` must not have module hooks + registered on them at the time they are passed. However, registering hooks on modules *after* passing them + through :func:`~torch.cuda.make_graphed_callables` is allowed. + + .. warning:: + When running a graphed callable, you must pass its arguments in the same order and format + they appeared in that callable's ``sample_args``. + + .. warning:: + The automatic mixed precision is supported in :func:`~torch.cuda.make_graphed_callables` only with disabled + caching. The context manager `torch.cuda.amp.autocast()` must have `cache_enabled=False`. + """ + if torch.is_autocast_enabled() and torch.is_autocast_cache_enabled(): + raise RuntimeError( + "make_graphed_callables does not support the autocast caching. Please set `cache_enabled=False`." + ) + + just_one_callable = False + + if not isinstance(callables, tuple): + just_one_callable = True + callables = (callables,) + sample_args = (sample_args,) + + flatten_sample_args = [] + + for c, args in zip(callables, sample_args): + if isinstance(c, torch.nn.Module): + assert ( + len(c._backward_hooks) == 0 + and len(c._forward_hooks) == 0 + and len(c._forward_pre_hooks) == 0 + ), ( + "Modules must not have hooks registered at the time they are passed. However, registering hooks " + + "on modules after passing them through make_graphed_callables is allowed." + ) + assert all(b.requires_grad is False for b in c.buffers()), ( + "In any :class:`~torch.nn.Module` passed to " + + ":func:`~make_graphed_callables`, only parameters may be trainable. All buffers must have " + + "``requires_grad=False``." + ) + flatten_arg = _pytree.arg_tree_leaves(*args) + flatten_sample_args.append(tuple(flatten_arg)) + assert all(isinstance(arg, torch.Tensor) for arg in flatten_arg), ( + "In the beta API, sample_args " + + "for each callable must contain only Tensors. Other types are not allowed." + ) + + # If a callable is an nn.Module, its graph's full input surface is the args the user explicitly + # passes to forward (ie, its sample_args) AND the module's parameter attributes. 
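+    # For example, a graphed nn.Linear called as module(x) has a static input
+    # surface of (x, weight, bias): the single user arg plus the module's parameters.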
+ per_callable_len_user_args = [len(args) for args in flatten_sample_args] + per_callable_module_params = [ + tuple(c.parameters()) if isinstance(c, torch.nn.Module) else () + for c in callables + ] + per_callable_static_input_surfaces = [ + flatten_sample_args[i] + per_callable_module_params[i] + for i in range(len(callables)) + ] + + fwd_graphs = [torch.cuda.CUDAGraph() for _ in range(len(callables))] + bwd_graphs = [torch.cuda.CUDAGraph() for _ in range(len(callables))] + + mempool = graph_pool_handle() if pool is None else pool + + # Warmup + # Hopefully prevents cudnn benchmarking and other lazy-initialization cuda work + # from ending up in any captures. + torch.cuda.synchronize() + with torch.cuda.stream(torch.cuda.Stream()): + for func, args, static_input_surface in zip( + callables, sample_args, per_callable_static_input_surfaces + ): + for _ in range(num_warmup_iters): + outputs = _pytree.tree_leaves(func(*args)) + grad_inputs = torch.autograd.grad( + outputs=tuple(o for o in outputs if o.requires_grad), + inputs=tuple(i for i in static_input_surface if i.requires_grad), + grad_outputs=tuple( + torch.empty_like(o) for o in outputs if o.requires_grad + ), + only_inputs=True, + allow_unused=allow_unused_input, + ) + del outputs, grad_inputs # type: ignore[possibly-undefined] + torch.cuda.synchronize() + + # All captures here share a mempool. To avoid replays corrupting each other's memory, + # the safest approach is to capture all passes in the same order they'll run: + # fwd 1, fwd 2, ... fwd N, then bwd N, bwd N-1, ... bwd 1. + + # Capture forward graphs + per_callable_static_outputs = [] + per_callable_output_unflatten_spec = [] + for func, args, fwd_graph in zip(callables, sample_args, fwd_graphs): + with torch.cuda.graph(fwd_graph, pool=mempool): + outputs = func(*args) + + flatten_outputs, spec = _pytree.tree_flatten(outputs) + per_callable_static_outputs.append(tuple(flatten_outputs)) + per_callable_output_unflatten_spec.append(spec) + + # Capture backward graphs in reverse order + per_callable_static_grad_outputs = [] + per_callable_static_grad_inputs = [] + for static_input_surface, static_outputs, bwd_graph, module_params in zip( + reversed(per_callable_static_input_surfaces), + reversed(per_callable_static_outputs), + reversed(bwd_graphs), + reversed(per_callable_module_params), + ): + # For now, assumes all static_outputs require grad + # assert all(o.requires_grad for o in static_outputs), "Outputs of graphed callables must require grad." + static_grad_outputs = tuple( + torch.empty_like(o) if o.requires_grad else None for o in static_outputs + ) + + with torch.cuda.graph(bwd_graph, pool=mempool): + grad_inputs = torch.autograd.grad( + outputs=tuple(o for o in static_outputs if o.requires_grad), + inputs=tuple(i for i in static_input_surface if i.requires_grad), + grad_outputs=tuple(o for o in static_grad_outputs if o is not None), + only_inputs=True, + allow_unused=allow_unused_input, + ) + + # Constructs a tuple suitable for returning from Graphed.backward: + # Pads out the actually-needed grads with Nones in gradient slots for inputs that don't require grad. + # I couldn't think of a slick one-liner for this pattern. 
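+        # e.g. if the static input surface is (x, y, w) and only x and w require
+        # grad, autograd.grad returned (grad_x, grad_w) and this loop expands it
+        # to (grad_x, None, grad_w).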
+ static_grad_inputs = [] + grad_idx = 0 + for arg in static_input_surface: + if arg.requires_grad: + static_grad_inputs.append(grad_inputs[grad_idx]) + grad_idx += 1 + else: + static_grad_inputs.append(None) # type: ignore[arg-type] + static_grad_inputs = tuple(static_grad_inputs) # type: ignore[assignment] + + per_callable_static_grad_outputs.append(static_grad_outputs) + per_callable_static_grad_inputs.append(static_grad_inputs) + + # Reverses the most recent two lists + per_callable_static_grad_outputs.reverse() + per_callable_static_grad_inputs.reverse() + # Now for every per_callable list, per_callable_*[i] holds the stuff for the ith callable. + + def make_graphed_autograd_function( + fwd_graph, + bwd_graph, + module_params, + len_user_args, + output_unflatten_spec, + static_input_surface, + static_outputs, + static_grad_outputs, + static_grad_inputs, + ): + class Graphed(torch.autograd.Function): + @staticmethod + def forward(ctx, *inputs): + # At this stage, only the user args may (potentially) be new tensors. + for i in range(len_user_args): + if static_input_surface[i].data_ptr() != inputs[i].data_ptr(): + static_input_surface[i].copy_(inputs[i]) + fwd_graph.replay() + assert isinstance(static_outputs, tuple) + return tuple(o.detach() for o in static_outputs) + + @staticmethod + @torch.autograd.function.once_differentiable + def backward(ctx, *grads): + assert len(grads) == len(static_grad_outputs) + for g, grad in zip(static_grad_outputs, grads): + if g is not None: + # don't copy if autograd gods have been kind and the + # incoming grad is already in the right place + if g.data_ptr() != grad.data_ptr(): + g.copy_(grad) + bwd_graph.replay() + + # Input args that didn't require grad expect a None gradient. + assert isinstance(static_grad_inputs, tuple) + return tuple( + b.detach() if b is not None else b for b in static_grad_inputs + ) + + def functionalized(*user_args): + # Runs the autograd function with inputs == all inputs to the graph that might require grad + # (explicit user args + module parameters) + # Assumes module params didn't change since capture. 
+ flatten_user_args = _pytree.arg_tree_leaves(*user_args) + out = Graphed.apply(*(tuple(flatten_user_args) + module_params)) + return _pytree.tree_unflatten(out, output_unflatten_spec) + + return functionalized + + # Put together the final graphed callables + ret = [] + for i, func in enumerate(callables): + graphed = make_graphed_autograd_function( + fwd_graphs[i], + bwd_graphs[i], + per_callable_module_params[i], + per_callable_len_user_args[i], + per_callable_output_unflatten_spec[i], + per_callable_static_input_surfaces[i], + per_callable_static_outputs[i], + per_callable_static_grad_outputs[i], + per_callable_static_grad_inputs[i], + ) + + if isinstance(func, torch.nn.Module): + + def make_graphed_forward(func, graph_training_state, graphed, orig_fwd): + def new_fwd(*user_args): + # If the module's training-or-eval state matches what we graphed, + # run the graph, otherwise run the original forward method + if func.training == graph_training_state: + return graphed(*user_args) + else: + return orig_fwd(*user_args) + + return new_fwd + + func.forward = make_graphed_forward(func, func.training, graphed, func.forward) # type: ignore[assignment] + ret.append(func) + else: + ret.append(graphed) + + if just_one_callable: + return ret[0] + + return tuple(ret) diff --git a/llmeval-env/lib/python3.10/site-packages/torch/cuda/jiterator.py b/llmeval-env/lib/python3.10/site-packages/torch/cuda/jiterator.py new file mode 100644 index 0000000000000000000000000000000000000000..25d25482419e635612855ed402fd02ef58709417 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/cuda/jiterator.py @@ -0,0 +1,185 @@ +import re +from typing import Callable, List + +import torch +from torch import Tensor + +__all__: List[str] = [] + + +class _CodeParser: + def __init__(self, code_string: str): + optional_ws = r"\s*" + required_ws = r"\s+" + template_params = r"(?P\<.+\>)" + return_type = r"(?P\w+)" + function_name = r"(?P\w+)" + function_params = r"(?P\(.+\))" + function_body = r"(?P\{.+\})" + + pattern = ( + optional_ws + + "template" + + optional_ws + + template_params + + optional_ws + + return_type + + required_ws + + function_name + + optional_ws + + function_params + + optional_ws + + function_body + + optional_ws + ) + + result = re.match( + pattern, code_string, re.DOTALL + ) # DOTALL for matching multiline + + if result is None: + raise Exception( + f"Couldn't parse code, please check correctness:\n {code_string}" + ) + + self.template_params = result["template_params"] + self.return_type = result["return_type"] + self.function_name = result["function_name"] + self.function_params = result["function_params"] + self.function_body = result["function_body"] + + +class _JittedFunction: + def __init__( + self, code_string: str, return_by_ref: bool, num_outputs: int, **kwargs + ): + self.code_string = code_string + + assert ( + return_by_ref or num_outputs == 1 + ), "Return by value only works for single output. " + self.return_by_ref = return_by_ref + self.num_outputs = num_outputs + + parsed_code = _CodeParser(code_string) + self.kernel_name = parsed_code.function_name + + self.kwargs_dict = kwargs + self.is_cuda_available = torch.cuda.is_available() + + def __call__(self, *tensors: Tensor, **kwargs): + # Jiterator follow torch.cuda's lazy initialization behavior + # Defer checking cuda's availability at the function invocation time + assert ( + self.is_cuda_available + ), "Jiterator is only supported on CUDA and ROCm GPUs, none are available." 
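+        # Runtime keyword overrides are validated against the kwargs declared at
+        # creation time; unknown names raise a KeyError below.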
+ + assert len(tensors) <= 8, "jiterator only supports up to 8 tensor inputs." + + expanded_kwargs = self.kwargs_dict.copy() + for key, value in kwargs.items(): + if key in self.kwargs_dict: + expanded_kwargs[key] = value + else: + raise KeyError(f"{key} is not declared in function definition") + + return torch._C._cuda_jiterator_compile_and_launch_kernel( + self.code_string, + self.kernel_name, + self.return_by_ref, + self.num_outputs, + tensors, + expanded_kwargs, + ) + + +def _create_jit_fn(code_string: str, **kwargs) -> Callable: + """ + Create a jiterator-generated cuda kernel for an elementwise op. + + The code string has to be a valid CUDA function that describes the computation for a single element. The code + string has to follow the c++ template pattern, as shown in the example below. This function will be inlined + into elementwise kernel template, and compiled on the fly. Compiled kernel will be cached in memory, as well as + local temp dir. + + Jiterator-generated kernels accepts noncontiguous tensors, and supports broadcasting and type promotion. + + Args: + code_string (str): CUDA code string to be compiled by jiterator. The entry functor must return by value. + kwargs (Dict, optional): Keyword arguments for generated function + + Example:: + + code_string = "template T my_kernel(T x, T y, T alpha) { return -x + alpha * y; }" + jitted_fn = create_jit_fn(code_string, alpha=1.0) + a = torch.rand(3, device='cuda') + b = torch.rand(3, device='cuda') + # invoke jitted function like a regular python function + result = jitted_fn(a, b, alpha=3.14) + + code_string also allows multiple function definitions, and the last function will be treated as the entry function. + + Example:: + + code_string = "template T util_fn(T x, T y) { return ::sin(x) + ::cos(y); }" + code_string += "template T my_kernel(T x, T y, T val) { return ::min(val, util_fn(x, y)); }" + jitted_fn = create_jit_fn(code_string, val=0.0) + a = torch.rand(3, device='cuda') + b = torch.rand(3, device='cuda') + # invoke jitted function like a regular python function + result = jitted_fn(a, b) # using default val=0.0 + + Jiterator can be used together with python registration to override an operator's cuda kernel. + Following example is overriding gelu's cuda kernel with relu. + + Example:: + + code_string = "template T my_gelu(T a) { return a > 0 ? a : 0; }" + my_gelu = create_jit_fn(code_string) + my_lib = torch.library.Library("aten", "IMPL") + my_lib.impl('aten::gelu', my_gelu, "CUDA") + # torch.nn.GELU and torch.nn.function.gelu are now overridden + a = torch.rand(3, device='cuda') + torch.allclose(torch.nn.functional.gelu(a), torch.nn.functional.relu(a)) + + .. warning:: + This API is in beta and may change in future releases. + + .. warning:: + This API only supports up to 8 inputs and 1 output + + .. warning:: + All input tensors must live in CUDA device + """ + return _JittedFunction(code_string, return_by_ref=False, num_outputs=1, **kwargs) + + +def _create_multi_output_jit_fn( + code_string: str, num_outputs: int, **kwargs +) -> Callable: + """ + Create a jiterator-generated cuda kernel for an elementwise op that supports returning one or more outputs. + + Args: + code_string (str): CUDA code string to be compiled by jiterator. The entry functor must return value by reference. 
+ num_outputs(int): number of outputs return by the kernel + kwargs (Dict, optional): Keyword arguments for generated function + + Example:: + + code_string = "template void my_kernel(T x, T y, T alpha, T& out) { out = -x + alpha * y; }" + jitted_fn = create_jit_fn(code_string, alpha=1.0) + a = torch.rand(3, device='cuda') + b = torch.rand(3, device='cuda') + # invoke jitted function like a regular python function + result = jitted_fn(a, b, alpha=3.14) + + .. warning:: + This API is in beta and may change in future releases. + + .. warning:: + This API only supports up to 8 inputs and 8 outputs + """ + return _JittedFunction( + code_string, return_by_ref=True, num_outputs=num_outputs, **kwargs + ) diff --git a/llmeval-env/lib/python3.10/site-packages/torch/cuda/memory.py b/llmeval-env/lib/python3.10/site-packages/torch/cuda/memory.py new file mode 100644 index 0000000000000000000000000000000000000000..60440c58dc1d057b744fc91a6254757b74839225 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/cuda/memory.py @@ -0,0 +1,914 @@ +r"""This package adds support for device memory management implemented in CUDA.""" + +import collections +import contextlib +import ctypes +import pickle +import sys +import warnings +from inspect import signature + +from typing import Any, Dict, Optional, Tuple, Union + +import torch +from torch import _C + +from torch.types import Device +from .._utils import _dummy_type +from . import _get_device_index, _get_nvml_device_index, _lazy_init, is_initialized + +from ._memory_viz import memory as _memory, segments as _segments + +__all__ = [ + "caching_allocator_alloc", + "caching_allocator_delete", + "set_per_process_memory_fraction", + "empty_cache", + "memory_stats", + "memory_stats_as_nested_dict", + "reset_accumulated_memory_stats", + "reset_peak_memory_stats", + "reset_max_memory_allocated", + "reset_max_memory_cached", + "memory_allocated", + "max_memory_allocated", + "memory_reserved", + "max_memory_reserved", + "memory_cached", + "max_memory_cached", + "memory_snapshot", + "memory_summary", + "list_gpu_processes", + "mem_get_info", + "get_allocator_backend", + "CUDAPluggableAllocator", + "change_current_allocator", +] + + +if not hasattr(torch._C, "_cuda_CUDAAllocator"): + # Define dummy base classes + torch._C.__dict__["_cuda_CUDAAllocator"] = _dummy_type("_cuda_CUDAAllocator") + + +def _host_allocator(): + _lazy_init() + return torch._C._cuda_cudaHostAllocator() + + +@contextlib.contextmanager +def _free_mutex(): + torch._C._cuda_lock_mutex() + try: + yield + finally: + torch._C._cuda_unlock_mutex() + + +def caching_allocator_alloc(size, device: Union[Device, int] = None, stream=None): + r"""Perform a memory allocation using the CUDA memory allocator. + + Memory is allocated for a given device and a stream, this + function is intended to be used for interoperability with other + frameworks. Allocated memory is released through + :func:`~torch.cuda.caching_allocator_delete`. + + Args: + size (int): number of bytes to be allocated. + device (torch.device or int, optional): selected device. If it is + ``None`` the default CUDA device is used. + stream (torch.cuda.Stream or int, optional): selected stream. If is ``None`` then + the default stream for the selected device is used. + + .. note:: + See :ref:`cuda-memory-management` for more details about GPU memory + management. 
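+
+    A minimal round-trip sketch (assuming a CUDA device is available; the
+    returned pointer is released with the matching delete call once the other
+    framework is done with it)::
+
+        size = 1024  # bytes
+        ptr = torch.cuda.caching_allocator_alloc(size)
+        # ... hand `ptr` to another framework ...
+        torch.cuda.caching_allocator_delete(ptr)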
+ """ + if device is None: + device = torch.cuda.current_device() + device = _get_device_index(device) + if stream is None: + stream = torch.cuda.current_stream(device) + if isinstance(stream, torch.cuda.streams.Stream): + stream = stream.cuda_stream + if not isinstance(stream, int): + raise TypeError( + "Invalid type for stream argument, must be " + "`torch.cuda.Stream` or `int` representing a pointer " + "to a existing stream" + ) + with torch.cuda.device(device): + return torch._C._cuda_cudaCachingAllocator_raw_alloc(size, stream) + + +def caching_allocator_delete(mem_ptr): + r"""Delete memory allocated using the CUDA memory allocator. + + Memory allocated with :func:`~torch.cuda.caching_allocator_alloc`. + is freed here. The associated device and stream are tracked inside + the allocator. + + Args: + mem_ptr (int): memory address to be freed by the allocator. + + .. note:: + See :ref:`cuda-memory-management` for more details about GPU memory + management. + """ + torch._C._cuda_cudaCachingAllocator_raw_delete(mem_ptr) + + +def set_per_process_memory_fraction( + fraction, device: Union[Device, int] = None +) -> None: + r"""Set memory fraction for a process. + + The fraction is used to limit an caching allocator to allocated memory on a CUDA device. + The allowed value equals the total visible memory multiplied fraction. + If trying to allocate more than the allowed value in a process, will raise an out of + memory error in allocator. + + Args: + fraction(float): Range: 0~1. Allowed memory equals total_memory * fraction. + device (torch.device or int, optional): selected device. If it is + ``None`` the default CUDA device is used. + .. note:: + In general, the total available free memory is less than the total capacity. + """ + _lazy_init() + if device is None: + device = torch.cuda.current_device() + device = _get_device_index(device) + if not isinstance(fraction, float): + raise TypeError("Invalid type for fraction argument, must be `float`") + if fraction < 0 or fraction > 1: + raise ValueError(f"Invalid fraction value: {fraction}. Allowed range: 0~1") + + torch._C._cuda_setMemoryFraction(fraction, device) + + +def empty_cache() -> None: + r"""Release all unoccupied cached memory currently held by the caching + allocator so that those can be used in other GPU application and visible in + `nvidia-smi`. + + .. note:: + :func:`~torch.cuda.empty_cache` doesn't increase the amount of GPU + memory available for PyTorch. However, it may help reduce fragmentation + of GPU memory in certain cases. See :ref:`cuda-memory-management` for + more details about GPU memory management. + """ + if is_initialized(): + torch._C._cuda_emptyCache() + + +def memory_stats(device: Union[Device, int] = None) -> Dict[str, Any]: + r"""Return a dictionary of CUDA memory allocator statistics for a given device. + + The return value of this function is a dictionary of statistics, each of + which is a non-negative integer. + + Core statistics: + + - ``"allocated.{all,large_pool,small_pool}.{current,peak,allocated,freed}"``: + number of allocation requests received by the memory allocator. + - ``"allocated_bytes.{all,large_pool,small_pool}.{current,peak,allocated,freed}"``: + amount of allocated memory. + - ``"segment.{all,large_pool,small_pool}.{current,peak,allocated,freed}"``: + number of reserved segments from ``cudaMalloc()``. + - ``"reserved_bytes.{all,large_pool,small_pool}.{current,peak,allocated,freed}"``: + amount of reserved memory. 
+ - ``"active.{all,large_pool,small_pool}.{current,peak,allocated,freed}"``: + number of active memory blocks. + - ``"active_bytes.{all,large_pool,small_pool}.{current,peak,allocated,freed}"``: + amount of active memory. + - ``"inactive_split.{all,large_pool,small_pool}.{current,peak,allocated,freed}"``: + number of inactive, non-releasable memory blocks. + - ``"inactive_split_bytes.{all,large_pool,small_pool}.{current,peak,allocated,freed}"``: + amount of inactive, non-releasable memory. + + For these core statistics, values are broken down as follows. + + Pool type: + + - ``all``: combined statistics across all memory pools. + - ``large_pool``: statistics for the large allocation pool + (as of October 2019, for size >= 1MB allocations). + - ``small_pool``: statistics for the small allocation pool + (as of October 2019, for size < 1MB allocations). + + Metric type: + + - ``current``: current value of this metric. + - ``peak``: maximum value of this metric. + - ``allocated``: historical total increase in this metric. + - ``freed``: historical total decrease in this metric. + + In addition to the core statistics, we also provide some simple event + counters: + + - ``"num_alloc_retries"``: number of failed ``cudaMalloc`` calls that + result in a cache flush and retry. + - ``"num_ooms"``: number of out-of-memory errors thrown. + + The caching allocator can be configured via ENV to not split blocks larger than a + defined size (see Memory Management section of the Cuda Semantics documentation). + This helps avoid memory fragmentation but may have a performance + penalty. Additional outputs to assist with tuning and evaluating impact: + + - ``"max_split_size"``: blocks above this size will not be split. + - ``"oversize_allocations.{current,peak,allocated,freed}"``: + number of over-size allocation requests received by the memory allocator. + - ``"oversize_segments.{current,peak,allocated,freed}"``: + number of over-size reserved segments from ``cudaMalloc()``. + + The caching allocator can be configured via ENV to round memory allocations in order + to reduce fragmentation. Sometimes the overhead from rounding can be higher than + the fragmentation it helps reduce. The following stat can be used to check if + rounding adds too much overhead: + + - ``"requested_bytes.{all,large_pool,small_pool}.{current,peak,allocated,freed}"``: + memory requested by client code, compare this with allocated_bytes to check if + allocation rounding adds too much overhead. + + Args: + device (torch.device or int, optional): selected device. Returns + statistics for the current device, given by :func:`~torch.cuda.current_device`, + if :attr:`device` is ``None`` (default). + + .. note:: + See :ref:`cuda-memory-management` for more details about GPU memory + management. + + .. note:: + With :ref:`backend:cudaMallocAsync`, some stats are not + meaningful, and are always reported as zero. + """ + result = [] + + def _recurse_add_to_result(prefix, obj): + if isinstance(obj, dict): + if len(prefix) > 0: + prefix += "." 
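+            # e.g. {"allocated_bytes": {"all": {"current": 0}}} flattens to the
+            # key "allocated_bytes.all.current" in the returned OrderedDict.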
+ for k, v in obj.items(): + _recurse_add_to_result(prefix + k, v) + else: + result.append((prefix, obj)) + + stats = memory_stats_as_nested_dict(device=device) + _recurse_add_to_result("", stats) + result.sort() + + return collections.OrderedDict(result) + + +def memory_stats_as_nested_dict(device: Union[Device, int] = None) -> Dict[str, Any]: + r"""Return the result of :func:`~torch.cuda.memory_stats` as a nested dictionary.""" + if not is_initialized(): + return {} + device = _get_device_index(device, optional=True) + return torch._C._cuda_memoryStats(device) + + +def reset_accumulated_memory_stats(device: Union[Device, int] = None) -> None: + r"""Reset the "accumulated" (historical) stats tracked by the CUDA memory allocator. + + See :func:`~torch.cuda.memory_stats` for details. Accumulated stats correspond to + the `"allocated"` and `"freed"` keys in each individual stat dict, as well as + `"num_alloc_retries"` and `"num_ooms"`. + + Args: + device (torch.device or int, optional): selected device. Returns + statistic for the current device, given by :func:`~torch.cuda.current_device`, + if :attr:`device` is ``None`` (default). + + .. note:: + See :ref:`cuda-memory-management` for more details about GPU memory + management. + """ + device = _get_device_index(device, optional=True) + return torch._C._cuda_resetAccumulatedMemoryStats(device) + + +def reset_peak_memory_stats(device: Union[Device, int] = None) -> None: + r"""Reset the "peak" stats tracked by the CUDA memory allocator. + + See :func:`~torch.cuda.memory_stats` for details. Peak stats correspond to the + `"peak"` key in each individual stat dict. + + Args: + device (torch.device or int, optional): selected device. Returns + statistic for the current device, given by :func:`~torch.cuda.current_device`, + if :attr:`device` is ``None`` (default). + + .. note:: + See :ref:`cuda-memory-management` for more details about GPU memory + management. + """ + device = _get_device_index(device, optional=True) + return torch._C._cuda_resetPeakMemoryStats(device) + + +def reset_max_memory_allocated(device: Union[Device, int] = None) -> None: + r"""Reset the starting point in tracking maximum GPU memory occupied by tensors for a given device. + + See :func:`~torch.cuda.max_memory_allocated` for details. + + Args: + device (torch.device or int, optional): selected device. Returns + statistic for the current device, given by :func:`~torch.cuda.current_device`, + if :attr:`device` is ``None`` (default). + + .. warning:: + This function now calls :func:`~torch.cuda.reset_peak_memory_stats`, which resets + /all/ peak memory stats. + + .. note:: + See :ref:`cuda-memory-management` for more details about GPU memory + management. + """ + warnings.warn( + "torch.cuda.reset_max_memory_allocated now calls torch.cuda.reset_peak_memory_stats, " + "which resets /all/ peak memory stats.", + FutureWarning, + ) + return reset_peak_memory_stats(device=device) + + +def reset_max_memory_cached(device: Union[Device, int] = None) -> None: + r"""Reset the starting point in tracking maximum GPU memory managed by the caching allocator for a given device. + + See :func:`~torch.cuda.max_memory_cached` for details. + + Args: + device (torch.device or int, optional): selected device. Returns + statistic for the current device, given by :func:`~torch.cuda.current_device`, + if :attr:`device` is ``None`` (default). + + .. warning:: + This function now calls :func:`~torch.cuda.reset_peak_memory_stats`, which resets + /all/ peak memory stats. + + .. 
note:: + See :ref:`cuda-memory-management` for more details about GPU memory + management. + """ + warnings.warn( + "torch.cuda.reset_max_memory_cached now calls torch.cuda.reset_peak_memory_stats, " + "which resets /all/ peak memory stats.", + FutureWarning, + ) + return reset_peak_memory_stats(device=device) + + +def memory_allocated(device: Union[Device, int] = None) -> int: + r"""Return the current GPU memory occupied by tensors in bytes for a given device. + + Args: + device (torch.device or int, optional): selected device. Returns + statistic for the current device, given by :func:`~torch.cuda.current_device`, + if :attr:`device` is ``None`` (default). + + .. note:: + This is likely less than the amount shown in `nvidia-smi` since some + unused memory can be held by the caching allocator and some context + needs to be created on GPU. See :ref:`cuda-memory-management` for more + details about GPU memory management. + """ + return memory_stats(device=device).get("allocated_bytes.all.current", 0) + + +def max_memory_allocated(device: Union[Device, int] = None) -> int: + r"""Return the maximum GPU memory occupied by tensors in bytes for a given device. + + By default, this returns the peak allocated memory since the beginning of + this program. :func:`~torch.cuda.reset_peak_memory_stats` can be used to + reset the starting point in tracking this metric. For example, these two + functions can measure the peak allocated memory usage of each iteration in a + training loop. + + Args: + device (torch.device or int, optional): selected device. Returns + statistic for the current device, given by :func:`~torch.cuda.current_device`, + if :attr:`device` is ``None`` (default). + + .. note:: + See :ref:`cuda-memory-management` for more details about GPU memory + management. + """ + return memory_stats(device=device).get("allocated_bytes.all.peak", 0) + + +def memory_reserved(device: Union[Device, int] = None) -> int: + r"""Return the current GPU memory managed by the caching allocator in bytes for a given device. + + Args: + device (torch.device or int, optional): selected device. Returns + statistic for the current device, given by :func:`~torch.cuda.current_device`, + if :attr:`device` is ``None`` (default). + + .. note:: + See :ref:`cuda-memory-management` for more details about GPU memory + management. + """ + return memory_stats(device=device).get("reserved_bytes.all.current", 0) + + +def max_memory_reserved(device: Union[Device, int] = None) -> int: + r"""Return the maximum GPU memory managed by the caching allocator in bytes for a given device. + + By default, this returns the peak cached memory since the beginning of this + program. :func:`~torch.cuda.reset_peak_memory_stats` can be used to reset + the starting point in tracking this metric. For example, these two functions + can measure the peak cached memory amount of each iteration in a training + loop. + + Args: + device (torch.device or int, optional): selected device. Returns + statistic for the current device, given by :func:`~torch.cuda.current_device`, + if :attr:`device` is ``None`` (default). + + .. note:: + See :ref:`cuda-memory-management` for more details about GPU memory + management. 
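+
+    A rough sketch of the per-iteration measurement described above
+    (``model`` and ``batch`` are placeholders, not part of this API)::
+
+        torch.cuda.reset_peak_memory_stats()
+        loss = model(batch).sum()
+        loss.backward()
+        peak_bytes = torch.cuda.max_memory_reserved()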
+ """ + return memory_stats(device=device).get("reserved_bytes.all.peak", 0) + + +def memory_cached(device: Union[Device, int] = None) -> int: + r"""Deprecated; see :func:`~torch.cuda.memory_reserved`.""" + warnings.warn( + "torch.cuda.memory_cached has been renamed to torch.cuda.memory_reserved", + FutureWarning, + ) + return memory_reserved(device=device) + + +def max_memory_cached(device: Union[Device, int] = None) -> int: + r"""Deprecated; see :func:`~torch.cuda.max_memory_reserved`.""" + warnings.warn( + "torch.cuda.max_memory_cached has been renamed to torch.cuda.max_memory_reserved", + FutureWarning, + ) + return max_memory_reserved(device=device) + + +def memory_snapshot(): + r"""Return a snapshot of the CUDA memory allocator state across all devices. + + Interpreting the output of this function requires familiarity with the + memory allocator internals. + + .. note:: + See :ref:`cuda-memory-management` for more details about GPU memory + management. + """ + return torch._C._cuda_memorySnapshot()["segments"] + + +def memory_summary(device: Union[Device, int] = None, abbreviated: bool = False) -> str: + r"""Return a human-readable printout of the current memory allocator statistics for a given device. + + This can be useful to display periodically during training, or when + handling out-of-memory exceptions. + + Args: + device (torch.device or int, optional): selected device. Returns + printout for the current device, given by :func:`~torch.cuda.current_device`, + if :attr:`device` is ``None`` (default). + abbreviated (bool, optional): whether to return an abbreviated summary + (default: False). + + .. note:: + See :ref:`cuda-memory-management` for more details about GPU memory + management. + """ + device = _get_device_index(device, optional=True) + stats = memory_stats(device=device) + + def _format_size(sz, pref_sz): + prefixes = ["B ", "KiB", "MiB", "GiB", "TiB", "PiB"] + prefix = prefixes[0] + for new_prefix in prefixes[1:]: + if pref_sz < 768 * 1024: + break + prefix = new_prefix + sz //= 1024 + pref_sz /= 1024 + return f"{sz:6d} {prefix}" + + def _format_count(cnt, pref_cnt): + prefixes = [" ", "K", "M"] + prefix = prefixes[0] + for new_prefix in prefixes[1:]: + if pref_cnt < 750 * 1000: + break + prefix = new_prefix + cnt //= 1000 + pref_cnt /= 1000 + return f"{cnt:7d} {prefix} " + + metrics_to_display = [ + ("allocated_bytes", "Allocated memory", _format_size), + ("active_bytes", "Active memory", _format_size), + ("requested_bytes", "Requested memory", _format_size), + ("reserved_bytes", "GPU reserved memory", _format_size), + ("inactive_split_bytes", "Non-releasable memory", _format_size), + ("allocation", "Allocations", _format_count), + ("active", "Active allocs", _format_count), + ("segment", "GPU reserved segments", _format_count), + ("inactive_split", "Non-releasable allocs", _format_count), + ] + + lines = [] + lines.append("=" * 75) + lines.append(" {_:16} PyTorch CUDA memory summary, device ID {device:<17d} ") + lines.append("-" * 75) + lines.append( + " {_:9} CUDA OOMs: {num_ooms:<12d} | {_:6} cudaMalloc retries: {num_alloc_retries:<8d} " + ) + lines.append("=" * 75) + lines.append( + " Metric | Cur Usage | Peak Usage | Tot Alloc | Tot Freed " + ) + + for metric_key, metric_name, formatter in metrics_to_display: + lines.append("-" * 75) + submetrics = [("all", metric_name)] + if not abbreviated: + submetrics.append(("large_pool", " from large pool")) + submetrics.append(("small_pool", " from small pool")) + + current_prefval, peak_prefval, allocated_prefval, 
freed_prefval = ( + None, + None, + None, + None, + ) + + for submetric_key, submetric_name in submetrics: + prefix = metric_key + "." + submetric_key + "." + + current = stats[prefix + "current"] + peak = stats[prefix + "peak"] + allocated = stats[prefix + "allocated"] + freed = stats[prefix + "freed"] + + if current_prefval is None: + current_prefval = current + peak_prefval = peak + allocated_prefval = allocated + freed_prefval = freed + + lines.append( + " {:<21} | {} | {} | {} | {} ".format( + submetric_name, + formatter(current, current_prefval), + formatter(peak, peak_prefval), + formatter(allocated, allocated_prefval), + formatter(freed, freed_prefval), + ), + ) + + metrics_to_display = [ + ("oversize_allocations", "Oversize allocations", _format_count), + ("oversize_segments", "Oversize GPU segments", _format_count), + ] + + for metric_key, metric_name, formatter in metrics_to_display: + lines.append("-" * 75) + + prefix = metric_key + "." + + current = stats[prefix + "current"] + peak = stats[prefix + "peak"] + allocated = stats[prefix + "allocated"] + freed = stats[prefix + "freed"] + + lines.append( + " {:<21} | {} | {} | {} | {} ".format( + metric_name, + formatter(current, current), + formatter(peak, peak), + formatter(allocated, allocated), + formatter(freed, freed), + ), + ) + + lines.append("=" * 75) + + fmt_dict = {"_": "", "device": device} + for k, v in stats.items(): + fmt_dict[k.replace(".", "-")] = v + return "|" + "|\n|".join(lines).format(**fmt_dict) + "|\n" + + +def list_gpu_processes(device: Union[Device, int] = None) -> str: + r"""Return a human-readable printout of the running processes and their GPU memory use for a given device. + + This can be useful to display periodically during training, or when + handling out-of-memory exceptions. + + Args: + device (torch.device or int, optional): selected device. Returns + printout for the current device, given by :func:`~torch.cuda.current_device`, + if :attr:`device` is ``None`` (default). + """ + try: + import pynvml # type: ignore[import] + except ModuleNotFoundError: + return "pynvml module not found, please install pynvml" + from pynvml import NVMLError_DriverNotLoaded + + try: + pynvml.nvmlInit() + except NVMLError_DriverNotLoaded: + return "cuda driver can't be loaded, is cuda enabled?" + device = _get_nvml_device_index(device) + handle = pynvml.nvmlDeviceGetHandleByIndex(device) + procs = pynvml.nvmlDeviceGetComputeRunningProcesses(handle) + lines = [] + lines.append(f"GPU:{device}") + if len(procs) == 0: + lines.append("no processes are running") + for p in procs: + mem = p.usedGpuMemory / (1024 * 1024) + lines.append(f"process {p.pid:>10d} uses {mem:>12.3f} MB GPU memory") + return "\n".join(lines) + + +def mem_get_info(device: Union[Device, int] = None) -> Tuple[int, int]: + r"""Return the global free and total GPU memory for a given device using cudaMemGetInfo. + + Args: + device (torch.device or int, optional): selected device. Returns + statistic for the current device, given by :func:`~torch.cuda.current_device`, + if :attr:`device` is ``None`` (default). + + .. note:: + See :ref:`cuda-memory-management` for more + details about GPU memory management. 
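+
+    A short usage sketch; the returned tuple is ``(free, total)`` in bytes::
+
+        free_bytes, total_bytes = torch.cuda.mem_get_info()
+        print(f"{free_bytes / 2**30:.1f} GiB free of {total_bytes / 2**30:.1f} GiB")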
+ """ + if device is None: + device = torch.cuda.current_device() + device = _get_device_index(device) + return torch.cuda.cudart().cudaMemGetInfo(device) + + +def _record_memory_history_legacy( + enabled: bool, + record_context=True, + trace_alloc_max_entries=1, + trace_alloc_record_context=False, + device: Union[Device, int] = None, + record_context_cpp=False, +): + _C._cuda_record_memory_history_legacy( + enabled, + record_context, + trace_alloc_max_entries, + trace_alloc_record_context, + record_context_cpp, + ) + + +def _record_memory_history(enabled="all", *args, **kwargs): + """Enable recording of stack traces associated with memory + allocations, so you can tell what allocated any piece of memory in + :func:`torch.cuda.memory._snapshot()`. + + In addition too keeping stack traces with each current allocation and free, + this will also enable recording of a history of all alloc/free events. + + Use :func:`torch.cuda.memory._snapshot()` to retrieve this information, + and the tools in `_memory_viz.py` to visualize snapshots. + + The Python trace collection is fast (2us per trace), so you may consider + enabling this on production jobs if you anticipate ever having to debug + memory issues. + + C++ trace collection is also fast (~50ns/frame), which for many typical programs + works out to ~2us per trace, but can vary depending on stack depth. + + Args: + enabled (Literal[None, "state", "all"], optional): + `None`, disable recording memory history. + `"state"`, keep information for currenly allocated memory. + `"all"`, additionally keep a history of all alloc/free calls. + Defaults to "all". + context (Literal[None, "state", "alloc", "all"], optional): + `None`, Do not record any tracebacks. + `"state"`, Record tracebacks for currently allocated memory. + `"alloc"`, additionally keep tracebacks for alloc calls. + `"all"`, additionally keep tracebacks for free calls. + Defaults to "all". + stacks (Literal["python", "all"], optional): + `"python"`, include Python, TorchScript, and inductor frames in tracebacks + `"all"`, additionally include C++ frames + Defaults to "all". + max_entries (int, optional): Keep a maximum of `max_entries` + alloc/free events in the recorded history recorded. + """ + if isinstance(enabled, bool): + return _record_memory_history_legacy(enabled, *args, **kwargs) + else: + return _record_memory_history_impl(enabled, *args, **kwargs) + + +def _record_memory_history_impl( + enabled: Optional[str] = "all", + context: Optional[str] = "all", + stacks: str = "all", + max_entries: int = sys.maxsize, + device: Union[Device, int] = None, +): + _C._cuda_record_memory_history(enabled, context, stacks, max_entries) + + +_record_memory_history.__signature__ = signature(_record_memory_history_impl) # type: ignore[attr-defined] + + +def _snapshot(device: Union[Device, int] = None): + """Save a snapshot of CUDA memory state at the time it was called. + + The state is represented as a dictionary with the following structure. + + .. code-block:: python + + class Snapshot(TypedDict): + segments : List[Segment] + device_traces: List[List[TraceEntry]] + + class Segment(TypedDict): + # Segments are memory returned from a cudaMalloc call. + # The size of reserved memory is the sum of all Segments. + # Segments are cached and reused for future allocations. + # If the reuse is smaller than the segment, the segment + # is split into more then one Block. + # empty_cache() frees Segments that are entirely inactive. 
+ address: int + total_size: int # cudaMalloc'd size of segment + stream: int + segment_type: Literal['small', 'large'] # 'large' (>1MB) + allocated_size: int # size of memory in use + active_size: int # size of memory in use or in active_awaiting_free state + blocks : List[Block] + + class Block(TypedDict): + # A piece of memory returned from the allocator, or + # current cached but inactive. + size: int + requested_size: int # size requested during malloc, may be smaller than + # size due to rounding + address: int + state: Literal['active_allocated', # used by a tensor + 'active_awaiting_free', # waiting for another stream to finish using + # this, then it will become free + 'inactive',] # free for reuse + frames: List[Frame] # stack trace from where the allocation occurred + + class Frame(TypedDict): + filename: str + line: int + name: str + + class TraceEntry(TypedDict): + # When `torch.cuda.memory._record_memory_history()` is enabled, + # the snapshot will contain TraceEntry objects that record each + # action the allocator took. + action: Literal[ + 'alloc' # memory allocated + 'free_requested', # the allocated received a call to free memory + 'free_completed', # the memory that was requested to be freed is now + # able to be used in future allocation calls + 'segment_alloc', # the caching allocator ask cudaMalloc for more memory + # and added it as a segment in its cache + 'segment_free', # the caching allocator called cudaFree to return memory + # to cuda possibly trying free up memory to + # allocate more segments or because empty_caches was called + 'oom', # the allocator threw an OOM exception. 'size' is + # the requested number of bytes that did not succeed + 'snapshot' # the allocator generated a memory snapshot + # useful to coorelate a previously taken + # snapshot with this trace + ] + addr: int # not present for OOM + frames: List[Frame] + size: int + stream: int + device_free: int # only present for OOM, the amount of + # memory cuda still reports to be free + + Returns: + The Snapshot dictionary object + """ + return _C._cuda_memorySnapshot() + + +def _dump_snapshot(filename="dump_snapshot.pickle"): + """ + Save a pickled version of the `torch.memory._snapshot()` dictionary to a file. + + This file can be opened by the interactive snapshot viewer at pytorch.org/memory_viz + + Args: + filename (str, optional): Name of the file to create. Defaults to "dump_snapshot.pickle". + """ + s = _snapshot() + with open(filename, "wb") as f: + pickle.dump(s, f) + + +def _save_segment_usage(filename="output.svg", snapshot=None): + if snapshot is None: + snapshot = _snapshot() + with open(filename, "w") as f: + f.write(_segments(snapshot)) + + +def _save_memory_usage(filename="output.svg", snapshot=None): + if snapshot is None: + snapshot = _snapshot() + with open(filename, "w") as f: + f.write(_memory(snapshot)) + + +def _set_allocator_settings(env: str): + return torch._C._cuda_cudaCachingAllocator_set_allocator_settings(env) + + +def get_allocator_backend() -> str: + r"""Return a string describing the active allocator backend as set by + ``PYTORCH_CUDA_ALLOC_CONF``. Currently available backends are + ``native`` (PyTorch's native caching allocator) and `cudaMallocAsync`` + (CUDA's built-in asynchronous allocator). + + .. note:: + See :ref:`cuda-memory-management` for details on choosing the allocator backend. 
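+
+    For example (the value depends on how ``PYTORCH_CUDA_ALLOC_CONF`` is set)::
+
+        torch.cuda.get_allocator_backend()  # e.g. 'native' or 'cudaMallocAsync'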
+ """ + return torch._C._cuda_getAllocatorBackend() + + +class _CUDAAllocator: + r"""Wrapper over internal CUDA memory allocators.""" + + def __init__(self, allocator: torch._C._cuda_CUDAAllocator): + self._allocator = allocator + + def allocator(self): + return self._allocator + + +class CUDAPluggableAllocator(_CUDAAllocator): + r"""CUDA memory allocator loaded from a so file.""" + + def __init__(self, path_to_so_file: str, alloc_fn_name: str, free_fn_name: str): + r"""Memory allocators are compiled in .so files and loaded dynamically using ctypes. + + To change the active allocator use the :func:`torch.memory.cuda.change_current_allocator` function. + + Args: + path_to_so_file(str): Path in the filesystem to the `.so` file containing + the allocator functions + alloc_fn_name(str): Name of the function to perform the memory allocation + in the so file. The signature must be: + void* alloc_fn_name(ssize_t size, int device, cudaStream_t stream); + free_fn_name(str): Name of the function to perform the memory release + in the so file. The signature must be: + void free_fn_name(void* ptr, size_t size, cudaStream_t stream); + + .. warning:: + This is currently supported only in unix OSs + + .. note:: + See :ref:`cuda-memory-management` for details on creating and using a custom allocator + """ + allocator = ctypes.CDLL(path_to_so_file) + alloc_fn = ctypes.cast(getattr(allocator, alloc_fn_name), ctypes.c_void_p).value + free_fn = ctypes.cast(getattr(allocator, free_fn_name), ctypes.c_void_p).value + assert alloc_fn is not None + assert free_fn is not None + self._allocator = torch._C._cuda_customAllocator(alloc_fn, free_fn) + + +def change_current_allocator(allocator: _CUDAAllocator) -> None: + r"""Change the currently used memory allocator to be the one provided. + + If the current allocator has already been used/initialized, this function will error. + + + Args: + allocator (torch.cuda.memory._CUDAAllocator): allocator to be set as the active one. + .. note:: + See :ref:`cuda-memory-management` for details on creating and using a custom allocator + """ + torch._C._cuda_changeCurrentAllocator(allocator.allocator()) + + +def _get_current_allocator() -> _CUDAAllocator: + r"""Return the allocator being currently used. + + .. 
note:: + See :ref:`cuda-memory-management` for details on creating and using a custom allocator + """ + return _CUDAAllocator(torch._C._cuda_getAllocator()) diff --git a/llmeval-env/lib/python3.10/site-packages/torch/cuda/nccl.py b/llmeval-env/lib/python3.10/site-packages/torch/cuda/nccl.py new file mode 100644 index 0000000000000000000000000000000000000000..05751ab5f87b7042426454e83541d3bebe1861fc --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/cuda/nccl.py @@ -0,0 +1,137 @@ +import collections +import warnings +from typing import Optional, Sequence, Union + +import torch.cuda + + +__all__ = ["all_reduce", "reduce", "broadcast", "all_gather", "reduce_scatter"] + +SUM = 0 # ncclRedOp_t + + +def is_available(tensors): + if not hasattr(torch._C, "_nccl_all_reduce"): + warnings.warn("PyTorch is not compiled with NCCL support") + return False + + devices = set() + for tensor in tensors: + if tensor.is_sparse: + return False + if not tensor.is_contiguous(): + return False + if not tensor.is_cuda: + return False + device = tensor.get_device() + if device in devices: + return False + devices.add(device) + + return True + + +def version(): + ver = torch._C._nccl_version() + major = ver >> 32 + minor = (ver >> 16) & 65535 + patch = ver & 65535 + suffix = torch._C._nccl_version_suffix().decode("utf-8") + if suffix == "": + return (major, minor, patch) + else: + return (major, minor, patch, suffix) + + +def unique_id(): + return torch._C._nccl_unique_id() + + +def init_rank(num_ranks, uid, rank): + return torch._C._nccl_init_rank(num_ranks, uid, rank) + + +def _check_sequence_type(inputs: Union[torch.Tensor, Sequence[torch.Tensor]]) -> None: + if not isinstance(inputs, collections.abc.Container) or isinstance( + inputs, torch.Tensor + ): + raise TypeError("Inputs should be a collection of tensors") + + +def all_reduce(inputs, outputs=None, op=SUM, streams=None, comms=None): + _check_sequence_type(inputs) + if outputs is None: + outputs = inputs + _check_sequence_type(outputs) + torch._C._nccl_all_reduce(inputs, outputs, op, streams, comms) + + +# `output` used to be `outputs`, taking in a list of tensors. So we have two +# arguments for BC reasons. +def reduce( + inputs: Sequence[torch.Tensor], + output: Optional[Union[torch.Tensor, Sequence[torch.Tensor]]] = None, + root: int = 0, + op: int = SUM, + streams: Optional[Sequence[torch.cuda.Stream]] = None, + comms=None, + *, + outputs: Optional[Sequence[torch.Tensor]] = None, +) -> None: + _check_sequence_type(inputs) + _output: torch.Tensor + if outputs is not None: + if output is not None: + raise ValueError( + "'output' and 'outputs' can not be both specified. 'outputs' is deprecated in " + "favor of 'output', taking in a single output tensor. The signature of reduce is: " + "reduce(inputs, output=None, root=0, op=SUM, streams=None, comms=None)." + ) + else: + warnings.warn( + "nccl.reduce with an output tensor list is deprecated. " + "Please specify a single output tensor with argument 'output' instead instead." + ) + _output = outputs[root] + elif not isinstance(output, torch.Tensor) and isinstance( + output, collections.abc.Sequence + ): + # User called old API with positional arguments of list of output tensors. + warnings.warn( + "nccl.reduce with an output tensor list is deprecated. " + "Please specify a single output tensor." 
+ ) + _output = output[root] + else: + _output = inputs[root] if output is None else output + torch._C._nccl_reduce(inputs, _output, root, op, streams, comms) + + +def broadcast( + inputs: Sequence[torch.Tensor], root: int = 0, streams=None, comms=None +) -> None: + _check_sequence_type(inputs) + torch._C._nccl_broadcast(inputs, root, streams, comms) + + +def all_gather( + inputs: Sequence[torch.Tensor], + outputs: Sequence[torch.Tensor], + streams=None, + comms=None, +) -> None: + _check_sequence_type(inputs) + _check_sequence_type(outputs) + torch._C._nccl_all_gather(inputs, outputs, streams, comms) + + +def reduce_scatter( + inputs: Sequence[torch.Tensor], + outputs: Sequence[torch.Tensor], + op: int = SUM, + streams=None, + comms=None, +) -> None: + _check_sequence_type(inputs) + _check_sequence_type(outputs) + torch._C._nccl_reduce_scatter(inputs, outputs, op, streams, comms) diff --git a/llmeval-env/lib/python3.10/site-packages/torch/cuda/nvtx.py b/llmeval-env/lib/python3.10/site-packages/torch/cuda/nvtx.py new file mode 100644 index 0000000000000000000000000000000000000000..4b902c0c6d4d76c6d584ed4d0ad1cc71a3f9cc6d --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/cuda/nvtx.py @@ -0,0 +1,91 @@ +r"""This package adds support for NVIDIA Tools Extension (NVTX) used in profiling.""" + +from contextlib import contextmanager + +try: + from torch._C import _nvtx +except ImportError: + + class _NVTXStub: + @staticmethod + def _fail(*args, **kwargs): + raise RuntimeError( + "NVTX functions not installed. Are you sure you have a CUDA build?" + ) + + rangePushA = _fail + rangePop = _fail + markA = _fail + + _nvtx = _NVTXStub() # type: ignore[assignment] + +__all__ = ["range_push", "range_pop", "range_start", "range_end", "mark", "range"] + + +def range_push(msg): + """ + Push a range onto a stack of nested range span. Returns zero-based depth of the range that is started. + + Args: + msg (str): ASCII message to associate with range + """ + return _nvtx.rangePushA(msg) + + +def range_pop(): + """Pop a range off of a stack of nested range spans. Returns the zero-based depth of the range that is ended.""" + return _nvtx.rangePop() + + +def range_start(msg) -> int: + """ + Mark the start of a range with string message. It returns an unique handle + for this range to pass to the corresponding call to rangeEnd(). + + A key difference between this and range_push/range_pop is that the + range_start/range_end version supports range across threads (start on one + thread and end on another thread). + + Returns: A range handle (uint64_t) that can be passed to range_end(). + + Args: + msg (str): ASCII message to associate with the range. + """ + return _nvtx.rangeStartA(msg) + + +def range_end(range_id) -> None: + """ + Mark the end of a range for a given range_id. + + Args: + range_id (int): an unique handle for the start range. + """ + _nvtx.rangeEnd(range_id) + + +def mark(msg): + """ + Describe an instantaneous event that occurred at some point. + + Args: + msg (str): ASCII message to associate with the event. + """ + return _nvtx.markA(msg) + + +@contextmanager +def range(msg, *args, **kwargs): + """ + Context manager / decorator that pushes an NVTX range at the beginning + of its scope, and pops it at the end. If extra arguments are given, + they are passed as arguments to msg.format(). 
+ + Args: + msg (str): message to associate with the range + """ + range_push(msg.format(*args, **kwargs)) + try: + yield + finally: + range_pop() diff --git a/llmeval-env/lib/python3.10/site-packages/torch/cuda/profiler.py b/llmeval-env/lib/python3.10/site-packages/torch/cuda/profiler.py new file mode 100644 index 0000000000000000000000000000000000000000..51c8aa46f714b6a9fd30857c9edb575614d52420 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/cuda/profiler.py @@ -0,0 +1,61 @@ +import contextlib +import tempfile + +import torch +from . import check_error, cudart + +__all__ = ["init", "start", "stop", "profile"] + +DEFAULT_FLAGS = [ + "gpustarttimestamp", + "gpuendtimestamp", + "gridsize3d", + "threadblocksize", + "streamid", + "enableonstart 0", + "conckerneltrace", +] + + +def init(output_file, flags=None, output_mode="key_value"): + rt = cudart() + if not hasattr(rt, "cudaOutputMode"): + raise AssertionError("HIP does not support profiler initialization!") + if ( + hasattr(torch.version, "cuda") + and torch.version.cuda is not None + and int(torch.version.cuda.split(".")[0]) >= 12 + ): + # Check https://github.com/pytorch/pytorch/pull/91118 + # cudaProfilerInitialize is no longer needed after CUDA 12 + raise AssertionError("CUDA12+ does not need profiler initialization!") + flags = DEFAULT_FLAGS if flags is None else flags + if output_mode == "key_value": + output_mode_enum = rt.cudaOutputMode.KeyValuePair + elif output_mode == "csv": + output_mode_enum = rt.cudaOutputMode.CSV + else: + raise RuntimeError( + "supported CUDA profiler output modes are: key_value and csv" + ) + with tempfile.NamedTemporaryFile(delete=True) as f: + f.write(b"\n".join(f.encode("ascii") for f in flags)) + f.flush() + check_error(rt.cudaProfilerInitialize(f.name, output_file, output_mode_enum)) + + +def start(): + check_error(cudart().cudaProfilerStart()) + + +def stop(): + check_error(cudart().cudaProfilerStop()) + + +@contextlib.contextmanager +def profile(): + try: + start() + yield + finally: + stop() diff --git a/llmeval-env/lib/python3.10/site-packages/torch/cuda/random.py b/llmeval-env/lib/python3.10/site-packages/torch/cuda/random.py new file mode 100644 index 0000000000000000000000000000000000000000..1cf33114d17bd1867dfc5e5bb9179670291878a2 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/cuda/random.py @@ -0,0 +1,179 @@ +from typing import Iterable, List, Union + +import torch +from .. import Tensor +from . import _lazy_call, _lazy_init, current_device, device_count + +__all__ = [ + "get_rng_state", + "get_rng_state_all", + "set_rng_state", + "set_rng_state_all", + "manual_seed", + "manual_seed_all", + "seed", + "seed_all", + "initial_seed", +] + + +def get_rng_state(device: Union[int, str, torch.device] = "cuda") -> Tensor: + r"""Return the random number generator state of the specified GPU as a ByteTensor. + + Args: + device (torch.device or int, optional): The device to return the RNG state of. + Default: ``'cuda'`` (i.e., ``torch.device('cuda')``, the current CUDA device). + + .. warning:: + This function eagerly initializes CUDA. 
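+
+    A typical save/restore sketch for the current device::
+
+        state = torch.cuda.get_rng_state()
+        # ... draw some random numbers ...
+        torch.cuda.set_rng_state(state)  # restore the saved generator state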
+ """ + _lazy_init() + if isinstance(device, str): + device = torch.device(device) + elif isinstance(device, int): + device = torch.device("cuda", device) + idx = device.index + if idx is None: + idx = current_device() + default_generator = torch.cuda.default_generators[idx] + return default_generator.get_state() + + +def get_rng_state_all() -> List[Tensor]: + r"""Return a list of ByteTensor representing the random number states of all devices.""" + results = [] + for i in range(device_count()): + results.append(get_rng_state(i)) + return results + + +def set_rng_state( + new_state: Tensor, device: Union[int, str, torch.device] = "cuda" +) -> None: + r"""Set the random number generator state of the specified GPU. + + Args: + new_state (torch.ByteTensor): The desired state + device (torch.device or int, optional): The device to set the RNG state. + Default: ``'cuda'`` (i.e., ``torch.device('cuda')``, the current CUDA device). + """ + with torch._C._DisableFuncTorch(): + new_state_copy = new_state.clone(memory_format=torch.contiguous_format) + if isinstance(device, str): + device = torch.device(device) + elif isinstance(device, int): + device = torch.device("cuda", device) + + def cb(): + idx = device.index + if idx is None: + idx = current_device() + default_generator = torch.cuda.default_generators[idx] + default_generator.set_state(new_state_copy) + + _lazy_call(cb) + + +def set_rng_state_all(new_states: Iterable[Tensor]) -> None: + r"""Set the random number generator state of all devices. + + Args: + new_states (Iterable of torch.ByteTensor): The desired state for each device. + """ + for i, state in enumerate(new_states): + set_rng_state(state, i) + + +def manual_seed(seed: int) -> None: + r"""Set the seed for generating random numbers for the current GPU. + + It's safe to call this function if CUDA is not available; in that + case, it is silently ignored. + + Args: + seed (int): The desired seed. + + .. warning:: + If you are working with a multi-GPU model, this function is insufficient + to get determinism. To seed all GPUs, use :func:`manual_seed_all`. + """ + seed = int(seed) + + def cb(): + idx = current_device() + default_generator = torch.cuda.default_generators[idx] + default_generator.manual_seed(seed) + + _lazy_call(cb, seed=True) + + +def manual_seed_all(seed: int) -> None: + r"""Set the seed for generating random numbers on all GPUs. + + It's safe to call this function if CUDA is not available; in that + case, it is silently ignored. + + Args: + seed (int): The desired seed. + """ + seed = int(seed) + + def cb(): + for i in range(device_count()): + default_generator = torch.cuda.default_generators[i] + default_generator.manual_seed(seed) + + _lazy_call(cb, seed_all=True) + + +def seed() -> None: + r"""Set the seed for generating random numbers to a random number for the current GPU. + + It's safe to call this function if CUDA is not available; in that + case, it is silently ignored. + + .. warning:: + If you are working with a multi-GPU model, this function will only initialize + the seed on one GPU. To initialize all GPUs, use :func:`seed_all`. + """ + + def cb(): + idx = current_device() + default_generator = torch.cuda.default_generators[idx] + default_generator.seed() + + _lazy_call(cb) + + +def seed_all() -> None: + r"""Set the seed for generating random numbers to a random number on all GPUs. + + It's safe to call this function if CUDA is not available; in that + case, it is silently ignored. 
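+
+    .. note::
+        All devices end up with the same randomly chosen seed: the seed drawn
+        on the first device is copied to the remaining devices.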
+ """ + + def cb(): + random_seed = 0 + seeded = False + for i in range(device_count()): + default_generator = torch.cuda.default_generators[i] + if not seeded: + default_generator.seed() + random_seed = default_generator.initial_seed() + seeded = True + else: + default_generator.manual_seed(random_seed) + + _lazy_call(cb) + + +def initial_seed() -> int: + r"""Return the current random seed of the current GPU. + + .. warning:: + This function eagerly initializes CUDA. + """ + _lazy_init() + idx = current_device() + default_generator = torch.cuda.default_generators[idx] + return default_generator.initial_seed() diff --git a/llmeval-env/lib/python3.10/site-packages/torch/cuda/sparse.py b/llmeval-env/lib/python3.10/site-packages/torch/cuda/sparse.py new file mode 100644 index 0000000000000000000000000000000000000000..f37a34118d2d8f73437dee54337a666df1b99a09 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/cuda/sparse.py @@ -0,0 +1 @@ +# The Tensor classes are added to this module by python_tensor.cpp diff --git a/llmeval-env/lib/python3.10/site-packages/torch/cuda/streams.py b/llmeval-env/lib/python3.10/site-packages/torch/cuda/streams.py new file mode 100644 index 0000000000000000000000000000000000000000..22d541f4e2879ace0cc78766d8a8c1795b0bc4a1 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/cuda/streams.py @@ -0,0 +1,241 @@ +import ctypes + +import torch +from torch._streambase import _EventBase, _StreamBase +from .._utils import _dummy_type + + +if not hasattr(torch._C, "_CudaStreamBase"): + # Define dummy base classes + torch._C.__dict__["_CudaStreamBase"] = _dummy_type("_CudaStreamBase") + torch._C.__dict__["_CudaEventBase"] = _dummy_type("_CudaEventBase") + + +class Stream(torch._C._CudaStreamBase, _StreamBase): + r"""Wrapper around a CUDA stream. + + A CUDA stream is a linear sequence of execution that belongs to a specific + device, independent from other streams. See :ref:`cuda-semantics` for + details. + + Args: + device(torch.device or int, optional): a device on which to allocate + the stream. If :attr:`device` is ``None`` (default) or a negative + integer, this will use the current device. + priority(int, optional): priority of the stream, should be 0 or + negative, where negative numbers indicate higher priority. By default, + streams have priority 0. + + """ + + def __new__(cls, device=None, priority=0, **kwargs): + # setting device manager is expensive, so we avoid it unless necessary + if device is None or ("stream_id" in kwargs and "device_index" in kwargs): + return super().__new__(cls, priority=priority, **kwargs) + else: + with torch.cuda.device(device): + return super().__new__(cls, priority=priority, **kwargs) + + def wait_event(self, event): + r"""Make all future work submitted to the stream wait for an event. + + Args: + event (torch.cuda.Event): an event to wait for. + + .. note:: This is a wrapper around ``cudaStreamWaitEvent()``: see + `CUDA Stream documentation`_ for more info. + + This function returns without waiting for :attr:`event`: only future + operations are affected. + + .. _CUDA Stream documentation: + https://docs.nvidia.com/cuda/cuda-runtime-api/group__CUDART__STREAM.html + """ + event.wait(self) + + def wait_stream(self, stream): + r"""Synchronize with another stream. + + All future work submitted to this stream will wait until all kernels + submitted to a given stream at the time of call complete. + + Args: + stream (Stream): a stream to synchronize. + + .. 
note:: This function returns without waiting for currently enqueued + kernels in :attr:`stream`: only future operations are affected. + """ + self.wait_event(stream.record_event()) + + def record_event(self, event=None): + r"""Record an event. + + Args: + event (torch.cuda.Event, optional): event to record. If not given, a new one + will be allocated. + + Returns: + Recorded event. + """ + if event is None: + event = Event() + event.record(self) + return event + + def query(self): + r"""Check if all the work submitted has been completed. + + Returns: + A boolean indicating if all kernels in this stream are completed. + """ + return super().query() + + def synchronize(self): + r"""Wait for all the kernels in this stream to complete. + + .. note:: This is a wrapper around ``cudaStreamSynchronize()``: see + `CUDA Stream documentation`_ for more info. + """ + super().synchronize() + + @property + def _as_parameter_(self): + return ctypes.c_void_p(self.cuda_stream) + + def __eq__(self, o): + if isinstance(o, Stream): + return super().__eq__(o) + return False + + def __hash__(self): + return hash((self.cuda_stream, self.device)) + + def __repr__(self): + return f"" + + +class ExternalStream(Stream): + r"""Wrapper around an externally allocated CUDA stream. + + This class is used to wrap streams allocated in other libraries in order + to facilitate data exchange and multi-library interactions. + + .. note:: This class doesn't manage the stream life-cycle, it is the user + responsibility to keep the referenced stream alive while this class is + being used. + + Args: + stream_ptr(int): Integer representation of the `cudaStream_t` value. + allocated externally. + device(torch.device or int, optional): the device where the stream + was originally allocated. if device is specified incorrectly, + subsequent launches using this stream may fail. + """ + + def __new__(cls, stream_ptr, device=None, **kwargs): + with torch.cuda.device(device): + return super().__new__(cls, stream_ptr=stream_ptr, **kwargs) + + +class Event(torch._C._CudaEventBase, _EventBase): + r"""Wrapper around a CUDA event. + + CUDA events are synchronization markers that can be used to monitor the + device's progress, to accurately measure timing, and to synchronize CUDA + streams. + + The underlying CUDA events are lazily initialized when the event is first + recorded or exported to another process. After creation, only streams on the + same device may record the event. However, streams on any device can wait on + the event. + + Args: + enable_timing (bool, optional): indicates if the event should measure time + (default: ``False``) + blocking (bool, optional): if ``True``, :meth:`wait` will be blocking (default: ``False``) + interprocess (bool): if ``True``, the event can be shared between processes + (default: ``False``) + + .. _CUDA Event Documentation: + https://docs.nvidia.com/cuda/cuda-runtime-api/group__CUDART__EVENT.html + """ + + def __new__(cls, enable_timing=False, blocking=False, interprocess=False): + return super().__new__( + cls, + enable_timing=enable_timing, + blocking=blocking, + interprocess=interprocess, + ) + + @classmethod + def from_ipc_handle(cls, device, handle): + r"""Reconstruct an event from an IPC handle on the given device.""" + return super().from_ipc_handle(device, handle) + + def record(self, stream=None): + r"""Record the event in a given stream. + + Uses ``torch.cuda.current_stream()`` if no stream is specified. The + stream's device must match the event's device. 
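+
+        A common timing sketch (both events must be created with
+        ``enable_timing=True``)::
+
+            start = torch.cuda.Event(enable_timing=True)
+            end = torch.cuda.Event(enable_timing=True)
+            start.record()
+            # ... launch CUDA work here ...
+            end.record()
+            torch.cuda.synchronize()
+            elapsed_ms = start.elapsed_time(end)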
+ """ + if stream is None: + stream = torch.cuda.current_stream() + super().record(stream) + + def wait(self, stream=None): + r"""Make all future work submitted to the given stream wait for this event. + + Use ``torch.cuda.current_stream()`` if no stream is specified. + + .. note:: This is a wrapper around ``cudaStreamWaitEvent()``: see + `CUDA Event documentation`_ for more info. + """ + if stream is None: + stream = torch.cuda.current_stream() + super().wait(stream) + + def query(self): + r"""Check if all work currently captured by event has completed. + + Returns: + A boolean indicating if all work currently captured by event has + completed. + """ + return super().query() + + def elapsed_time(self, end_event): + r"""Return the time elapsed. + + Time reported in milliseconds after the event was recorded and + before the end_event was recorded. + """ + return super().elapsed_time(end_event) + + def synchronize(self): + r"""Wait for the event to complete. + + Waits until the completion of all work currently captured in this event. + This prevents the CPU thread from proceeding until the event completes. + + .. note:: This is a wrapper around ``cudaEventSynchronize()``: see + `CUDA Event documentation`_ for more info. + """ + super().synchronize() + + def ipc_handle(self): + r"""Return an IPC handle of this event. + + If not recorded yet, the event will use the current device. + """ + return super().ipc_handle() + + @property + def _as_parameter_(self): + return ctypes.c_void_p(self.cuda_event) + + def __repr__(self): + if self.cuda_event: + return f"" + else: + return "" diff --git a/llmeval-env/lib/python3.10/site-packages/torch/fx/__pycache__/__init__.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/torch/fx/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..3ade0dd3c472ffd10dc2b57f45596befce7dd1d2 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/torch/fx/__pycache__/__init__.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/torch/fx/__pycache__/_compatibility.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/torch/fx/__pycache__/_compatibility.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..0487145d46285ad995f83f473c59e50d4a7342cf Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/torch/fx/__pycache__/_compatibility.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/torch/fx/__pycache__/_lazy_graph_module.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/torch/fx/__pycache__/_lazy_graph_module.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..875b921ae5307cacee190628458c253dda654aed Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/torch/fx/__pycache__/_lazy_graph_module.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/torch/fx/__pycache__/_pytree.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/torch/fx/__pycache__/_pytree.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..cbf6eb783e70d05f61ad757ba899d129ce7cd528 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/torch/fx/__pycache__/_pytree.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/torch/fx/__pycache__/_symbolic_trace.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/torch/fx/__pycache__/_symbolic_trace.cpython-310.pyc new file mode 
100644 index 0000000000000000000000000000000000000000..1249612b51ccc2be8b7eabbd32d77306f88cbcdc Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/torch/fx/__pycache__/_symbolic_trace.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/torch/fx/__pycache__/annotate.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/torch/fx/__pycache__/annotate.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..95bb0a765d672dacfea1bf26945b4637c1e0c913 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/torch/fx/__pycache__/annotate.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/torch/fx/__pycache__/config.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/torch/fx/__pycache__/config.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..514fe3b899b91a3e41bdb47596da55e687bd8ba8 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/torch/fx/__pycache__/config.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/torch/fx/__pycache__/graph.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/torch/fx/__pycache__/graph.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..ea9f291897d1899d4a30504b1d6d1f75a359422a Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/torch/fx/__pycache__/graph.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/torch/fx/__pycache__/graph_module.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/torch/fx/__pycache__/graph_module.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..caf2bb76a493884d9f490240566024d0bb469839 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/torch/fx/__pycache__/graph_module.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/torch/fx/__pycache__/immutable_collections.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/torch/fx/__pycache__/immutable_collections.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..b6cceca2ca6462d8c35020beb49746b62cdab175 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/torch/fx/__pycache__/immutable_collections.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/torch/fx/__pycache__/interpreter.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/torch/fx/__pycache__/interpreter.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..7425d201ad935bdc8a5a92aaf3d79cefc5777d91 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/torch/fx/__pycache__/interpreter.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/torch/fx/__pycache__/node.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/torch/fx/__pycache__/node.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..f92eeabdff1bd12a229787195f476841703948fc Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/torch/fx/__pycache__/node.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/torch/fx/__pycache__/operator_schemas.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/torch/fx/__pycache__/operator_schemas.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..2e6387885453170389a9b7ebf004424111891d8c Binary files /dev/null and 
b/llmeval-env/lib/python3.10/site-packages/torch/fx/__pycache__/operator_schemas.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/torch/fx/__pycache__/proxy.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/torch/fx/__pycache__/proxy.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..323c9af3d0bbc4bea41a94562f63ae179e9964f7 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/torch/fx/__pycache__/proxy.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/torch/fx/__pycache__/subgraph_rewriter.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/torch/fx/__pycache__/subgraph_rewriter.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..d716dc9ef0519386f5afc7791517e5b1d54ec7eb Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/torch/fx/__pycache__/subgraph_rewriter.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/torch/fx/__pycache__/tensor_type.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/torch/fx/__pycache__/tensor_type.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..6aca95e1566df9148f51e97db329b21f4332f52a Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/torch/fx/__pycache__/tensor_type.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/torch/fx/__pycache__/traceback.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/torch/fx/__pycache__/traceback.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..fda4d0f1cf3aa8f2b34ffacdfbeb4edb3eb189a4 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/torch/fx/__pycache__/traceback.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/torch/fx/passes/backends/__pycache__/__init__.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/torch/fx/passes/backends/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..ef96ae1bd0344f8fc6e3064c8a4e4794c915665b Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/torch/fx/passes/backends/__pycache__/__init__.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/torch/fx/passes/backends/__pycache__/cudagraphs.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/torch/fx/passes/backends/__pycache__/cudagraphs.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..59ec2850a471276fff53cee1912e45a0fcb6456c Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/torch/fx/passes/backends/__pycache__/cudagraphs.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/torch/share/cmake/Caffe2/Caffe2Config.cmake b/llmeval-env/lib/python3.10/site-packages/torch/share/cmake/Caffe2/Caffe2Config.cmake new file mode 100644 index 0000000000000000000000000000000000000000..bd1dbd8529bfd596fcae684f2790bce0b961cdfc --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/share/cmake/Caffe2/Caffe2Config.cmake @@ -0,0 +1,140 @@ +# - Config file for the Caffe2 package +# It defines the following variable(s) +# CAFFE2_INCLUDE_DIRS - include directories for FooBar +# as well as Caffe2 targets for other cmake libraries to use. + +# library version information + +# Utils functions. 
+include("${CMAKE_CURRENT_LIST_DIR}/public/utils.cmake") + +# Depending on whether Caffe2 uses gflags during compile time or +# not, invoke gflags. +if(OFF) + include("${CMAKE_CURRENT_LIST_DIR}/public/gflags.cmake") + if(NOT TARGET gflags) + message(FATAL_ERROR + "Your installed Caffe2 version uses gflags but the gflags library " + "cannot be found. Did you accidentally remove it, or have you set " + "the right CMAKE_PREFIX_PATH and/or GFLAGS_ROOT_DIR? If you do not " + "have gflags, you will need to install gflags and set the library " + "path accordingly.") + endif() +endif() + +# Depending on whether Caffe2 uses glog during compile time or +# not, invoke glog. +if(OFF) + include("${CMAKE_CURRENT_LIST_DIR}/public/glog.cmake") + if(NOT TARGET glog::glog) + message(FATAL_ERROR + "Your installed Caffe2 version uses glog but the glog library " + "cannot be found. Did you accidentally remove it, or have you set " + "the right CMAKE_PREFIX_PATH and/or GFLAGS_ROOT_DIR? If you do not " + "have glog, you will need to install glog and set the library " + "path accordingly.") + endif() +endif() + +# Protobuf +if(ON) + if(NOT TARGET protobuf::libprotobuf) + # Define protobuf::libprotobuf as a dummy target to resolve references to + # protobuf::libprotobuf in Caffe2Targets.cmake. + add_library(dummy INTERFACE) + add_library(protobuf::libprotobuf ALIAS dummy) + endif() +else() + include("${CMAKE_CURRENT_LIST_DIR}/public/protobuf.cmake") + if(NOT TARGET protobuf::libprotobuf) + message(FATAL_ERROR + "Your installed Caffe2 version uses protobuf but the protobuf library " + "cannot be found. Did you accidentally remove it, or have you set " + "the right CMAKE_PREFIX_PATH? If you do not have protobuf, you will " + "need to install protobuf and set the library path accordingly.") + endif() + message(STATUS "Caffe2: Protobuf version " ${Protobuf_VERSION}) + # If during build time we know the protobuf version, we will also do a sanity + # check to ensure that the protobuf library that Caffe2 found is consistent + # with the compiled version. + if(FALSE) + if(NOT (${Protobuf_VERSION} VERSION_EQUAL Protobuf_VERSION_NOTFOUND)) + message(FATAL_ERROR + "Your installed Caffe2 is built with protobuf " + "Protobuf_VERSION_NOTFOUND" + ", while your current cmake setting discovers protobuf version " + ${Protobuf_VERSION} + ". Please specify a protobuf version that is the same as the built " + "version.") + endif() + endif() +endif() + +if (OFF) + include("${CMAKE_CURRENT_LIST_DIR}/public/LoadHIP.cmake") +endif() + +if(ON) + # The file public/cuda.cmake exclusively uses CAFFE2_USE_*. + # If Caffe2 was compiled with the libraries below, they must + # be found again when including the Caffe2 target. + set(CAFFE2_USE_CUDA ON) + set(CAFFE2_USE_TENSORRT OFF) + + # Add current directory to module path so we pick up FindCUDAToolkit.cmake + set(old_CMAKE_MODULE_PATH "${CMAKE_MODULE_PATH}") + list(APPEND CMAKE_MODULE_PATH "${CMAKE_CURRENT_LIST_DIR}") + include("${CMAKE_CURRENT_LIST_DIR}/public/cuda.cmake") + set(CMAKE_MODULE_PATH "${old_CMAKE_MODULE_PATH}") + + if(ON AND NOT CAFFE2_USE_CUDA) + message(FATAL_ERROR + "Your installed Caffe2 version uses CUDA but I cannot find the CUDA " + "libraries. Please set the proper CUDA prefixes and / or install " + "CUDA.") + endif() + if(OFF AND NOT CAFFE2_USE_TENSORRT) + message(FATAL_ERROR + "Your installed Caffe2 version uses TensorRT but I cannot find the TensorRT " + "libraries. 
Please set the proper TensorRT prefixes and / or install " + "TensorRT.") + endif() +endif() + +if(OFF) + # Add current directory to module path so we pick up FindSYCLToolkit.cmake + set(old_CMAKE_MODULE_PATH "${CMAKE_MODULE_PATH}") + list(APPEND CMAKE_MODULE_PATH "${CMAKE_CURRENT_LIST_DIR}") + include("${CMAKE_CURRENT_LIST_DIR}/public/xpu.cmake") + set(CMAKE_MODULE_PATH "${old_CMAKE_MODULE_PATH}") +endif() + +if(ON) + include("${CMAKE_CURRENT_LIST_DIR}/public/mkl.cmake") +endif() + +if(ON) + include("${CMAKE_CURRENT_LIST_DIR}/public/mkldnn.cmake") +endif() + +# import targets +include ("${CMAKE_CURRENT_LIST_DIR}/Caffe2Targets.cmake") + +# Interface libraries, that allows one to build proper link flags. +# We will also define a helper variable, Caffe2_MAIN_LIBS, that resolves to +# the main caffe2 libraries in cases of cuda presence / absence. +set(Caffe2_MAIN_LIBS torch_library) + +# include directory. +# +# Newer versions of CMake set the INTERFACE_INCLUDE_DIRECTORIES property +# of the imported targets. It is hence not necessary to add this path +# manually to the include search path for targets which link to gflags. +# The following lines are here for backward compatibility, in case one +# would like to use the old-style include path. +get_filename_component( + CMAKE_CURRENT_LIST_DIR "${CMAKE_CURRENT_LIST_FILE}" PATH) +# Note: the current list dir is _INSTALL_PREFIX/share/cmake/Gloo. +get_filename_component( + _INSTALL_PREFIX "${CMAKE_CURRENT_LIST_DIR}/../../../" ABSOLUTE) +set(CAFFE2_INCLUDE_DIRS "${_INSTALL_PREFIX}/include") diff --git a/llmeval-env/lib/python3.10/site-packages/torch/share/cmake/Caffe2/Caffe2Targets-release.cmake b/llmeval-env/lib/python3.10/site-packages/torch/share/cmake/Caffe2/Caffe2Targets-release.cmake new file mode 100644 index 0000000000000000000000000000000000000000..535b0b46668326d08d998217a8b9715ce510150e --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/share/cmake/Caffe2/Caffe2Targets-release.cmake @@ -0,0 +1,59 @@ +#---------------------------------------------------------------- +# Generated CMake target import file for configuration "Release". +#---------------------------------------------------------------- + +# Commands may need to know the format version. 
+set(CMAKE_IMPORT_FILE_VERSION 1) + +# Import target "c10_cuda" for configuration "Release" +set_property(TARGET c10_cuda APPEND PROPERTY IMPORTED_CONFIGURATIONS RELEASE) +set_target_properties(c10_cuda PROPERTIES + IMPORTED_LOCATION_RELEASE "${_IMPORT_PREFIX}/lib/libc10_cuda.so" + IMPORTED_SONAME_RELEASE "libc10_cuda.so" + ) + +list(APPEND _IMPORT_CHECK_TARGETS c10_cuda ) +list(APPEND _IMPORT_CHECK_FILES_FOR_c10_cuda "${_IMPORT_PREFIX}/lib/libc10_cuda.so" ) + +# Import target "c10" for configuration "Release" +set_property(TARGET c10 APPEND PROPERTY IMPORTED_CONFIGURATIONS RELEASE) +set_target_properties(c10 PROPERTIES + IMPORTED_LOCATION_RELEASE "${_IMPORT_PREFIX}/lib/libc10.so" + IMPORTED_SONAME_RELEASE "libc10.so" + ) + +list(APPEND _IMPORT_CHECK_TARGETS c10 ) +list(APPEND _IMPORT_CHECK_FILES_FOR_c10 "${_IMPORT_PREFIX}/lib/libc10.so" ) + +# Import target "torch_cpu" for configuration "Release" +set_property(TARGET torch_cpu APPEND PROPERTY IMPORTED_CONFIGURATIONS RELEASE) +set_target_properties(torch_cpu PROPERTIES + IMPORTED_LOCATION_RELEASE "${_IMPORT_PREFIX}/lib/libtorch_cpu.so" + IMPORTED_SONAME_RELEASE "libtorch_cpu.so" + ) + +list(APPEND _IMPORT_CHECK_TARGETS torch_cpu ) +list(APPEND _IMPORT_CHECK_FILES_FOR_torch_cpu "${_IMPORT_PREFIX}/lib/libtorch_cpu.so" ) + +# Import target "torch_cuda" for configuration "Release" +set_property(TARGET torch_cuda APPEND PROPERTY IMPORTED_CONFIGURATIONS RELEASE) +set_target_properties(torch_cuda PROPERTIES + IMPORTED_LOCATION_RELEASE "${_IMPORT_PREFIX}/lib/libtorch_cuda.so" + IMPORTED_SONAME_RELEASE "libtorch_cuda.so" + ) + +list(APPEND _IMPORT_CHECK_TARGETS torch_cuda ) +list(APPEND _IMPORT_CHECK_FILES_FOR_torch_cuda "${_IMPORT_PREFIX}/lib/libtorch_cuda.so" ) + +# Import target "torch" for configuration "Release" +set_property(TARGET torch APPEND PROPERTY IMPORTED_CONFIGURATIONS RELEASE) +set_target_properties(torch PROPERTIES + IMPORTED_LOCATION_RELEASE "${_IMPORT_PREFIX}/lib/libtorch.so" + IMPORTED_SONAME_RELEASE "libtorch.so" + ) + +list(APPEND _IMPORT_CHECK_TARGETS torch ) +list(APPEND _IMPORT_CHECK_FILES_FOR_torch "${_IMPORT_PREFIX}/lib/libtorch.so" ) + +# Commands beyond this point should not need to know the version. +set(CMAKE_IMPORT_FILE_VERSION) diff --git a/llmeval-env/lib/python3.10/site-packages/torch/share/cmake/Caffe2/Caffe2Targets.cmake b/llmeval-env/lib/python3.10/site-packages/torch/share/cmake/Caffe2/Caffe2Targets.cmake new file mode 100644 index 0000000000000000000000000000000000000000..3d04a23a1805cda8bfbbfff18e8fdc30c973fe73 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/share/cmake/Caffe2/Caffe2Targets.cmake @@ -0,0 +1,180 @@ +# Generated by CMake + +if("${CMAKE_MAJOR_VERSION}.${CMAKE_MINOR_VERSION}" LESS 2.5) + message(FATAL_ERROR "CMake >= 2.6.0 required") +endif() +cmake_policy(PUSH) +cmake_policy(VERSION 2.6...3.17) +#---------------------------------------------------------------- +# Generated CMake target import file. +#---------------------------------------------------------------- + +# Commands may need to know the format version. +set(CMAKE_IMPORT_FILE_VERSION 1) + +# Protect against multiple inclusion, which would fail when already imported targets are added once more. 
+set(_targetsDefined) +set(_targetsNotDefined) +set(_expectedTargets) +foreach(_expectedTarget c10_cuda c10 torch_cpu torch_cpu_library torch_cuda torch_cuda_library torch torch_library) + list(APPEND _expectedTargets ${_expectedTarget}) + if(NOT TARGET ${_expectedTarget}) + list(APPEND _targetsNotDefined ${_expectedTarget}) + endif() + if(TARGET ${_expectedTarget}) + list(APPEND _targetsDefined ${_expectedTarget}) + endif() +endforeach() +if("${_targetsDefined}" STREQUAL "${_expectedTargets}") + unset(_targetsDefined) + unset(_targetsNotDefined) + unset(_expectedTargets) + set(CMAKE_IMPORT_FILE_VERSION) + cmake_policy(POP) + return() +endif() +if(NOT "${_targetsDefined}" STREQUAL "") + message(FATAL_ERROR "Some (but not all) targets in this export set were already defined.\nTargets Defined: ${_targetsDefined}\nTargets not yet defined: ${_targetsNotDefined}\n") +endif() +unset(_targetsDefined) +unset(_targetsNotDefined) +unset(_expectedTargets) + + +# Compute the installation prefix relative to this file. +get_filename_component(_IMPORT_PREFIX "${CMAKE_CURRENT_LIST_FILE}" PATH) +get_filename_component(_IMPORT_PREFIX "${_IMPORT_PREFIX}" PATH) +get_filename_component(_IMPORT_PREFIX "${_IMPORT_PREFIX}" PATH) +get_filename_component(_IMPORT_PREFIX "${_IMPORT_PREFIX}" PATH) +if(_IMPORT_PREFIX STREQUAL "/") + set(_IMPORT_PREFIX "") +endif() + +# Create imported target c10_cuda +add_library(c10_cuda SHARED IMPORTED) + +set_target_properties(c10_cuda PROPERTIES + INTERFACE_INCLUDE_DIRECTORIES "${_IMPORT_PREFIX}/include" + INTERFACE_LINK_LIBRARIES "c10;torch::cudart" +) + +# Create imported target c10 +add_library(c10 SHARED IMPORTED) + +set_target_properties(c10 PROPERTIES + INTERFACE_INCLUDE_DIRECTORIES "${_IMPORT_PREFIX}/include" +) + +# Create imported target torch_cpu +add_library(torch_cpu SHARED IMPORTED) + +set_target_properties(torch_cpu PROPERTIES + INTERFACE_COMPILE_DEFINITIONS "USE_DISTRIBUTED;USE_C10D_GLOO;USE_RPC;USE_TENSORPIPE" + INTERFACE_INCLUDE_DIRECTORIES "${_IMPORT_PREFIX}/include" + INTERFACE_LINK_LIBRARIES "protobuf::libprotobuf;c10;caffe2::mkl" +) + +# Create imported target torch_cpu_library +add_library(torch_cpu_library INTERFACE IMPORTED) + +set_target_properties(torch_cpu_library PROPERTIES + INTERFACE_COMPILE_DEFINITIONS "\$" + INTERFACE_COMPILE_OPTIONS "\$" + INTERFACE_INCLUDE_DIRECTORIES "\$" + INTERFACE_LINK_LIBRARIES "-Wl,--no-as-needed,\"\$\" -Wl,--as-needed;\$" + INTERFACE_SYSTEM_INCLUDE_DIRECTORIES "\$" +) + +# Create imported target torch_cuda +add_library(torch_cuda SHARED IMPORTED) + +set_target_properties(torch_cuda PROPERTIES + INTERFACE_COMPILE_DEFINITIONS "USE_C10D_NCCL" + INTERFACE_INCLUDE_DIRECTORIES "${_IMPORT_PREFIX}/include" + INTERFACE_LINK_LIBRARIES "torch::cudart;c10_cuda;torch::nvtoolsext;torch_cpu_library" +) + +# Create imported target torch_cuda_library +add_library(torch_cuda_library INTERFACE IMPORTED) + +set_target_properties(torch_cuda_library PROPERTIES + INTERFACE_COMPILE_DEFINITIONS "\$" + INTERFACE_COMPILE_OPTIONS "\$" + INTERFACE_INCLUDE_DIRECTORIES "\$" + INTERFACE_LINK_LIBRARIES "-Wl,--no-as-needed,\"\$\" -Wl,--as-needed;\$" + INTERFACE_SYSTEM_INCLUDE_DIRECTORIES "\$" +) + +# Create imported target torch +add_library(torch SHARED IMPORTED) + +set_target_properties(torch PROPERTIES + INTERFACE_LINK_LIBRARIES "torch_cpu_library;torch_cuda_library" +) + +# Create imported target torch_library +add_library(torch_library INTERFACE IMPORTED) + +set_target_properties(torch_library PROPERTIES + INTERFACE_COMPILE_DEFINITIONS "\$" + 
INTERFACE_COMPILE_OPTIONS "\$" + INTERFACE_INCLUDE_DIRECTORIES "\$" + INTERFACE_LINK_LIBRARIES "-Wl,--no-as-needed,\"\$\" -Wl,--as-needed;\$" + INTERFACE_SYSTEM_INCLUDE_DIRECTORIES "\$" +) + +if(CMAKE_VERSION VERSION_LESS 3.0.0) + message(FATAL_ERROR "This file relies on consumers using CMake 3.0.0 or greater.") +endif() + +# Load information for each installed configuration. +get_filename_component(_DIR "${CMAKE_CURRENT_LIST_FILE}" PATH) +file(GLOB CONFIG_FILES "${_DIR}/Caffe2Targets-*.cmake") +foreach(f ${CONFIG_FILES}) + include(${f}) +endforeach() + +# Cleanup temporary variables. +set(_IMPORT_PREFIX) + +# Loop over all imported files and verify that they actually exist +foreach(target ${_IMPORT_CHECK_TARGETS} ) + foreach(file ${_IMPORT_CHECK_FILES_FOR_${target}} ) + if(NOT EXISTS "${file}" ) + message(FATAL_ERROR "The imported target \"${target}\" references the file + \"${file}\" +but this file does not exist. Possible reasons include: +* The file was deleted, renamed, or moved to another location. +* An install or uninstall procedure did not complete successfully. +* The installation package was faulty and contained + \"${CMAKE_CURRENT_LIST_FILE}\" +but not all the files it references. +") + endif() + endforeach() + unset(_IMPORT_CHECK_FILES_FOR_${target}) +endforeach() +unset(_IMPORT_CHECK_TARGETS) + +# Make sure the targets which have been exported in some other +# export set exist. +unset(${CMAKE_FIND_PACKAGE_NAME}_NOT_FOUND_MESSAGE_targets) +foreach(_target "protobuf::libprotobuf" ) + if(NOT TARGET "${_target}" ) + set(${CMAKE_FIND_PACKAGE_NAME}_NOT_FOUND_MESSAGE_targets "${${CMAKE_FIND_PACKAGE_NAME}_NOT_FOUND_MESSAGE_targets} ${_target}") + endif() +endforeach() + +if(DEFINED ${CMAKE_FIND_PACKAGE_NAME}_NOT_FOUND_MESSAGE_targets) + if(CMAKE_FIND_PACKAGE_NAME) + set( ${CMAKE_FIND_PACKAGE_NAME}_FOUND FALSE) + set( ${CMAKE_FIND_PACKAGE_NAME}_NOT_FOUND_MESSAGE "The following imported targets are referenced, but are missing: ${${CMAKE_FIND_PACKAGE_NAME}_NOT_FOUND_MESSAGE_targets}") + else() + message(FATAL_ERROR "The following imported targets are referenced, but are missing: ${${CMAKE_FIND_PACKAGE_NAME}_NOT_FOUND_MESSAGE_targets}") + endif() +endif() +unset(${CMAKE_FIND_PACKAGE_NAME}_NOT_FOUND_MESSAGE_targets) + +# Commands beyond this point should not need to know the version. +set(CMAKE_IMPORT_FILE_VERSION) +cmake_policy(POP) diff --git a/llmeval-env/lib/python3.10/site-packages/torch/share/cmake/Caffe2/FindCUDAToolkit.cmake b/llmeval-env/lib/python3.10/site-packages/torch/share/cmake/Caffe2/FindCUDAToolkit.cmake new file mode 100644 index 0000000000000000000000000000000000000000..7c8a79c5493afa763ad1bc5499d5074892c4aafc --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/share/cmake/Caffe2/FindCUDAToolkit.cmake @@ -0,0 +1,1073 @@ + +# This module is back-ported from CMake 3.17 and above to work with CMake 3.10 + +# Distributed under the OSI-approved BSD 3-Clause License. See accompanying +# file Copyright.txt or https://cmake.org/licensing for details. + +#[=======================================================================[.rst: +FindCUDAToolkit +--------------- + +.. versionadded:: 3.17 + +This script locates the NVIDIA CUDA toolkit and the associated libraries, but +does not require the ``CUDA`` language be enabled for a given project. This +module does not search for the NVIDIA CUDA Samples. + +.. versionadded:: 3.19 + QNX support. + +Search Behavior +^^^^^^^^^^^^^^^ + +The CUDA Toolkit search behavior uses the following order: + +1. 
If the ``CUDA`` language has been enabled we will use the directory + containing the compiler as the first search location for ``nvcc``. + +2. If the ``CUDAToolkit_ROOT`` cmake configuration variable (e.g., + ``-DCUDAToolkit_ROOT=/some/path``) *or* environment variable is defined, it + will be searched. If both an environment variable **and** a + configuration variable are specified, the *configuration* variable takes + precedence. + + The directory specified here must be such that the executable ``nvcc`` or + the appropriate ``version.txt`` file can be found underneath the specified + directory. + +3. If the CUDA_PATH environment variable is defined, it will be searched + for ``nvcc``. + +4. The user's path is searched for ``nvcc`` using :command:`find_program`. If + this is found, no subsequent search attempts are performed. Users are + responsible for ensuring that the first ``nvcc`` to show up in the path is + the desired path in the event that multiple CUDA Toolkits are installed. + +5. On Unix systems, if the symbolic link ``/usr/local/cuda`` exists, this is + used. No subsequent search attempts are performed. No default symbolic link + location exists for the Windows platform. + +6. The platform specific default install locations are searched. If exactly one + candidate is found, this is used. The default CUDA Toolkit install locations + searched are: + + +-------------+-------------------------------------------------------------+ + | Platform | Search Pattern | + +=============+=============================================================+ + | macOS | ``/Developer/NVIDIA/CUDA-X.Y`` | + +-------------+-------------------------------------------------------------+ + | Other Unix | ``/usr/local/cuda-X.Y`` | + +-------------+-------------------------------------------------------------+ + | Windows | ``C:\Program Files\NVIDIA GPU Computing Toolkit\CUDA\vX.Y`` | + +-------------+-------------------------------------------------------------+ + + Where ``X.Y`` would be a specific version of the CUDA Toolkit, such as + ``/usr/local/cuda-9.0`` or + ``C:\Program Files\NVIDIA GPU Computing Toolkit\CUDA\v9.0`` + + .. note:: + + When multiple CUDA Toolkits are installed in the default location of a + system(e.g., both ``/usr/local/cuda-9.0`` and ``/usr/local/cuda-10.0`` + exist but the ``/usr/local/cuda`` symbolic link does **not** exist), this + package is marked as **not** found. + + There are too many factors involved in making an automatic decision in + the presence of multiple CUDA Toolkits being installed. In this + situation, users are encouraged to either (1) set ``CUDAToolkit_ROOT`` or + (2) ensure that the correct ``nvcc`` executable shows up in ``$PATH`` for + :command:`find_program` to find. + +Arguments +^^^^^^^^^ + +``[]`` + The ``[]`` argument requests a version with which the package found + should be compatible. See :ref:`find_package version format ` + for more details. + +Options +^^^^^^^ + +``REQUIRED`` + If specified, configuration will error if a suitable CUDA Toolkit is not + found. + +``QUIET`` + If specified, the search for a suitable CUDA Toolkit will not produce any + messages. + +``EXACT`` + If specified, the CUDA Toolkit is considered found only if the exact + ``VERSION`` specified is recovered. + +Imported targets +^^^^^^^^^^^^^^^^ + +An :ref:`imported target ` named ``CUDA::toolkit`` is provided. 
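For instance, a downstream project might consume the imported targets this module creates roughly as follows. This is a minimal sketch, not part of the module itself; the project name, source file, and the particular ``CUDA::`` libraries linked are illustrative assumptions::

  cmake_minimum_required(VERSION 3.17)
  project(cuda_consumer LANGUAGES CXX)

  # Locate the CUDA Toolkit without enabling the CUDA language.
  find_package(CUDAToolkit REQUIRED)

  add_executable(cuda_consumer main.cpp)

  # Link against imported targets created by this module.
  target_link_libraries(cuda_consumer PRIVATE CUDA::cudart CUDA::cublas)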
+ +This module defines :prop_tgt:`IMPORTED` targets for each +of the following libraries that are part of the CUDAToolkit: + +- :ref:`CUDA Runtime Library` +- :ref:`CUDA Driver Library` +- :ref:`cuBLAS` +- :ref:`cuFFT` +- :ref:`cuRAND` +- :ref:`cuSOLVER` +- :ref:`cuSPARSE` +- :ref:`cuPTI` +- :ref:`NPP` +- :ref:`nvBLAS` +- :ref:`nvGRAPH` +- :ref:`nvJPEG` +- :ref:`nvidia-ML` +- :ref:`nvRTC` +- :ref:`nvToolsExt` +- :ref:`OpenCL` +- :ref:`cuLIBOS` + +.. _`cuda_toolkit_rt_lib`: + +CUDA Runtime Library +"""""""""""""""""""" + +The CUDA Runtime library (cudart) are what most applications will typically +need to link against to make any calls such as `cudaMalloc`, and `cudaFree`. + +Targets Created: + +- ``CUDA::cudart`` +- ``CUDA::cudart_static`` + +.. _`cuda_toolkit_driver_lib`: + +CUDA Driver Library +"""""""""""""""""""" + +The CUDA Driver library (cuda) are used by applications that use calls +such as `cuMemAlloc`, and `cuMemFree`. + +Targets Created: + +- ``CUDA::cuda_driver`` + +.. _`cuda_toolkit_cuBLAS`: + +cuBLAS +"""""" + +The `cuBLAS `_ library. + +Targets Created: + +- ``CUDA::cublas`` +- ``CUDA::cublas_static`` +- ``CUDA::cublasLt`` starting in CUDA 10.1 +- ``CUDA::cublasLt_static`` starting in CUDA 10.1 + +.. _`cuda_toolkit_cuFFT`: + +cuFFT +""""" + +The `cuFFT `_ library. + +Targets Created: + +- ``CUDA::cufft`` +- ``CUDA::cufftw`` +- ``CUDA::cufft_static`` +- ``CUDA::cufft_static_nocallback`` starting in CUDA 9.2, requires CMake 3.23+ +- ``CUDA::cufftw_static`` + +cuRAND +"""""" + +The `cuRAND `_ library. + +Targets Created: + +- ``CUDA::curand`` +- ``CUDA::curand_static`` + +.. _`cuda_toolkit_cuSOLVER`: + +cuSOLVER +"""""""" + +The `cuSOLVER `_ library. + +Targets Created: + +- ``CUDA::cusolver`` +- ``CUDA::cusolver_static`` + +.. _`cuda_toolkit_cuSPARSE`: + +cuSPARSE +"""""""" + +The `cuSPARSE `_ library. + +Targets Created: + +- ``CUDA::cusparse`` +- ``CUDA::cusparse_static`` + +.. _`cuda_toolkit_cupti`: + +cupti +""""" + +The `NVIDIA CUDA Profiling Tools Interface `_. + +Targets Created: + +- ``CUDA::cupti`` +- ``CUDA::cupti_static`` + +.. _`cuda_toolkit_NPP`: + +NPP +""" + +The `NPP `_ libraries. + +Targets Created: + +- `nppc`: + + - ``CUDA::nppc`` + - ``CUDA::nppc_static`` + +- `nppial`: Arithmetic and logical operation functions in `nppi_arithmetic_and_logical_operations.h` + + - ``CUDA::nppial`` + - ``CUDA::nppial_static`` + +- `nppicc`: Color conversion and sampling functions in `nppi_color_conversion.h` + + - ``CUDA::nppicc`` + - ``CUDA::nppicc_static`` + +- `nppicom`: JPEG compression and decompression functions in `nppi_compression_functions.h` + Removed starting in CUDA 11.0, use :ref:`nvJPEG` instead. 
+ + - ``CUDA::nppicom`` + - ``CUDA::nppicom_static`` + +- `nppidei`: Data exchange and initialization functions in `nppi_data_exchange_and_initialization.h` + + - ``CUDA::nppidei`` + - ``CUDA::nppidei_static`` + +- `nppif`: Filtering and computer vision functions in `nppi_filter_functions.h` + + - ``CUDA::nppif`` + - ``CUDA::nppif_static`` + +- `nppig`: Geometry transformation functions found in `nppi_geometry_transforms.h` + + - ``CUDA::nppig`` + - ``CUDA::nppig_static`` + +- `nppim`: Morphological operation functions found in `nppi_morphological_operations.h` + + - ``CUDA::nppim`` + - ``CUDA::nppim_static`` + +- `nppist`: Statistics and linear transform in `nppi_statistics_functions.h` and `nppi_linear_transforms.h` + + - ``CUDA::nppist`` + - ``CUDA::nppist_static`` + +- `nppisu`: Memory support functions in `nppi_support_functions.h` + + - ``CUDA::nppisu`` + - ``CUDA::nppisu_static`` + +- `nppitc`: Threshold and compare operation functions in `nppi_threshold_and_compare_operations.h` + + - ``CUDA::nppitc`` + - ``CUDA::nppitc_static`` + +- `npps`: + + - ``CUDA::npps`` + - ``CUDA::npps_static`` + +.. _`cuda_toolkit_nvBLAS`: + +nvBLAS +"""""" + +The `nvBLAS `_ libraries. +This is a shared library only. + +Targets Created: + +- ``CUDA::nvblas`` + +.. _`cuda_toolkit_nvGRAPH`: + +nvGRAPH +""""""" + +The `nvGRAPH `_ library. +Removed starting in CUDA 11.0 + +Targets Created: + +- ``CUDA::nvgraph`` +- ``CUDA::nvgraph_static`` + + +.. _`cuda_toolkit_nvJPEG`: + +nvJPEG +"""""" + +The `nvJPEG `_ library. +Introduced in CUDA 10. + +Targets Created: + +- ``CUDA::nvjpeg`` +- ``CUDA::nvjpeg_static`` + +.. _`cuda_toolkit_nvRTC`: + +nvRTC +""""" + +The `nvRTC `_ (Runtime Compilation) library. +This is a shared library only. + +Targets Created: + +- ``CUDA::nvrtc`` + +.. _`cuda_toolkit_nvml`: + +nvidia-ML +""""""""" + +The `NVIDIA Management Library `_. +This is a shared library only. + +Targets Created: + +- ``CUDA::nvml`` + +.. _`cuda_toolkit_nvToolsExt`: + +nvToolsExt +"""""""""" + +The `NVIDIA Tools Extension `_. +This is a shared library only. + +Targets Created: + +- ``CUDA::nvToolsExt`` + +.. _`cuda_toolkit_opencl`: + +OpenCL +"""""" + +The `NVIDIA OpenCL Library `_. +This is a shared library only. + +Targets Created: + +- ``CUDA::OpenCL`` + +.. _`cuda_toolkit_cuLIBOS`: + +cuLIBOS +""""""" + +The cuLIBOS library is a backend thread abstraction layer library which is +static only. The ``CUDA::cublas_static``, ``CUDA::cusparse_static``, +``CUDA::cufft_static``, ``CUDA::curand_static``, and (when implemented) NPP +libraries all automatically have this dependency linked. + +Target Created: + +- ``CUDA::culibos`` + +**Note**: direct usage of this target by consumers should not be necessary. + +.. _`cuda_toolkit_cuRAND`: + + + +Result variables +^^^^^^^^^^^^^^^^ + +``CUDAToolkit_FOUND`` + A boolean specifying whether or not the CUDA Toolkit was found. + +``CUDAToolkit_VERSION`` + The exact version of the CUDA Toolkit found (as reported by + ``nvcc --version`` or ``version.txt``). + +``CUDAToolkit_VERSION_MAJOR`` + The major version of the CUDA Toolkit. + +``CUDAToolkit_VERSION_MINOR`` + The minor version of the CUDA Toolkit. + +``CUDAToolkit_VERSION_PATCH`` + The patch version of the CUDA Toolkit. + +``CUDAToolkit_BIN_DIR`` + The path to the CUDA Toolkit library directory that contains the CUDA + executable ``nvcc``. + +``CUDAToolkit_INCLUDE_DIRS`` + The path to the CUDA Toolkit ``include`` folder containing the header files + required to compile a project linking against CUDA. 
+ +``CUDAToolkit_LIBRARY_DIR`` + The path to the CUDA Toolkit library directory that contains the CUDA + Runtime library ``cudart``. + +``CUDAToolkit_LIBRARY_ROOT`` + .. versionadded:: 3.18 + + The path to the CUDA Toolkit directory containing the nvvm directory and + version.txt. + +``CUDAToolkit_TARGET_DIR`` + The path to the CUDA Toolkit directory including the target architecture + when cross-compiling. When not cross-compiling this will be equivalent to + the parent directory of ``CUDAToolkit_BIN_DIR``. + +``CUDAToolkit_NVCC_EXECUTABLE`` + The path to the NVIDIA CUDA compiler ``nvcc``. Note that this path may + **not** be the same as + :variable:`CMAKE_CUDA_COMPILER _COMPILER>`. ``nvcc`` must be + found to determine the CUDA Toolkit version as well as determining other + features of the Toolkit. This variable is set for the convenience of + modules that depend on this one. + + +#]=======================================================================] + +# NOTE: much of this was simply extracted from FindCUDA.cmake. + +# James Bigler, NVIDIA Corp (nvidia.com - jbigler) +# Abe Stephens, SCI Institute -- http://www.sci.utah.edu/~abe/FindCuda.html +# +# Copyright (c) 2008 - 2009 NVIDIA Corporation. All rights reserved. +# +# Copyright (c) 2007-2009 +# Scientific Computing and Imaging Institute, University of Utah +# +# This code is licensed under the MIT License. See the FindCUDA.cmake script +# for the text of the license. + +# The MIT License +# +# License for the specific language governing rights and limitations under +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the "Software"), +# to deal in the Software without restriction, including without limitation +# the rights to use, copy, modify, merge, publish, distribute, sublicense, +# and/or sell copies of the Software, and to permit persons to whom the +# Software is furnished to do so, subject to the following conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL +# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING +# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER +# DEALINGS IN THE SOFTWARE. +# +############################################################################### + +# The toolkit is located during compiler detection for CUDA and stored in CMakeCUDACompiler.cmake as +# CMAKE_CUDA_COMPILER_TOOLKIT_ROOT and CMAKE_CUDA_COMPILER_LIBRARY_ROOT. +# We compute the rest based on those here to avoid re-searching and to avoid finding a possibly +# different installation. 
+if(CMAKE_CUDA_COMPILER_TOOLKIT_ROOT) + set(CUDAToolkit_ROOT_DIR "${CMAKE_CUDA_COMPILER_TOOLKIT_ROOT}") + set(CUDAToolkit_LIBRARY_ROOT "${CMAKE_CUDA_COMPILER_LIBRARY_ROOT}") + set(CUDAToolkit_VERSION "${CMAKE_CUDA_COMPILER_TOOLKIT_VERSION}") + + if(CUDAToolkit_VERSION MATCHES [=[([0-9]+)\.([0-9]+)\.([0-9]+)]=]) + set(CUDAToolkit_VERSION_MAJOR "${CMAKE_MATCH_1}") + set(CUDAToolkit_VERSION_MINOR "${CMAKE_MATCH_2}") + set(CUDAToolkit_VERSION_PATCH "${CMAKE_MATCH_3}") + endif() +else() + function(_CUDAToolkit_find_root_dir ) + cmake_parse_arguments(arg "" "" "SEARCH_PATHS;FIND_FLAGS" ${ARGN}) + + if(NOT CUDAToolkit_BIN_DIR) + if(NOT CUDAToolkit_SENTINEL_FILE) + find_program(CUDAToolkit_NVCC_EXECUTABLE + NAMES nvcc nvcc.exe + PATHS ${arg_SEARCH_PATHS} + ${arg_FIND_FLAGS} + ) + endif() + + if(NOT CUDAToolkit_NVCC_EXECUTABLE) + find_file(CUDAToolkit_SENTINEL_FILE + NAMES version.txt + PATHS ${arg_SEARCH_PATHS} + NO_DEFAULT_PATH + ) + endif() + + if(EXISTS "${CUDAToolkit_NVCC_EXECUTABLE}") + # If NVCC exists then invoke it to find the toolkit location. + # This allows us to support wrapper scripts (e.g. ccache or colornvcc), CUDA Toolkit, + # NVIDIA HPC SDK, and distro's splayed layouts + execute_process(COMMAND ${CUDAToolkit_NVCC_EXECUTABLE} "-v" "__cmake_determine_cuda" + OUTPUT_VARIABLE _CUDA_NVCC_OUT ERROR_VARIABLE _CUDA_NVCC_OUT) + if(_CUDA_NVCC_OUT MATCHES "\\#\\$ TOP=([^\r\n]*)") + get_filename_component(CUDAToolkit_BIN_DIR "${CMAKE_MATCH_1}/bin" ABSOLUTE) + else() + get_filename_component(CUDAToolkit_BIN_DIR "${CUDAToolkit_NVCC_EXECUTABLE}" DIRECTORY) + endif() + unset(_CUDA_NVCC_OUT) + + mark_as_advanced(CUDAToolkit_BIN_DIR) + set(CUDAToolkit_BIN_DIR "${CUDAToolkit_BIN_DIR}" CACHE PATH "" FORCE) + endif() + + if(CUDAToolkit_SENTINEL_FILE) + get_filename_component(CUDAToolkit_BIN_DIR ${CUDAToolkit_SENTINEL_FILE} DIRECTORY ABSOLUTE) + set(CUDAToolkit_BIN_DIR "${CUDAToolkit_BIN_DIR}/bin") + + set(CUDAToolkit_BIN_DIR "${CUDAToolkit_BIN_DIR}" CACHE PATH "" FORCE) + mark_as_advanced(CUDAToolkit_BIN_DIR) + endif() + endif() + + if(CUDAToolkit_BIN_DIR) + get_filename_component(CUDAToolkit_ROOT_DIR ${CUDAToolkit_BIN_DIR} DIRECTORY ABSOLUTE) + set(CUDAToolkit_ROOT_DIR "${CUDAToolkit_ROOT_DIR}" PARENT_SCOPE) + endif() + + endfunction() + + # For NVCC we can easily deduce the SDK binary directory from the compiler path. + if(CMAKE_CUDA_COMPILER_LOADED AND NOT CUDAToolkit_BIN_DIR AND CMAKE_CUDA_COMPILER_ID STREQUAL "NVIDIA") + get_filename_component(CUDAToolkit_BIN_DIR "${CMAKE_CUDA_COMPILER}" DIRECTORY) + set(CUDAToolkit_BIN_DIR "${CUDAToolkit_BIN_DIR}" CACHE PATH "") + # Try language provided path first. + _CUDAToolkit_find_root_dir(SEARCH_PATHS "${CUDAToolkit_BIN_DIR}" FIND_FLAGS NO_DEFAULT_PATH) + mark_as_advanced(CUDAToolkit_BIN_DIR) + endif() + + # Try user provided path + if(NOT CUDAToolkit_ROOT_DIR AND CUDAToolkit_ROOT) + _CUDAToolkit_find_root_dir(SEARCH_PATHS "${CUDAToolkit_ROOT}" FIND_FLAGS PATH_SUFFIXES bin NO_DEFAULT_PATH) + endif() + if(NOT CUDAToolkit_ROOT_DIR) + _CUDAToolkit_find_root_dir(FIND_FLAGS PATHS ENV CUDA_PATH PATH_SUFFIXES bin) + endif() + + # If the user specified CUDAToolkit_ROOT but the toolkit could not be found, this is an error. + if(NOT CUDAToolkit_ROOT_DIR AND (DEFINED CUDAToolkit_ROOT OR DEFINED ENV{CUDAToolkit_ROOT})) + # Declare error messages now, print later depending on find_package args. 
+ set(fail_base "Could not find nvcc executable in path specified by") + set(cuda_root_fail "${fail_base} CUDAToolkit_ROOT=${CUDAToolkit_ROOT}") + set(env_cuda_root_fail "${fail_base} environment variable CUDAToolkit_ROOT=$ENV{CUDAToolkit_ROOT}") + + if(CUDAToolkit_FIND_REQUIRED) + if(DEFINED CUDAToolkit_ROOT) + message(FATAL_ERROR ${cuda_root_fail}) + elseif(DEFINED ENV{CUDAToolkit_ROOT}) + message(FATAL_ERROR ${env_cuda_root_fail}) + endif() + else() + if(NOT CUDAToolkit_FIND_QUIETLY) + if(DEFINED CUDAToolkit_ROOT) + message(STATUS ${cuda_root_fail}) + elseif(DEFINED ENV{CUDAToolkit_ROOT}) + message(STATUS ${env_cuda_root_fail}) + endif() + endif() + set(CUDAToolkit_FOUND FALSE) + unset(fail_base) + unset(cuda_root_fail) + unset(env_cuda_root_fail) + return() + endif() + endif() + + # CUDAToolkit_ROOT cmake / env variable not specified, try platform defaults. + # + # - Linux: /usr/local/cuda-X.Y + # - macOS: /Developer/NVIDIA/CUDA-X.Y + # - Windows: C:\Program Files\NVIDIA GPU Computing Toolkit\CUDA\vX.Y + # + # We will also search the default symlink location /usr/local/cuda first since + # if CUDAToolkit_ROOT is not specified, it is assumed that the symlinked + # directory is the desired location. + if(NOT CUDAToolkit_ROOT_DIR) + if(UNIX) + if(NOT APPLE) + set(platform_base "/usr/local/cuda-") + else() + set(platform_base "/Developer/NVIDIA/CUDA-") + endif() + else() + set(platform_base "C:\\Program Files\\NVIDIA GPU Computing Toolkit\\CUDA\\v") + endif() + + # Build out a descending list of possible cuda installations, e.g. + file(GLOB possible_paths "${platform_base}*") + # Iterate the glob results and create a descending list. + set(versions) + foreach(p ${possible_paths}) + # Extract version number from end of string + string(REGEX MATCH "[0-9][0-9]?\\.[0-9]$" p_version ${p}) + if(IS_DIRECTORY ${p} AND p_version) + list(APPEND versions ${p_version}) + endif() + endforeach() + + # Sort numerically in descending order, so we try the newest versions first. + if(CMAKE_VERSION VERSION_GREATER_EQUAL 3.18) + list(SORT versions COMPARE NATURAL ORDER DESCENDING) + elseif(versions) + # Alphabetical sort here is not ideal but better than nothing + list(SORT versions) + list(REVERSE versions) + endif() + + # With a descending list of versions, populate possible paths to search. + set(search_paths) + foreach(v ${versions}) + list(APPEND search_paths "${platform_base}${v}") + endforeach() + + # Force the global default /usr/local/cuda to the front on Unix. + if(UNIX) + list(INSERT search_paths 0 "/usr/local/cuda") + endif() + + # Now search for the toolkit again using the platform default search paths. + _CUDAToolkit_find_root_dir(SEARCH_PATHS "${search_paths}" FIND_FLAGS PATH_SUFFIXES bin) + + # We are done with these variables now, cleanup for caller. 
+ unset(platform_base) + unset(possible_paths) + unset(versions) + unset(search_paths) + + if(NOT CUDAToolkit_ROOT_DIR) + if(CUDAToolkit_FIND_REQUIRED) + message(FATAL_ERROR "Could not find nvcc, please set CUDAToolkit_ROOT.") + elseif(NOT CUDAToolkit_FIND_QUIETLY) + message(STATUS "Could not find nvcc, please set CUDAToolkit_ROOT.") + endif() + + set(CUDAToolkit_FOUND FALSE) + return() + endif() + endif() +endif() + +if(NOT CUDAToolkit_BIN_DIR) + set(CUDAToolkit_BIN_DIR "${CUDAToolkit_ROOT_DIR}/bin") +endif() + +if(NOT CUDAToolkit_NVCC_EXECUTABLE) + set(CUDAToolkit_NVCC_EXECUTABLE "${CUDAToolkit_BIN_DIR}/nvcc${CMAKE_EXECUTABLE_SUFFIX}") +endif() + +if(CMAKE_CUDA_COMPILER_TOOLKIT_VERSION) + set(CUDAToolkit_VERSION "${CMAKE_CUDA_COMPILER_TOOLKIT_VERSION}") +else() + function(_CUDAToolkit_find_version_file result_variable) + # We first check for a non-scattered installation to prefer it over a scattered installation. + if(CUDAToolkit_ROOT AND EXISTS "${CUDAToolkit_ROOT}/version.txt") + set(${result_variable} "${CUDAToolkit_ROOT}/version.txt" PARENT_SCOPE) + elseif(CUDAToolkit_ROOT_DIR AND EXISTS "${CUDAToolkit_ROOT_DIR}/version.txt") + set(${result_variable} "${CUDAToolkit_ROOT_DIR}/version.txt" PARENT_SCOPE) + elseif(CMAKE_SYSROOT_LINK AND EXISTS "${CMAKE_SYSROOT_LINK}/usr/lib/cuda/version.txt") + set(${result_variable} "${CMAKE_SYSROOT_LINK}/usr/lib/cuda/version.txt" PARENT_SCOPE) + elseif(EXISTS "${CMAKE_SYSROOT}/usr/lib/cuda/version.txt") + set(${result_variable} "${CMAKE_SYSROOT}/usr/lib/cuda/version.txt" PARENT_SCOPE) + endif() + endfunction() + + _CUDAToolkit_find_version_file( _CUDAToolkit_version_file ) + if(_CUDAToolkit_version_file) + # CUDAToolkit_LIBRARY_ROOT contains the device library and version file. + get_filename_component(CUDAToolkit_LIBRARY_ROOT "${_CUDAToolkit_version_file}" DIRECTORY ABSOLUTE) + endif() + unset(_CUDAToolkit_version_file) + + if(CUDAToolkit_NVCC_EXECUTABLE AND + CMAKE_CUDA_COMPILER_VERSION AND + CUDAToolkit_NVCC_EXECUTABLE STREQUAL CMAKE_CUDA_COMPILER) + # Need to set these based off the already computed CMAKE_CUDA_COMPILER_VERSION value + # This if statement will always match, but is used to provide variables for MATCH 1,2,3... + if(CMAKE_CUDA_COMPILER_VERSION MATCHES [=[([0-9]+)\.([0-9]+)\.([0-9]+)]=]) + set(CUDAToolkit_VERSION_MAJOR "${CMAKE_MATCH_1}") + set(CUDAToolkit_VERSION_MINOR "${CMAKE_MATCH_2}") + set(CUDAToolkit_VERSION_PATCH "${CMAKE_MATCH_3}") + set(CUDAToolkit_VERSION "${CMAKE_CUDA_COMPILER_VERSION}") + endif() + elseif(CUDAToolkit_NVCC_EXECUTABLE) + # Compute the version by invoking nvcc + execute_process(COMMAND ${CUDAToolkit_NVCC_EXECUTABLE} "--version" OUTPUT_VARIABLE NVCC_OUT) + if(NVCC_OUT MATCHES [=[ V([0-9]+)\.([0-9]+)\.([0-9]+)]=]) + set(CUDAToolkit_VERSION_MAJOR "${CMAKE_MATCH_1}") + set(CUDAToolkit_VERSION_MINOR "${CMAKE_MATCH_2}") + set(CUDAToolkit_VERSION_PATCH "${CMAKE_MATCH_3}") + set(CUDAToolkit_VERSION "${CMAKE_MATCH_1}.${CMAKE_MATCH_2}.${CMAKE_MATCH_3}") + endif() + unset(NVCC_OUT) + else() + _CUDAToolkit_find_version_file(version_file) + if(version_file) + file(READ "${version_file}" VERSION_INFO) + if(VERSION_INFO MATCHES [=[CUDA Version ([0-9]+)\.([0-9]+)\.([0-9]+)]=]) + set(CUDAToolkit_VERSION_MAJOR "${CMAKE_MATCH_1}") + set(CUDAToolkit_VERSION_MINOR "${CMAKE_MATCH_2}") + set(CUDAToolkit_VERSION_PATCH "${CMAKE_MATCH_3}") + set(CUDAToolkit_VERSION "${CMAKE_MATCH_1}.${CMAKE_MATCH_2}.${CMAKE_MATCH_3}") + endif() + endif() + endif() +endif() + +# Find target directory when crosscompiling. 
+if(CMAKE_CROSSCOMPILING) + if(CMAKE_SYSTEM_PROCESSOR STREQUAL "armv7-a") + # Support for NVPACK + set(CUDAToolkit_TARGET_NAME "armv7-linux-androideabi") + elseif(CMAKE_SYSTEM_PROCESSOR MATCHES "arm") + set(CUDAToolkit_TARGET_NAME "armv7-linux-gnueabihf") + elseif(CMAKE_SYSTEM_PROCESSOR MATCHES "aarch64") + if(ANDROID_ARCH_NAME STREQUAL "arm64") + set(CUDAToolkit_TARGET_NAME "aarch64-linux-androideabi") + elseif(CMAKE_SYSTEM_NAME STREQUAL "QNX") + set(CUDAToolkit_TARGET_NAME "aarch64-qnx") + else() + set(CUDAToolkit_TARGET_NAME "aarch64-linux") + endif(ANDROID_ARCH_NAME STREQUAL "arm64") + elseif(CMAKE_SYSTEM_PROCESSOR STREQUAL "x86_64") + set(CUDAToolkit_TARGET_NAME "x86_64-linux") + endif() + + if(EXISTS "${CUDAToolkit_ROOT_DIR}/targets/${CUDAToolkit_TARGET_NAME}") + set(CUDAToolkit_TARGET_DIR "${CUDAToolkit_ROOT_DIR}/targets/${CUDAToolkit_TARGET_NAME}") + # add known CUDA target root path to the set of directories we search for programs, libraries and headers + list(PREPEND CMAKE_FIND_ROOT_PATH "${CUDAToolkit_TARGET_DIR}") + + # Mark that we need to pop the root search path changes after we have + # found all cuda libraries so that searches for our cross-compilation + # libraries work when another cuda sdk is in CMAKE_PREFIX_PATH or + # PATh + set(_CUDAToolkit_Pop_ROOT_PATH True) + endif() +endif() + +# If not already set we can simply use the toolkit root or it's a scattered installation. +if(NOT CUDAToolkit_TARGET_DIR) + # Not cross compiling + set(CUDAToolkit_TARGET_DIR "${CUDAToolkit_ROOT_DIR}") + # Now that we have the real ROOT_DIR, find components inside it. + list(APPEND CMAKE_PREFIX_PATH ${CUDAToolkit_ROOT_DIR}) + + # Mark that we need to pop the prefix path changes after we have + # found the cudart library. + set(_CUDAToolkit_Pop_Prefix True) +endif() + +# CUDAToolkit_TARGET_DIR always points to the directory containing the include directory. +# On a scattered installation /usr, on a non-scattered something like /usr/local/cuda or /usr/local/cuda-10.2/targets/aarch64-linux. +if(EXISTS "${CUDAToolkit_TARGET_DIR}/include/cuda_runtime.h") + set(CUDAToolkit_INCLUDE_DIR "${CUDAToolkit_TARGET_DIR}/include") +elseif(NOT CUDAToolkit_FIND_QUIETLY) + message(STATUS "Unable to find cuda_runtime.h in \"${CUDAToolkit_TARGET_DIR}/include\" for CUDAToolkit_INCLUDE_DIR.") +endif() + +# The NVHPC layout moves math library headers and libraries to a sibling directory. +# Create a separate variable so this directory can be selectively added to math targets. 
+if(NOT EXISTS "${CUDAToolkit_INCLUDE_DIR}/cublas_v2.h") + set(CUDAToolkit_MATH_INCLUDE_DIR "${CUDAToolkit_TARGET_DIR}/../../math_libs/include") + get_filename_component(CUDAToolkit_MATH_INCLUDE_DIR "${CUDAToolkit_MATH_INCLUDE_DIR}" ABSOLUTE) + if(NOT EXISTS "${CUDAToolkit_MATH_INCLUDE_DIR}/cublas_v2.h") + if(NOT CUDAToolkit_FIND_QUIETLY) + message(STATUS "Unable to find cublas_v2.h in either \"${CUDAToolkit_INCLUDE_DIR}\" or \"${CUDAToolkit_MATH_INCLUDE_DIR}\"") + endif() + unset(CUDAToolkit_MATH_INCLUDE_DIR) + endif() +endif() + +# Find the CUDA Runtime Library libcudart +find_library(CUDA_CUDART + NAMES cudart + PATH_SUFFIXES lib64 lib/x64 +) +find_library(CUDA_CUDART + NAMES cudart + PATH_SUFFIXES lib64/stubs lib/x64/stubs +) + +if(NOT CUDA_CUDART AND NOT CUDAToolkit_FIND_QUIETLY) + message(STATUS "Unable to find cudart library.") +endif() + +if(_CUDAToolkit_Pop_Prefix) + list(REMOVE_AT CMAKE_PREFIX_PATH -1) + unset(_CUDAToolkit_Pop_Prefix) +endif() + +#----------------------------------------------------------------------------- +# Perform version comparison and validate all required variables are set. +include(FindPackageHandleStandardArgs) +find_package_handle_standard_args(CUDAToolkit + REQUIRED_VARS + CUDAToolkit_INCLUDE_DIR + CUDAToolkit_VERSION + CUDA_CUDART + CUDAToolkit_BIN_DIR + VERSION_VAR + CUDAToolkit_VERSION +) + +mark_as_advanced(CUDA_CUDART + CUDAToolkit_INCLUDE_DIR + CUDAToolkit_NVCC_EXECUTABLE + CUDAToolkit_SENTINEL_FILE + ) + +#----------------------------------------------------------------------------- +# Construct result variables +if(CUDAToolkit_FOUND) + set(CUDAToolkit_INCLUDE_DIRS ${CUDAToolkit_INCLUDE_DIR}) + get_filename_component(CUDAToolkit_LIBRARY_DIR ${CUDA_CUDART} DIRECTORY ABSOLUTE) +endif() + +#----------------------------------------------------------------------------- +# Construct import targets +if(CUDAToolkit_FOUND) + + function(_CUDAToolkit_find_and_add_import_lib lib_name) + cmake_parse_arguments(arg "" "" "ALT;DEPS;EXTRA_HINTS;EXTRA_PATH_SUFFIXES;EXTRA_INCLUDE_DIRS" ${ARGN}) + + set(search_names ${lib_name} ${arg_ALT}) + + find_library(CUDA_${lib_name}_LIBRARY + NAMES ${search_names} + HINTS ${CUDAToolkit_LIBRARY_DIR} + ENV CUDA_PATH + ${arg_EXTRA_HINTS} + PATH_SUFFIXES nvidia/current lib64 lib/x64 lib + ${arg_EXTRA_PATH_SUFFIXES} + ) + # Don't try any stub directories until we have exhausted all other + # search locations. 
+ find_library(CUDA_${lib_name}_LIBRARY + NAMES ${search_names} + HINTS ${CUDAToolkit_LIBRARY_DIR} + ENV CUDA_PATH + ${arg_EXTRA_HINTS} + PATH_SUFFIXES lib64/stubs lib/x64/stubs lib/stubs stubs + # Support NVHPC splayed math library layout + ../../math_libs/${CUDAToolkit_VERSION_MAJOR}.${CUDAToolkit_VERSION_MINOR}/lib64 + ../../math_libs/lib64 + ) + + mark_as_advanced(CUDA_${lib_name}_LIBRARY) + + if(NOT TARGET CUDA::${lib_name} AND CUDA_${lib_name}_LIBRARY) + add_library(CUDA::${lib_name} UNKNOWN IMPORTED) + set_property(TARGET CUDA::${lib_name} APPEND PROPERTY + INTERFACE_INCLUDE_DIRECTORIES "${CUDAToolkit_INCLUDE_DIRS}") + set_property(TARGET CUDA::${lib_name} APPEND PROPERTY + INTERFACE_SYSTEM_INCLUDE_DIRECTORIES "${CUDAToolkit_INCLUDE_DIRS}") + if(DEFINED CUDAToolkit_MATH_INCLUDE_DIR) + string(FIND ${CUDA_${lib_name}_LIBRARY} "math_libs" math_libs) + if(NOT ${math_libs} EQUAL -1) + set_property(TARGET CUDA::${lib_name} APPEND PROPERTY + INTERFACE_INCLUDE_DIRECTORIES "${CUDAToolkit_MATH_INCLUDE_DIRS}") + set_property(TARGET CUDA::${lib_name} APPEND PROPERTY + INTERFACE_SYSTEM_INCLUDE_DIRECTORIES "${CUDAToolkit_MATH_INCLUDE_DIRS}") + endif() + endif() + set_property(TARGET CUDA::${lib_name} PROPERTY IMPORTED_LOCATION "${CUDA_${lib_name}_LIBRARY}") + foreach(dep ${arg_DEPS}) + if(TARGET CUDA::${dep}) + set_property(TARGET CUDA::${lib_name} APPEND PROPERTY + INTERFACE_LINK_LIBRARIES CUDA::${dep}) + endif() + endforeach() + if(arg_EXTRA_INCLUDE_DIRS) + set_property(TARGET CUDA::${lib_name} APPEND PROPERTY + INTERFACE_INCLUDE_DIRECTORIES "${arg_EXTRA_INCLUDE_DIRS}") + set_property(TARGET CUDA::${lib_name} APPEND PROPERTY + INTERFACE_SYSTEM_INCLUDE_DIRECTORIES "${arg_EXTRA_INCLUDE_DIRS}") + endif() + endif() + endfunction() + + if(NOT TARGET CUDA::toolkit) + add_library(CUDA::toolkit IMPORTED INTERFACE) + set_property(TARGET CUDA::toolkit APPEND PROPERTY + INTERFACE_INCLUDE_DIRECTORIES "${CUDAToolkit_INCLUDE_DIRS}") + set_property(TARGET CUDA::toolkit APPEND PROPERTY + INTERFACE_SYSTEM_INCLUDE_DIRECTORIES "${CUDAToolkit_INCLUDE_DIRS}") + endif() + + _CUDAToolkit_find_and_add_import_lib(cuda_driver ALT cuda) + + _CUDAToolkit_find_and_add_import_lib(cudart) + _CUDAToolkit_find_and_add_import_lib(cudart_static) + + # setup dependencies that are required for cudart_static when building + # on linux. These are generally only required when using the CUDA toolkit + # when CUDA language is disabled + if(NOT TARGET CUDA::cudart_static_deps + AND TARGET CUDA::cudart_static) + + add_library(CUDA::cudart_static_deps IMPORTED INTERFACE) + set_property(TARGET CUDA::cudart_static APPEND PROPERTY + INTERFACE_LINK_LIBRARIES CUDA::cudart_static_deps) + + if(UNIX AND (CMAKE_C_COMPILER OR CMAKE_CXX_COMPILER)) + find_package(Threads REQUIRED) + set_property(TARGET CUDA::cudart_static_deps APPEND PROPERTY + INTERFACE_LINK_LIBRARIES Threads::Threads ${CMAKE_DL_LIBS}) + endif() + + if(UNIX AND NOT APPLE AND NOT (CMAKE_SYSTEM_NAME STREQUAL "QNX")) + # On Linux, you must link against librt when using the static cuda runtime. 
+ find_library(CUDAToolkit_rt_LIBRARY rt) + mark_as_advanced(CUDAToolkit_rt_LIBRARY) + if(NOT CUDAToolkit_rt_LIBRARY) + message(WARNING "Could not find librt library, needed by CUDA::cudart_static") + else() + set_property(TARGET CUDA::cudart_static_deps APPEND PROPERTY + INTERFACE_LINK_LIBRARIES ${CUDAToolkit_rt_LIBRARY}) + endif() + endif() + endif() + + _CUDAToolkit_find_and_add_import_lib(culibos) # it's a static library + foreach(cuda_lib cublasLt cufft curand cusparse nppc nvjpeg) + _CUDAToolkit_find_and_add_import_lib(${cuda_lib}) + _CUDAToolkit_find_and_add_import_lib(${cuda_lib}_static DEPS culibos) + endforeach() + + if(CUDAToolkit_VERSION VERSION_GREATER_EQUAL 11.0.0) + # cublas depends on cublasLt + # https://docs.nvidia.com/cuda/archive/11.0/cublas/index.html#static-library + _CUDAToolkit_find_and_add_import_lib(cublas DEPS cublasLt) + _CUDAToolkit_find_and_add_import_lib(cublas_static DEPS cublasLt_static) + else() + _CUDAToolkit_find_and_add_import_lib(cublas) + _CUDAToolkit_find_and_add_import_lib(cublas_static DEPS culibos) + endif() + + # cuFFTW depends on cuFFT + _CUDAToolkit_find_and_add_import_lib(cufftw DEPS cufft) + _CUDAToolkit_find_and_add_import_lib(cufftw_static DEPS cufft_static) + if(CUDAToolkit_VERSION VERSION_GREATER_EQUAL 9.2) + _CUDAToolkit_find_and_add_import_lib(cufft_static_nocallback DEPS culibos) + endif() + + # cuSOLVER depends on cuBLAS, and cuSPARSE + _CUDAToolkit_find_and_add_import_lib(cusolver DEPS cublas cusparse) + _CUDAToolkit_find_and_add_import_lib(cusolver_static DEPS cublas_static cusparse_static culibos) + + + if(CUDAToolkit_VERSION VERSION_GREATER_EQUAL 10.1.2) + # cusolver depends on liblapack_static.a starting with CUDA 10.1 update 2, + # https://docs.nvidia.com/cuda/archive/11.5.0/cusolver/index.html#static-link-lapack + _CUDAToolkit_find_and_add_import_lib(cusolver_lapack_static ALT lapack_static) # implementation detail static lib + _CUDAToolkit_find_and_add_import_lib(cusolver_static DEPS cusolver_lapack_static) + endif() + + if(CUDAToolkit_VERSION VERSION_GREATER 11.2.1) + # cusolver depends on libcusolver_metis and cublasLt + # https://docs.nvidia.com/cuda/archive/11.2.2/cusolver/index.html#link-dependency + _CUDAToolkit_find_and_add_import_lib(cusolver DEPS cublasLt) + + _CUDAToolkit_find_and_add_import_lib(cusolver_metis_static ALT metis_static) # implementation detail static lib + _CUDAToolkit_find_and_add_import_lib(cusolver_static DEPS cusolver_metis_static cublasLt_static) + endif() + + # nvGRAPH depends on cuRAND, and cuSOLVER. + _CUDAToolkit_find_and_add_import_lib(nvgraph DEPS curand cusolver) + _CUDAToolkit_find_and_add_import_lib(nvgraph_static DEPS curand_static cusolver_static) + + # Process the majority of the NPP libraries. 
+ foreach(cuda_lib nppial nppicc nppidei nppif nppig nppim nppist nppitc npps nppicom nppisu) + _CUDAToolkit_find_and_add_import_lib(${cuda_lib} DEPS nppc) + _CUDAToolkit_find_and_add_import_lib(${cuda_lib}_static DEPS nppc_static) + endforeach() + + find_path(CUDAToolkit_CUPTI_INCLUDE_DIR cupti.h PATHS + "${CUDAToolkit_ROOT_DIR}/extras/CUPTI/include" + "${CUDAToolkit_INCLUDE_DIR}/../extras/CUPTI/include" + "${CUDAToolkit_INCLUDE_DIR}" + NO_DEFAULT_PATH) + mark_as_advanced(CUDAToolkit_CUPTI_INCLUDE_DIR) + + if(CUDAToolkit_CUPTI_INCLUDE_DIR) + _CUDAToolkit_find_and_add_import_lib(cupti + EXTRA_PATH_SUFFIXES ../extras/CUPTI/lib64/ + ../extras/CUPTI/lib/ + EXTRA_INCLUDE_DIRS "${CUDAToolkit_CUPTI_INCLUDE_DIR}") + _CUDAToolkit_find_and_add_import_lib(cupti_static + EXTRA_PATH_SUFFIXES ../extras/CUPTI/lib64/ + ../extras/CUPTI/lib/ + EXTRA_INCLUDE_DIRS "${CUDAToolkit_CUPTI_INCLUDE_DIR}") + endif() + + _CUDAToolkit_find_and_add_import_lib(nvrtc DEPS cuda_driver) + + _CUDAToolkit_find_and_add_import_lib(nvml ALT nvidia-ml nvml) + + # nvtools can be installed outside the CUDA toolkit directory, + # so search the NVTOOLSEXT_PATH windows only environment variable + set(nvToolsExt_EXTRA_PATH) + if(WIN32) + set(nvToolsExt_EXTRA_PATH "C:\\Program Files\\NVIDIA Corporation\\NvToolsExt") + endif() + + find_path(CUDAToolkit_nvToolsExt_INCLUDE_DIR nvToolsExt.h + PATHS "${CUDAToolkit_INCLUDE_DIR}" + "${CUDAToolkit_ROOT_DIR}" + ENV NVTOOLSEXT_PATH + "${nvToolsExt_EXTRA_PATH}" + PATH_SUFFIXES include + NO_DEFAULT_PATH) + mark_as_advanced(CUDAToolkit_nvToolsExt_INCLUDE_DIR) + + if(CUDAToolkit_nvToolsExt_INCLUDE_DIR) + _CUDAToolkit_find_and_add_import_lib(nvToolsExt + ALT nvToolsExt64 nvToolsExt64_1 + EXTRA_HINTS ENV NVTOOLSEXT_PATH + "${nvToolsExt_EXTRA_PATH}" + EXTRA_INCLUDE_DIRS "${CUDAToolkit_nvToolsExt_INCLUDE_DIR}") + endif() + + _CUDAToolkit_find_and_add_import_lib(OpenCL) +endif() + +unset(CUDAToolkit_ROOT_DIR) + +if(_CUDAToolkit_Pop_ROOT_PATH) + list(REMOVE_AT CMAKE_FIND_ROOT_PATH 0) + unset(_CUDAToolkit_Pop_ROOT_PATH) +endif() diff --git a/llmeval-env/lib/python3.10/site-packages/torch/share/cmake/Caffe2/FindCUSPARSELT.cmake b/llmeval-env/lib/python3.10/site-packages/torch/share/cmake/Caffe2/FindCUSPARSELT.cmake new file mode 100644 index 0000000000000000000000000000000000000000..6c15bde147469ddc84980dca0c756e8f26e1ddb1 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/share/cmake/Caffe2/FindCUSPARSELT.cmake @@ -0,0 +1,67 @@ +# Find the CUSPARSELT library +# +# The following variables are optionally searched for defaults +# CUSPARSELT_ROOT: Base directory where CUSPARSELT is found +# CUSPARSELT_INCLUDE_DIR: Directory where CUSPARSELT header is searched for +# CUSPARSELT_LIBRARY: Directory where CUSPARSELT library is searched for +# +# The following are set after configuration is done: +# CUSPARSELT_FOUND +# CUSPARSELT_INCLUDE_PATH +# CUSPARSELT_LIBRARY_PATH + +include(FindPackageHandleStandardArgs) + +set(CUSPARSELT_ROOT $ENV{CUSPARSELT_ROOT_DIR} CACHE PATH "Folder containing NVIDIA cuSPARSELt") +if (DEFINED $ENV{CUSPARSELT_ROOT_DIR}) + message(WARNING "CUSPARSELT_ROOT_DIR is deprecated. Please set CUSPARSELT_ROOT instead.") +endif() +list(APPEND CUSPARSELT_ROOT $ENV{CUSPARSELT_ROOT_DIR} ${CUDA_TOOLKIT_ROOT_DIR}) + +# Compatible layer for CMake <3.12. CUSPARSELT_ROOT will be accounted in for searching paths and libraries for CMake >=3.12. 
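+# (Appending CUSPARSELT_ROOT to CMAKE_PREFIX_PATH below provides the same
+# search hint on older CMake; a caller could equivalently configure with
+# -DCUSPARSELT_ROOT=/path/to/cusparselt, where the path is purely illustrative.)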
+list(APPEND CMAKE_PREFIX_PATH ${CUSPARSELT_ROOT}) + +set(CUSPARSELT_INCLUDE_DIR $ENV{CUSPARSELT_INCLUDE_DIR} CACHE PATH "Folder containing NVIDIA cuSPARSELt header files") + +find_path(CUSPARSELT_INCLUDE_PATH cusparseLt.h + HINTS ${CUSPARSELT_INCLUDE_DIR} + PATH_SUFFIXES cuda/include cuda include) + +set(CUSPARSELT_LIBRARY $ENV{CUSPARSELT_LIBRARY} CACHE PATH "Path to the cusparselt library file (e.g., libcusparseLt.so)") + +set(CUSPARSELT_LIBRARY_NAME "libcusparseLt.so") +if(MSVC) + set(CUSPARSELT_LIBRARY_NAME "cusparseLt.lib") +endif() + +find_library(CUSPARSELT_LIBRARY_PATH ${CUSPARSELT_LIBRARY_NAME} + PATHS ${CUSPARSELT_LIBRARY} + PATH_SUFFIXES lib lib64 cuda/lib cuda/lib64 lib/x64) + +find_package_handle_standard_args(CUSPARSELT DEFAULT_MSG CUSPARSELT_LIBRARY_PATH CUSPARSELT_INCLUDE_PATH) + +if(CUSPARSELT_FOUND) + # Get cuSPARSELt version + file(READ ${CUSPARSELT_INCLUDE_PATH}/cusparseLt.h CUSPARSELT_HEADER_CONTENTS) + string(REGEX MATCH "define CUSPARSELT_VER_MAJOR * +([0-9]+)" + CUSPARSELT_VERSION_MAJOR "${CUSPARSELT_HEADER_CONTENTS}") + string(REGEX REPLACE "define CUSPARSELT_VER_MAJOR * +([0-9]+)" "\\1" + CUSPARSELT_VERSION_MAJOR "${CUSPARSELT_VERSION_MAJOR}") + string(REGEX MATCH "define CUSPARSELT_VER_MINOR * +([0-9]+)" + CUSPARSELT_VERSION_MINOR "${CUSPARSELT_HEADER_CONTENTS}") + string(REGEX REPLACE "define CUSPARSELT_VER_MINOR * +([0-9]+)" "\\1" + CUSPARSELT_VERSION_MINOR "${CUSPARSELT_VERSION_MINOR}") + string(REGEX MATCH "define CUSPARSELT_VER_PATCH * +([0-9]+)" + CUSPARSELT_VERSION_PATCH "${CUSPARSELT_HEADER_CONTENTS}") + string(REGEX REPLACE "define CUSPARSELT_VER_PATCH * +([0-9]+)" "\\1" + CUSPARSELT_VERSION_PATCH "${CUSPARSELT_VERSION_PATCH}") + # Assemble cuSPARSELt version. Use minor version since current major version is 0. + if(NOT CUSPARSELT_VERSION_MINOR) + set(CUSPARSELT_VERSION "?") + else() + set(CUSPARSELT_VERSION + "${CUSPARSELT_VERSION_MAJOR}.${CUSPARSELT_VERSION_MINOR}.${CUSPARSELT_VERSION_PATCH}") + endif() +endif() + +mark_as_advanced(CUSPARSELT_ROOT CUSPARSELT_INCLUDE_DIR CUSPARSELT_LIBRARY CUSPARSELT_VERSION) diff --git a/llmeval-env/lib/python3.10/site-packages/torch/share/cmake/Caffe2/FindSYCLToolkit.cmake b/llmeval-env/lib/python3.10/site-packages/torch/share/cmake/Caffe2/FindSYCLToolkit.cmake new file mode 100644 index 0000000000000000000000000000000000000000..758c4378636b4bd4fdd54e56e2a897b360a1eaa1 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/share/cmake/Caffe2/FindSYCLToolkit.cmake @@ -0,0 +1,70 @@ +# This will define the following variables: +# SYCL_FOUND : True if the system has the SYCL library. +# SYCL_INCLUDE_DIR : Include directories needed to use SYCL. +# SYCL_LIBRARY_DIR :The path to the SYCL library. +# SYCL_LIBRARY : SYCL library fullname. + +include(FindPackageHandleStandardArgs) + +set(SYCL_ROOT "") +if(DEFINED ENV{SYCL_ROOT}) + set(SYCL_ROOT $ENV{SYCL_ROOT}) +elseif(DEFINED ENV{CMPLR_ROOT}) + set(SYCL_ROOT $ENV{CMPLR_ROOT}) +endif() + +string(COMPARE EQUAL "${SYCL_ROOT}" "" nosyclfound) +if(nosyclfound) + set(SYCL_FOUND False) + set(SYCL_REASON_FAILURE "SYCL library not set!!") + set(SYCL_NOT_FOUND_MESSAGE "${SYCL_REASON_FAILURE}") + return() +endif() + +# Find include path from binary. +find_file( + SYCL_INCLUDE_DIR + NAMES include + HINTS ${SYCL_ROOT} + NO_DEFAULT_PATH + ) + +# Find include/sycl path from include path. +find_file( + SYCL_INCLUDE_SYCL_DIR + NAMES sycl + HINTS ${SYCL_ROOT}/include/ + NO_DEFAULT_PATH + ) + +# Due to the unrecognized compilation option `-fsycl` in other compiler. 
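+# (Presumably the sycl/ sub-directory is appended explicitly because a plain
+# C/C++ compiler, which does not accept -fsycl, would otherwise not pick it up
+# implicitly.)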
+list(APPEND SYCL_INCLUDE_DIR ${SYCL_INCLUDE_SYCL_DIR})
+
+# Find library directory from binary.
+find_file(
+  SYCL_LIBRARY_DIR
+  NAMES lib lib64
+  HINTS ${SYCL_ROOT}
+  NO_DEFAULT_PATH
+  )
+
+# Find SYCL library fullname.
+find_library(
+  SYCL_LIBRARY
+  NAMES sycl
+  HINTS ${SYCL_LIBRARY_DIR}
+  NO_DEFAULT_PATH
+)
+
+if((NOT SYCL_INCLUDE_DIR) OR (NOT SYCL_LIBRARY_DIR) OR (NOT SYCL_LIBRARY))
+  set(SYCL_FOUND False)
+  set(SYCL_REASON_FAILURE "SYCL library is incomplete!!")
+  set(SYCL_NOT_FOUND_MESSAGE "${SYCL_REASON_FAILURE}")
+  return()
+endif()
+
+find_package_handle_standard_args(
+  SYCL
+  FOUND_VAR SYCL_FOUND
+  REQUIRED_VARS SYCL_INCLUDE_DIR SYCL_LIBRARY_DIR SYCL_LIBRARY
+  REASON_FAILURE_MESSAGE "${SYCL_REASON_FAILURE}")
diff --git a/llmeval-env/lib/python3.10/site-packages/torch/share/cmake/Caffe2/Modules_CUDA_fix/FindCUDA.cmake b/llmeval-env/lib/python3.10/site-packages/torch/share/cmake/Caffe2/Modules_CUDA_fix/FindCUDA.cmake
new file mode 100644
index 0000000000000000000000000000000000000000..017ea59578fe34dbca4984d09862d2359361180a
--- /dev/null
+++ b/llmeval-env/lib/python3.10/site-packages/torch/share/cmake/Caffe2/Modules_CUDA_fix/FindCUDA.cmake
@@ -0,0 +1,11 @@
+# This is a wrapper of the upstream `./upstream/FindCUDA.cmake` that
+# automatically includes `./upstream/CMakeInitializeConfigs.cmake` before
+# `./upstream/FindCUDA.cmake`. The `CMakeInitializeConfigs.cmake`, which is
+# absent in old CMake versions, creates some necessary variables for the latter
+# to run.
+# See ./README.md for details.
+
+set(UPSTREAM_FIND_CUDA_DIR "${CMAKE_CURRENT_LIST_DIR}/upstream/")
+
+include("${UPSTREAM_FIND_CUDA_DIR}/CMakeInitializeConfigs.cmake")
+include("${UPSTREAM_FIND_CUDA_DIR}/FindCUDA.cmake")
diff --git a/llmeval-env/lib/python3.10/site-packages/torch/share/cmake/Caffe2/Modules_CUDA_fix/upstream/CMakeInitializeConfigs.cmake b/llmeval-env/lib/python3.10/site-packages/torch/share/cmake/Caffe2/Modules_CUDA_fix/upstream/CMakeInitializeConfigs.cmake
new file mode 100644
index 0000000000000000000000000000000000000000..5517e8f0624b1e5538b761e1f4891227007d0045
--- /dev/null
+++ b/llmeval-env/lib/python3.10/site-packages/torch/share/cmake/Caffe2/Modules_CUDA_fix/upstream/CMakeInitializeConfigs.cmake
@@ -0,0 +1,40 @@
+# Distributed under the OSI-approved BSD 3-Clause License.  See accompanying
+# file Copyright.txt or https://cmake.org/licensing for details.
+
+# Present in upstream, but not supported on versions of cmake we need to support
+# include_guard(GLOBAL)
+
+# Initializes `<_PREFIX>_<CONFIG>` variables from the corresponding
+# `<_PREFIX>_<CONFIG>_INIT`, for the configurations currently used.
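+#
+# For example, the call cmake_initialize_per_config_variable(CUDA_NVCC_FLAGS
+# "Semi-colon delimit multiple arguments.") made later by FindCUDA.cmake
+# creates CUDA_NVCC_FLAGS plus CUDA_NVCC_FLAGS_DEBUG, CUDA_NVCC_FLAGS_RELEASE,
+# etc., each seeded from the matching *_INIT variable.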
+function(cmake_initialize_per_config_variable _PREFIX _DOCSTRING) + string(STRIP "${${_PREFIX}_INIT}" _INIT) + set("${_PREFIX}" "${_INIT}" + CACHE STRING "${_DOCSTRING} during all build types.") + mark_as_advanced("${_PREFIX}") + + if (NOT CMAKE_NOT_USING_CONFIG_FLAGS) + set(_CONFIGS Debug Release MinSizeRel RelWithDebInfo) + + get_property(_GENERATOR_IS_MULTI_CONFIG GLOBAL PROPERTY GENERATOR_IS_MULTI_CONFIG) + if (_GENERATOR_IS_MULTI_CONFIG) + list(APPEND _CONFIGS ${CMAKE_CONFIGURATION_TYPES}) + else() + if (NOT CMAKE_NO_BUILD_TYPE) + set(CMAKE_BUILD_TYPE "${CMAKE_BUILD_TYPE_INIT}" CACHE STRING + "Choose the type of build, options are: None Debug Release RelWithDebInfo MinSizeRel ...") + endif() + list(APPEND _CONFIGS ${CMAKE_BUILD_TYPE}) + endif() + + list(REMOVE_DUPLICATES _CONFIGS) + foreach(_BUILD_TYPE IN LISTS _CONFIGS) + if (NOT "${_BUILD_TYPE}" STREQUAL "") + string(TOUPPER "${_BUILD_TYPE}" _BUILD_TYPE) + string(STRIP "${${_PREFIX}_${_BUILD_TYPE}_INIT}" _INIT) + set("${_PREFIX}_${_BUILD_TYPE}" "${_INIT}" + CACHE STRING "${_DOCSTRING} during ${_BUILD_TYPE} builds.") + mark_as_advanced("${_PREFIX}_${_BUILD_TYPE}") + endif() + endforeach() + endif() +endfunction() diff --git a/llmeval-env/lib/python3.10/site-packages/torch/share/cmake/Caffe2/Modules_CUDA_fix/upstream/FindCUDA.cmake b/llmeval-env/lib/python3.10/site-packages/torch/share/cmake/Caffe2/Modules_CUDA_fix/upstream/FindCUDA.cmake new file mode 100644 index 0000000000000000000000000000000000000000..f642072bdc51c8511592ae9c679ca94dd063cadc --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/share/cmake/Caffe2/Modules_CUDA_fix/upstream/FindCUDA.cmake @@ -0,0 +1,1982 @@ +#.rst: +# FindCUDA +# -------- +# +# .. note:: +# +# The FindCUDA module has been superseded by first-class support +# for the CUDA language in CMake. It is no longer necessary to +# use this module or call ``find_package(CUDA)``. This module +# now exists only for compatibility with projects that have not +# been ported. +# +# Instead, list ``CUDA`` among the languages named in the top-level +# call to the :command:`project` command, or call the +# :command:`enable_language` command with ``CUDA``. +# Then one can add CUDA (``.cu``) sources to programs directly +# in calls to :command:`add_library` and :command:`add_executable`. +# +# Tools for building CUDA C files: libraries and build dependencies. +# +# This script locates the NVIDIA CUDA C tools. It should work on Linux, +# Windows, and macOS and should be reasonably up to date with CUDA C +# releases. +# +# This script makes use of the standard :command:`find_package` arguments of +# ````, ``REQUIRED`` and ``QUIET``. ``CUDA_FOUND`` will report if an +# acceptable version of CUDA was found. +# +# The script will prompt the user to specify ``CUDA_TOOLKIT_ROOT_DIR`` if +# the prefix cannot be determined by the location of nvcc in the system +# path and ``REQUIRED`` is specified to :command:`find_package`. To use +# a different installed version of the toolkit set the environment variable +# ``CUDA_BIN_PATH`` before running cmake (e.g. +# ``CUDA_BIN_PATH=/usr/local/cuda1.0`` instead of the default +# ``/usr/local/cuda``) or set ``CUDA_TOOLKIT_ROOT_DIR`` after configuring. If +# you change the value of ``CUDA_TOOLKIT_ROOT_DIR``, various components that +# depend on the path will be relocated. +# +# It might be necessary to set ``CUDA_TOOLKIT_ROOT_DIR`` manually on certain +# platforms, or to use a CUDA runtime not installed in the default +# location. 
In newer versions of the toolkit the CUDA library is
+# included with the graphics driver -- be sure that the driver version
+# matches what is needed by the CUDA runtime version.
+#
+# The following variables affect the behavior of the macros in the
+# script (in alphabetical order).  Note that any of these flags can be
+# changed multiple times in the same directory before calling
+# ``CUDA_ADD_EXECUTABLE``, ``CUDA_ADD_LIBRARY``, ``CUDA_COMPILE``,
+# ``CUDA_COMPILE_PTX``, ``CUDA_COMPILE_FATBIN``, ``CUDA_COMPILE_CUBIN``
+# or ``CUDA_WRAP_SRCS``::
+#
+#   CUDA_64_BIT_DEVICE_CODE (Default matches host bit size)
+#   -- Set to ON to compile for 64 bit device code, OFF for 32 bit device code.
+#      Note that making this different from the host code when generating object
+#      or C files from CUDA code just won't work, because size_t gets defined by
+#      nvcc in the generated source.  If you compile to PTX and then load the
+#      file yourself, you can mix bit sizes between device and host.
+#
+#   CUDA_ATTACH_VS_BUILD_RULE_TO_CUDA_FILE (Default ON)
+#   -- Set to ON if you want the custom build rule to be attached to the source
+#      file in Visual Studio.  Turn OFF if you add the same cuda file to multiple
+#      targets.
+#
+#      This allows the user to build the target from the CUDA file; however, bad
+#      things can happen if the CUDA source file is added to multiple targets.
+#      When performing parallel builds it is possible for the custom build
+#      command to be run more than once and in parallel causing cryptic build
+#      errors.  VS runs the rules for every source file in the target, and a
+#      source can have only one rule no matter how many projects it is added to.
+#      When the rule is run from multiple targets race conditions can occur on
+#      the generated file.  Eventually everything will get built, but if the user
+#      is unaware of this behavior, there may be confusion.  It would be nice if
+#      this script could detect the reuse of source files across multiple targets
+#      and turn the option off for the user, but no good solution could be found.
+#
+#   CUDA_BUILD_CUBIN (Default OFF)
+#   -- Set to ON to enable an extra compilation pass with the -cubin option in
+#      Device mode.  The output is parsed, and the register and shared memory
+#      usage is printed during the build.
+#
+#   CUDA_BUILD_EMULATION (Default OFF for device mode)
+#   -- Set to ON for Emulation mode. -D_DEVICEEMU is defined for CUDA C files
+#      when CUDA_BUILD_EMULATION is TRUE.
+#
+#   CUDA_LINK_LIBRARIES_KEYWORD (Default "")
+#   -- The keyword to use for internal
+#      target_link_libraries calls. The default is to use no keyword which
+#      uses the old "plain" form of target_link_libraries. Note that it matters
+#      because whatever is used inside the FindCUDA module must also be used
+#      outside - the two forms of target_link_libraries cannot be mixed.
+#
+#   CUDA_GENERATED_OUTPUT_DIR (Default CMAKE_CURRENT_BINARY_DIR)
+#   -- Set to the path you wish to have the generated files placed.  If it is
+#      blank output files will be placed in CMAKE_CURRENT_BINARY_DIR.
+#      Intermediate files will always be placed in
+#      CMAKE_CURRENT_BINARY_DIR/CMakeFiles.
+#
+#   CUDA_HOST_COMPILATION_CPP (Default ON)
+#   -- Set to OFF for C compilation of host code.
+#
+#   CUDA_HOST_COMPILER (Default CMAKE_C_COMPILER)
+#   -- Set the host compiler to be used by nvcc.  Ignored if -ccbin or
+#      --compiler-bindir is already present in the CUDA_NVCC_FLAGS or
+#      CUDA_NVCC_FLAGS_<CONFIG> variables.
For Visual Studio targets, +# the host compiler is constructed with one or more visual studio macros +# such as $(VCInstallDir), that expands out to the path when +# the command is run from within VS. +# If the CUDAHOSTCXX environment variable is set it will +# be used as the default. +# +# CUDA_NVCC_FLAGS +# CUDA_NVCC_FLAGS_ +# -- Additional NVCC command line arguments. NOTE: multiple arguments must be +# semi-colon delimited (e.g. --compiler-options;-Wall) +# +# CUDA_PROPAGATE_HOST_FLAGS (Default ON) +# -- Set to ON to propagate CMAKE_{C,CXX}_FLAGS and their configuration +# dependent counterparts (e.g. CMAKE_C_FLAGS_DEBUG) automatically to the +# host compiler through nvcc's -Xcompiler flag. This helps make the +# generated host code match the rest of the system better. Sometimes +# certain flags give nvcc problems, and this will help you turn the flag +# propagation off. This does not affect the flags supplied directly to nvcc +# via CUDA_NVCC_FLAGS or through the OPTION flags specified through +# CUDA_ADD_LIBRARY, CUDA_ADD_EXECUTABLE, or CUDA_WRAP_SRCS. Flags used for +# shared library compilation are not affected by this flag. +# +# CUDA_PROPAGATE_HOST_FLAGS_BLACKLIST (Default "") +# -- A list containing the host flags that should not be propagated when +# CUDA_PROPAGATE_HOST_FLAGS is ON. +# +# CUDA_SEPARABLE_COMPILATION (Default OFF) +# -- If set this will enable separable compilation for all CUDA runtime object +# files. If used outside of CUDA_ADD_EXECUTABLE and CUDA_ADD_LIBRARY +# (e.g. calling CUDA_WRAP_SRCS directly), +# CUDA_COMPUTE_SEPARABLE_COMPILATION_OBJECT_FILE_NAME and +# CUDA_LINK_SEPARABLE_COMPILATION_OBJECTS should be called. +# +# CUDA_SOURCE_PROPERTY_FORMAT +# -- If this source file property is set, it can override the format specified +# to CUDA_WRAP_SRCS (OBJ, PTX, CUBIN, or FATBIN). If an input source file +# is not a .cu file, setting this file will cause it to be treated as a .cu +# file. See documentation for set_source_files_properties on how to set +# this property. +# +# CUDA_USE_STATIC_CUDA_RUNTIME (Default ON) +# -- When enabled the static version of the CUDA runtime library will be used +# in CUDA_LIBRARIES. If the version of CUDA configured doesn't support +# this option, then it will be silently disabled. +# +# CUDA_VERBOSE_BUILD (Default OFF) +# -- Set to ON to see all the commands used when building the CUDA file. When +# using a Makefile generator the value defaults to VERBOSE (run make +# VERBOSE=1 to see output), although setting CUDA_VERBOSE_BUILD to ON will +# always print the output. +# +# The script creates the following macros (in alphebetical order):: +# +# CUDA_ADD_CUFFT_TO_TARGET( cuda_target ) +# -- Adds the cufft library to the target (can be any target). Handles whether +# you are in emulation mode or not. +# +# CUDA_ADD_CUBLAS_TO_TARGET( cuda_target ) +# -- Adds the cublas library to the target (can be any target). Handles +# whether you are in emulation mode or not. +# +# CUDA_ADD_EXECUTABLE( cuda_target file0 file1 ... +# [WIN32] [MACOSX_BUNDLE] [EXCLUDE_FROM_ALL] [OPTIONS ...] ) +# -- Creates an executable "cuda_target" which is made up of the files +# specified. All of the non CUDA C files are compiled using the standard +# build rules specified by CMAKE and the cuda files are compiled to object +# files using nvcc and the host compiler. In addition CUDA_INCLUDE_DIRS is +# added automatically to include_directories(). Some standard CMake target +# calls can be used on the target after calling this macro +# (e.g. 
set_target_properties and target_link_libraries), but setting +# properties that adjust compilation flags will not affect code compiled by +# nvcc. Such flags should be modified before calling CUDA_ADD_EXECUTABLE, +# CUDA_ADD_LIBRARY or CUDA_WRAP_SRCS. +# +# CUDA_ADD_LIBRARY( cuda_target file0 file1 ... +# [STATIC | SHARED | MODULE] [EXCLUDE_FROM_ALL] [OPTIONS ...] ) +# -- Same as CUDA_ADD_EXECUTABLE except that a library is created. +# +# CUDA_BUILD_CLEAN_TARGET() +# -- Creates a convenience target that deletes all the dependency files +# generated. You should make clean after running this target to ensure the +# dependency files get regenerated. +# +# CUDA_COMPILE( generated_files file0 file1 ... [STATIC | SHARED | MODULE] +# [OPTIONS ...] ) +# -- Returns a list of generated files from the input source files to be used +# with ADD_LIBRARY or ADD_EXECUTABLE. +# +# CUDA_COMPILE_PTX( generated_files file0 file1 ... [OPTIONS ...] ) +# -- Returns a list of PTX files generated from the input source files. +# +# CUDA_COMPILE_FATBIN( generated_files file0 file1 ... [OPTIONS ...] ) +# -- Returns a list of FATBIN files generated from the input source files. +# +# CUDA_COMPILE_CUBIN( generated_files file0 file1 ... [OPTIONS ...] ) +# -- Returns a list of CUBIN files generated from the input source files. +# +# CUDA_COMPUTE_SEPARABLE_COMPILATION_OBJECT_FILE_NAME( output_file_var +# cuda_target +# object_files ) +# -- Compute the name of the intermediate link file used for separable +# compilation. This file name is typically passed into +# CUDA_LINK_SEPARABLE_COMPILATION_OBJECTS. output_file_var is produced +# based on cuda_target the list of objects files that need separable +# compilation as specified by object_files. If the object_files list is +# empty, then output_file_var will be empty. This function is called +# automatically for CUDA_ADD_LIBRARY and CUDA_ADD_EXECUTABLE. Note that +# this is a function and not a macro. +# +# CUDA_INCLUDE_DIRECTORIES( path0 path1 ... ) +# -- Sets the directories that should be passed to nvcc +# (e.g. nvcc -Ipath0 -Ipath1 ... ). These paths usually contain other .cu +# files. +# +# +# CUDA_LINK_SEPARABLE_COMPILATION_OBJECTS( output_file_var cuda_target +# nvcc_flags object_files) +# -- Generates the link object required by separable compilation from the given +# object files. This is called automatically for CUDA_ADD_EXECUTABLE and +# CUDA_ADD_LIBRARY, but can be called manually when using CUDA_WRAP_SRCS +# directly. When called from CUDA_ADD_LIBRARY or CUDA_ADD_EXECUTABLE the +# nvcc_flags passed in are the same as the flags passed in via the OPTIONS +# argument. The only nvcc flag added automatically is the bitness flag as +# specified by CUDA_64_BIT_DEVICE_CODE. Note that this is a function +# instead of a macro. +# +# CUDA_SELECT_NVCC_ARCH_FLAGS(out_variable [target_CUDA_architectures]) +# -- Selects GPU arch flags for nvcc based on target_CUDA_architectures +# target_CUDA_architectures : Auto | Common | All | LIST(ARCH_AND_PTX ...) +# - "Auto" detects local machine GPU compute arch at runtime. +# - "Common" and "All" cover common and entire subsets of architectures +# ARCH_AND_PTX : NAME | NUM.NUM | NUM.NUM(NUM.NUM) | NUM.NUM+PTX +# NAME: Kepler Maxwell Kepler+Tesla Maxwell+Tegra Pascal Volta Turing +# NUM: Any number. 
Only those pairs are currently accepted by NVCC though: +# 3.5 3.7 5.0 5.2 5.3 6.0 6.1 6.2 7.0 7.2 7.5 +# Returns LIST of flags to be added to CUDA_NVCC_FLAGS in ${out_variable} +# Additionally, sets ${out_variable}_readable to the resulting numeric list +# Example: +# CUDA_SELECT_NVCC_ARCH_FLAGS(ARCH_FLAGS 3.0 3.5+PTX 5.2(5.0) Maxwell) +# LIST(APPEND CUDA_NVCC_FLAGS ${ARCH_FLAGS}) +# +# More info on CUDA architectures: https://en.wikipedia.org/wiki/CUDA +# Note that this is a function instead of a macro. +# +# CUDA_WRAP_SRCS ( cuda_target format generated_files file0 file1 ... +# [STATIC | SHARED | MODULE] [OPTIONS ...] ) +# -- This is where all the magic happens. CUDA_ADD_EXECUTABLE, +# CUDA_ADD_LIBRARY, CUDA_COMPILE, and CUDA_COMPILE_PTX all call this +# function under the hood. +# +# Given the list of files (file0 file1 ... fileN) this macro generates +# custom commands that generate either PTX or linkable objects (use "PTX" or +# "OBJ" for the format argument to switch). Files that don't end with .cu +# or have the HEADER_FILE_ONLY property are ignored. +# +# The arguments passed in after OPTIONS are extra command line options to +# give to nvcc. You can also specify per configuration options by +# specifying the name of the configuration followed by the options. General +# options must precede configuration specific options. Not all +# configurations need to be specified, only the ones provided will be used. +# +# OPTIONS -DFLAG=2 "-DFLAG_OTHER=space in flag" +# DEBUG -g +# RELEASE --use_fast_math +# RELWITHDEBINFO --use_fast_math;-g +# MINSIZEREL --use_fast_math +# +# For certain configurations (namely VS generating object files with +# CUDA_ATTACH_VS_BUILD_RULE_TO_CUDA_FILE set to ON), no generated file will +# be produced for the given cuda file. This is because when you add the +# cuda file to Visual Studio it knows that this file produces an object file +# and will link in the resulting object file automatically. +# +# This script will also generate a separate cmake script that is used at +# build time to invoke nvcc. This is for several reasons. +# +# 1. nvcc can return negative numbers as return values which confuses +# Visual Studio into thinking that the command succeeded. The script now +# checks the error codes and produces errors when there was a problem. +# +# 2. nvcc has been known to not delete incomplete results when it +# encounters problems. This confuses build systems into thinking the +# target was generated when in fact an unusable file exists. The script +# now deletes the output files if there was an error. +# +# 3. By putting all the options that affect the build into a file and then +# make the build rule dependent on the file, the output files will be +# regenerated when the options change. +# +# This script also looks at optional arguments STATIC, SHARED, or MODULE to +# determine when to target the object compilation for a shared library. +# BUILD_SHARED_LIBS is ignored in CUDA_WRAP_SRCS, but it is respected in +# CUDA_ADD_LIBRARY. On some systems special flags are added for building +# objects intended for shared libraries. A preprocessor macro, +# _EXPORTS is defined when a shared library compilation is +# detected. +# +# Flags passed into add_definitions with -D or /D are passed along to nvcc. +# +# +# +# The script defines the following variables:: +# +# CUDA_VERSION_MAJOR -- The major version of cuda as reported by nvcc. +# CUDA_VERSION_MINOR -- The minor version. 
+# CUDA_VERSION +# CUDA_VERSION_STRING -- CUDA_VERSION_MAJOR.CUDA_VERSION_MINOR +# CUDA_HAS_FP16 -- Whether a short float (float16,fp16) is supported. +# +# CUDA_TOOLKIT_ROOT_DIR -- Path to the CUDA Toolkit (defined if not set). +# CUDA_SDK_ROOT_DIR -- Path to the CUDA SDK. Use this to find files in the +# SDK. This script will not directly support finding +# specific libraries or headers, as that isn't +# supported by NVIDIA. If you want to change +# libraries when the path changes see the +# FindCUDA.cmake script for an example of how to clear +# these variables. There are also examples of how to +# use the CUDA_SDK_ROOT_DIR to locate headers or +# libraries, if you so choose (at your own risk). +# CUDA_INCLUDE_DIRS -- Include directory for cuda headers. Added automatically +# for CUDA_ADD_EXECUTABLE and CUDA_ADD_LIBRARY. +# CUDA_LIBRARIES -- Cuda RT library. +# CUDA_CUFFT_LIBRARIES -- Device or emulation library for the Cuda FFT +# implementation (alternative to: +# CUDA_ADD_CUFFT_TO_TARGET macro) +# CUDA_CUBLAS_LIBRARIES -- Device or emulation library for the Cuda BLAS +# implementation (alternative to: +# CUDA_ADD_CUBLAS_TO_TARGET macro). +# CUDA_cudart_static_LIBRARY -- Statically linkable cuda runtime library. +# Only available for CUDA version 5.5+ +# CUDA_cudadevrt_LIBRARY -- Device runtime library. +# Required for separable compilation. +# CUDA_cupti_LIBRARY -- CUDA Profiling Tools Interface library. +# Only available for CUDA version 4.0+. +# CUDA_curand_LIBRARY -- CUDA Random Number Generation library. +# Only available for CUDA version 3.2+. +# CUDA_cusolver_LIBRARY -- CUDA Direct Solver library. +# Only available for CUDA version 7.0+. +# CUDA_cusparse_LIBRARY -- CUDA Sparse Matrix library. +# Only available for CUDA version 3.2+. +# CUDA_npp_LIBRARY -- NVIDIA Performance Primitives lib. +# Only available for CUDA version 4.0+. +# CUDA_nppc_LIBRARY -- NVIDIA Performance Primitives lib (core). +# Only available for CUDA version 5.5+. +# CUDA_nppi_LIBRARY -- NVIDIA Performance Primitives lib (image processing). +# Only available for CUDA version 5.5 - 8.0. +# CUDA_nppial_LIBRARY -- NVIDIA Performance Primitives lib (image processing). +# Only available for CUDA version 9.0. +# CUDA_nppicc_LIBRARY -- NVIDIA Performance Primitives lib (image processing). +# Only available for CUDA version 9.0. +# CUDA_nppicom_LIBRARY -- NVIDIA Performance Primitives lib (image processing). +# Only available for CUDA version 9.0. +# CUDA_nppidei_LIBRARY -- NVIDIA Performance Primitives lib (image processing). +# Only available for CUDA version 9.0. +# CUDA_nppif_LIBRARY -- NVIDIA Performance Primitives lib (image processing). +# Only available for CUDA version 9.0. +# CUDA_nppig_LIBRARY -- NVIDIA Performance Primitives lib (image processing). +# Only available for CUDA version 9.0. +# CUDA_nppim_LIBRARY -- NVIDIA Performance Primitives lib (image processing). +# Only available for CUDA version 9.0. +# CUDA_nppist_LIBRARY -- NVIDIA Performance Primitives lib (image processing). +# Only available for CUDA version 9.0. +# CUDA_nppisu_LIBRARY -- NVIDIA Performance Primitives lib (image processing). +# Only available for CUDA version 9.0. +# CUDA_nppitc_LIBRARY -- NVIDIA Performance Primitives lib (image processing). +# Only available for CUDA version 9.0. +# CUDA_npps_LIBRARY -- NVIDIA Performance Primitives lib (signal processing). +# Only available for CUDA version 5.5+. +# CUDA_nvcuvenc_LIBRARY -- CUDA Video Encoder library. +# Only available for CUDA version 3.2+. +# Windows only. 
+# CUDA_nvcuvid_LIBRARY -- CUDA Video Decoder library. +# Only available for CUDA version 3.2+. +# Windows only. +# + +# James Bigler, NVIDIA Corp (nvidia.com - jbigler) +# Abe Stephens, SCI Institute -- http://www.sci.utah.edu/~abe/FindCuda.html +# +# Copyright (c) 2008 - 2009 NVIDIA Corporation. All rights reserved. +# +# Copyright (c) 2007-2009 +# Scientific Computing and Imaging Institute, University of Utah +# +# This code is licensed under the MIT License. See the FindCUDA.cmake script +# for the text of the license. + +# The MIT License +# +# License for the specific language governing rights and limitations under +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the "Software"), +# to deal in the Software without restriction, including without limitation +# the rights to use, copy, modify, merge, publish, distribute, sublicense, +# and/or sell copies of the Software, and to permit persons to whom the +# Software is furnished to do so, subject to the following conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL +# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING +# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER +# DEALINGS IN THE SOFTWARE. +# +############################################################################### + +# FindCUDA.cmake + +# This macro helps us find the location of helper files we will need the full path to +macro(CUDA_FIND_HELPER_FILE _name _extension) + set(_full_name "${_name}.${_extension}") + # CMAKE_CURRENT_LIST_FILE contains the full path to the file currently being + # processed. Using this variable, we can pull out the current path, and + # provide a way to get access to the other files we need local to here. + get_filename_component(CMAKE_CURRENT_LIST_DIR "${CMAKE_CURRENT_LIST_FILE}" PATH) + set(CUDA_${_name} "${CMAKE_CURRENT_LIST_DIR}/FindCUDA/${_full_name}") + if(NOT EXISTS "${CUDA_${_name}}") + set(error_message "${_full_name} not found in ${CMAKE_CURRENT_LIST_DIR}/FindCUDA") + if(CUDA_FIND_REQUIRED) + message(FATAL_ERROR "${error_message}") + else() + if(NOT CUDA_FIND_QUIETLY) + message(STATUS "${error_message}") + endif() + endif() + endif() + # Set this variable as internal, so the user isn't bugged with it. + set(CUDA_${_name} ${CUDA_${_name}} CACHE INTERNAL "Location of ${_full_name}" FORCE) +endmacro() + +##################################################################### +## CUDA_INCLUDE_NVCC_DEPENDENCIES +## + +# So we want to try and include the dependency file if it exists. If +# it doesn't exist then we need to create an empty one, so we can +# include it. + +# If it does exist, then we need to check to see if all the files it +# depends on exist. If they don't then we should clear the dependency +# file and regenerate it later. This covers the case where a header +# file has disappeared or moved. + +macro(CUDA_INCLUDE_NVCC_DEPENDENCIES dependency_file) + set(CUDA_NVCC_DEPEND) + set(CUDA_NVCC_DEPEND_REGENERATE FALSE) + + + # Include the dependency file. Create it first if it doesn't exist . 
The + # INCLUDE puts a dependency that will force CMake to rerun and bring in the + # new info when it changes. DO NOT REMOVE THIS (as I did and spent a few + # hours figuring out why it didn't work. + if(NOT EXISTS ${dependency_file}) + file(WRITE ${dependency_file} "#FindCUDA.cmake generated file. Do not edit.\n") + endif() + # Always include this file to force CMake to run again next + # invocation and rebuild the dependencies. + #message("including dependency_file = ${dependency_file}") + include(${dependency_file}) + + # Now we need to verify the existence of all the included files + # here. If they aren't there we need to just blank this variable and + # make the file regenerate again. +# if(DEFINED CUDA_NVCC_DEPEND) +# message("CUDA_NVCC_DEPEND set") +# else() +# message("CUDA_NVCC_DEPEND NOT set") +# endif() + if(CUDA_NVCC_DEPEND) + #message("CUDA_NVCC_DEPEND found") + foreach(f ${CUDA_NVCC_DEPEND}) + # message("searching for ${f}") + if(NOT EXISTS ${f}) + #message("file ${f} not found") + set(CUDA_NVCC_DEPEND_REGENERATE TRUE) + endif() + endforeach() + else() + #message("CUDA_NVCC_DEPEND false") + # No dependencies, so regenerate the file. + set(CUDA_NVCC_DEPEND_REGENERATE TRUE) + endif() + + #message("CUDA_NVCC_DEPEND_REGENERATE = ${CUDA_NVCC_DEPEND_REGENERATE}") + # No incoming dependencies, so we need to generate them. Make the + # output depend on the dependency file itself, which should cause the + # rule to re-run. + if(CUDA_NVCC_DEPEND_REGENERATE) + set(CUDA_NVCC_DEPEND ${dependency_file}) + #message("Generating an empty dependency_file: ${dependency_file}") + file(WRITE ${dependency_file} "#FindCUDA.cmake generated file. Do not edit.\n") + endif() + +endmacro() + +############################################################################### +############################################################################### +# Setup variables' defaults +############################################################################### +############################################################################### + +# Allow the user to specify if the device code is supposed to be 32 or 64 bit. +if(CMAKE_SIZEOF_VOID_P EQUAL 8) + set(CUDA_64_BIT_DEVICE_CODE_DEFAULT ON) +else() + set(CUDA_64_BIT_DEVICE_CODE_DEFAULT OFF) +endif() +option(CUDA_64_BIT_DEVICE_CODE "Compile device code in 64 bit mode" ${CUDA_64_BIT_DEVICE_CODE_DEFAULT}) + +# Attach the build rule to the source file in VS. This option +option(CUDA_ATTACH_VS_BUILD_RULE_TO_CUDA_FILE "Attach the build rule to the CUDA source file. Enable only when the CUDA source file is added to at most one target." ON) + +# Prints out extra information about the cuda file during compilation +option(CUDA_BUILD_CUBIN "Generate and parse .cubin files in Device mode." OFF) + +# Set whether we are using emulation or device mode. +option(CUDA_BUILD_EMULATION "Build in Emulation mode" OFF) + +# Where to put the generated output. +set(CUDA_GENERATED_OUTPUT_DIR "" CACHE PATH "Directory to put all the output files. If blank it will default to the CMAKE_CURRENT_BINARY_DIR") + +# Parse HOST_COMPILATION mode. 
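+# (When CUDA_HOST_COMPILATION_CPP is ON -- the default -- the host code that
+# nvcc generates from .cu files is emitted with a C++ extension and compiled
+# as C++; turning it OFF compiles that host code as C instead.)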
+option(CUDA_HOST_COMPILATION_CPP "Generated file extension" ON) + +# Extra user settable flags +cmake_initialize_per_config_variable(CUDA_NVCC_FLAGS "Semi-colon delimit multiple arguments.") + +if(DEFINED ENV{CUDAHOSTCXX}) + set(CUDA_HOST_COMPILER "$ENV{CUDAHOSTCXX}" CACHE FILEPATH "Host side compiler used by NVCC") +elseif(CMAKE_GENERATOR MATCHES "Visual Studio") + set(_CUDA_MSVC_HOST_COMPILER "$(VCInstallDir)Tools/MSVC/$(VCToolsVersion)/bin/Host$(Platform)/$(PlatformTarget)") + if(MSVC_VERSION LESS 1910) + set(_CUDA_MSVC_HOST_COMPILER "$(VCInstallDir)bin") + endif() + + set(CUDA_HOST_COMPILER "${_CUDA_MSVC_HOST_COMPILER}" CACHE FILEPATH "Host side compiler used by NVCC") + +else() + if(APPLE + AND "${CMAKE_C_COMPILER_ID}" MATCHES "Clang" + AND "${CMAKE_C_COMPILER}" MATCHES "/cc$") + # Using cc which is symlink to clang may let NVCC think it is GCC and issue + # unhandled -dumpspecs option to clang. Also in case neither + # CMAKE_C_COMPILER is defined (project does not use C language) nor + # CUDA_HOST_COMPILER is specified manually we should skip -ccbin and let + # nvcc use its own default C compiler. + # Only care about this on APPLE with clang to avoid + # following symlinks to things like ccache + if(DEFINED CMAKE_C_COMPILER AND NOT DEFINED CUDA_HOST_COMPILER) + get_filename_component(c_compiler_realpath "${CMAKE_C_COMPILER}" REALPATH) + # if the real path does not end up being clang then + # go back to using CMAKE_C_COMPILER + if(NOT "${c_compiler_realpath}" MATCHES "/clang$") + set(c_compiler_realpath "${CMAKE_C_COMPILER}") + endif() + else() + set(c_compiler_realpath "") + endif() + set(CUDA_HOST_COMPILER "${c_compiler_realpath}" CACHE FILEPATH "Host side compiler used by NVCC") + elseif(MSVC AND "${CMAKE_C_COMPILER}" MATCHES "clcache|sccache") + # NVCC does not think it will work if it is passed clcache.exe or sccache.exe + # as the host compiler, which means that builds with CC=cl.exe won't work. + # Best to just feed it whatever the actual cl.exe is as the host compiler. + set(CUDA_HOST_COMPILER "cl.exe" CACHE FILEPATH "Host side compiler used by NVCC") + else() + set(CUDA_HOST_COMPILER "${CMAKE_C_COMPILER}" + CACHE FILEPATH "Host side compiler used by NVCC") + endif() +endif() + +# Propagate the host flags to the host compiler via -Xcompiler +option(CUDA_PROPAGATE_HOST_FLAGS "Propagate C/CXX_FLAGS and friends to the host compiler via -Xcompile" ON) + +# Blacklisted flags to prevent propagation +set(CUDA_PROPAGATE_HOST_FLAGS_BLACKLIST "" CACHE STRING "Blacklisted flags to prevent propagation") + +# Enable CUDA_SEPARABLE_COMPILATION +option(CUDA_SEPARABLE_COMPILATION "Compile CUDA objects with separable compilation enabled. Requires CUDA 5.0+" OFF) + +# Specifies whether the commands used when compiling the .cu file will be printed out. +option(CUDA_VERBOSE_BUILD "Print out the commands run while compiling the CUDA source file. With the Makefile generator this defaults to VERBOSE variable specified on the command line, but can be forced on with this option." OFF) + +mark_as_advanced( + CUDA_64_BIT_DEVICE_CODE + CUDA_ATTACH_VS_BUILD_RULE_TO_CUDA_FILE + CUDA_GENERATED_OUTPUT_DIR + CUDA_HOST_COMPILATION_CPP + CUDA_NVCC_FLAGS + CUDA_PROPAGATE_HOST_FLAGS + CUDA_PROPAGATE_HOST_FLAGS_BLACKLIST + CUDA_BUILD_CUBIN + CUDA_BUILD_EMULATION + CUDA_VERBOSE_BUILD + CUDA_SEPARABLE_COMPILATION + ) + +# Single config generators like Makefiles or Ninja don't usually have +# CMAKE_CONFIGURATION_TYPES defined (but note that it can be defined if set by +# projects or developers). 
Even CMAKE_BUILD_TYPE might not be defined for +# single config generators (and should not be defined for multi-config +# generators). To ensure we get a complete superset of all possible +# configurations, we combine CMAKE_CONFIGURATION_TYPES, CMAKE_BUILD_TYPE and +# all of the standard configurations, then weed out duplicates with +# list(REMOVE_DUPLICATES). Looping over the unique set then ensures we have +# each configuration-specific set of nvcc flags defined and marked as advanced. +set(CUDA_configuration_types ${CMAKE_CONFIGURATION_TYPES} ${CMAKE_BUILD_TYPE} Debug MinSizeRel Release RelWithDebInfo) +list(REMOVE_DUPLICATES CUDA_configuration_types) + +############################################################################### +############################################################################### +# Locate CUDA, Set Build Type, etc. +############################################################################### +############################################################################### + +macro(cuda_unset_include_and_libraries) + unset(CUDA_TOOLKIT_INCLUDE CACHE) + unset(CUDA_CUDART_LIBRARY CACHE) + unset(CUDA_CUDA_LIBRARY CACHE) + # Make sure you run this before you unset CUDA_VERSION. + unset(CUDA_cudart_static_LIBRARY CACHE) + unset(CUDA_cudadevrt_LIBRARY CACHE) + unset(CUDA_cublas_LIBRARY CACHE) + unset(CUDA_cublas_device_LIBRARY CACHE) + unset(CUDA_cublasemu_LIBRARY CACHE) + unset(CUDA_cublasLt_LIBRARY CACHE) + unset(CUDA_cufft_LIBRARY CACHE) + unset(CUDA_cufftemu_LIBRARY CACHE) + unset(CUDA_cupti_LIBRARY CACHE) + unset(CUDA_curand_LIBRARY CACHE) + unset(CUDA_cusolver_LIBRARY CACHE) + unset(CUDA_cusparse_LIBRARY CACHE) + unset(CUDA_npp_LIBRARY CACHE) + unset(CUDA_nppc_LIBRARY CACHE) + unset(CUDA_nppi_LIBRARY CACHE) + unset(CUDA_npps_LIBRARY CACHE) + unset(CUDA_nvcuvenc_LIBRARY CACHE) + unset(CUDA_nvcuvid_LIBRARY CACHE) + unset(CUDA_GPU_DETECT_OUTPUT CACHE) +endmacro() + +# Check to see if the CUDA_TOOLKIT_ROOT_DIR and CUDA_SDK_ROOT_DIR have changed, +# if they have then clear the cache variables, so that will be detected again. +if(NOT "${CUDA_TOOLKIT_ROOT_DIR}" STREQUAL "${CUDA_TOOLKIT_ROOT_DIR_INTERNAL}") + unset(CUDA_TOOLKIT_TARGET_DIR CACHE) + unset(CUDA_NVCC_EXECUTABLE CACHE) + cuda_unset_include_and_libraries() + unset(CUDA_VERSION CACHE) +endif() + +if(NOT "${CUDA_TOOLKIT_TARGET_DIR}" STREQUAL "${CUDA_TOOLKIT_TARGET_DIR_INTERNAL}") + cuda_unset_include_and_libraries() +endif() + +# +# End of unset() +# + +# +# Start looking for things +# + +# Search for the cuda distribution. +if(NOT CUDA_TOOLKIT_ROOT_DIR AND NOT CMAKE_CROSSCOMPILING) + # Search in the CUDA_BIN_PATH first. + find_program(CUDA_TOOLKIT_ROOT_DIR_NVCC + NAMES nvcc nvcc.exe + PATHS + ENV CUDA_TOOLKIT_ROOT + ENV CUDA_PATH + ENV CUDA_BIN_PATH + PATH_SUFFIXES bin bin64 + DOC "Toolkit location." + NO_DEFAULT_PATH + ) + + # Now search default paths + find_program(CUDA_TOOLKIT_ROOT_DIR_NVCC + NAMES nvcc nvcc.exe + PATHS /opt/cuda/bin + PATH_SUFFIXES cuda/bin + DOC "Toolkit location." + ) + + if (CUDA_TOOLKIT_ROOT_DIR_NVCC) + get_filename_component(CUDA_TOOLKIT_ROOT_DIR_NVCC_PAR "${CUDA_TOOLKIT_ROOT_DIR_NVCC}" DIRECTORY) + get_filename_component(CUDA_TOOLKIT_ROOT_DIR "${CUDA_TOOLKIT_ROOT_DIR_NVCC_PAR}" DIRECTORY CACHE) + string(REGEX REPLACE "[/\\\\]?bin[64]*[/\\\\]?$" "" CUDA_TOOLKIT_ROOT_DIR ${CUDA_TOOLKIT_ROOT_DIR}) + # We need to force this back into the cache. + set(CUDA_TOOLKIT_ROOT_DIR ${CUDA_TOOLKIT_ROOT_DIR} CACHE PATH "Toolkit location." 
FORCE) + set(CUDA_TOOLKIT_TARGET_DIR ${CUDA_TOOLKIT_ROOT_DIR}) + endif() + unset(CUDA_TOOLKIT_ROOT_DIR_NVCC CACHE) + + if (NOT EXISTS ${CUDA_TOOLKIT_ROOT_DIR}) + if(CUDA_FIND_REQUIRED) + message(FATAL_ERROR "Specify CUDA_TOOLKIT_ROOT_DIR") + elseif(NOT CUDA_FIND_QUIETLY) + message("CUDA_TOOLKIT_ROOT_DIR not found or specified") + endif() + endif () +endif () + +if(CMAKE_CROSSCOMPILING) + SET (CUDA_TOOLKIT_ROOT $ENV{CUDA_TOOLKIT_ROOT}) + if(CMAKE_SYSTEM_PROCESSOR STREQUAL "armv7-a") + # Support for NVPACK + set (CUDA_TOOLKIT_TARGET_NAMES "armv7-linux-androideabi") + elseif(CMAKE_SYSTEM_PROCESSOR MATCHES "arm") + # Support for arm cross compilation + set(CUDA_TOOLKIT_TARGET_NAMES "armv7-linux-gnueabihf") + elseif(CMAKE_SYSTEM_PROCESSOR MATCHES "aarch64") + # Support for aarch64 cross compilation + if (ANDROID_ARCH_NAME STREQUAL "arm64") + set(CUDA_TOOLKIT_TARGET_NAMES "aarch64-linux-androideabi") + else() + set(CUDA_TOOLKIT_TARGET_NAMES "aarch64-linux" "sbsa-linux") + endif (ANDROID_ARCH_NAME STREQUAL "arm64") + endif() + + foreach(CUDA_TOOLKIT_TARGET_NAME IN LISTS CUDA_TOOLKIT_TARGET_NAMES) + if (EXISTS "${CUDA_TOOLKIT_ROOT}/targets/${CUDA_TOOLKIT_TARGET_NAME}") + set(CUDA_TOOLKIT_TARGET_DIR "${CUDA_TOOLKIT_ROOT}/targets/${CUDA_TOOLKIT_TARGET_NAME}" CACHE PATH "CUDA Toolkit target location.") + SET (CUDA_TOOLKIT_ROOT_DIR ${CUDA_TOOLKIT_ROOT} CACHE PATH "Toolkit location." FORCE) + mark_as_advanced(CUDA_TOOLKIT_TARGET_DIR) + break() + endif() + endforeach() + + # add known CUDA targetr root path to the set of directories we search for programs, libraries and headers + set( CMAKE_FIND_ROOT_PATH "${CUDA_TOOLKIT_TARGET_DIR};${CMAKE_FIND_ROOT_PATH}") + macro( cuda_find_host_program ) + if (COMMAND find_host_program) + find_host_program( ${ARGN} ) + else() + find_program( ${ARGN} ) + endif() + endmacro() +else() + # for non-cross-compile, find_host_program == find_program and CUDA_TOOLKIT_TARGET_DIR == CUDA_TOOLKIT_ROOT_DIR + macro( cuda_find_host_program ) + find_program( ${ARGN} ) + endmacro() + SET (CUDA_TOOLKIT_TARGET_DIR ${CUDA_TOOLKIT_ROOT_DIR}) +endif() + + +# CUDA_NVCC_EXECUTABLE +if(DEFINED ENV{CUDA_NVCC_EXECUTABLE}) + set(CUDA_NVCC_EXECUTABLE "$ENV{CUDA_NVCC_EXECUTABLE}" CACHE FILEPATH "The CUDA compiler") +else() + cuda_find_host_program(CUDA_NVCC_EXECUTABLE + NAMES nvcc + PATHS "${CUDA_TOOLKIT_ROOT_DIR}" + ENV CUDA_PATH + ENV CUDA_BIN_PATH + PATH_SUFFIXES bin bin64 + NO_DEFAULT_PATH + ) + # Search default search paths, after we search our own set of paths. + cuda_find_host_program(CUDA_NVCC_EXECUTABLE nvcc) +endif() + +if(CUDA_NVCC_EXECUTABLE AND NOT CUDA_VERSION) + # Compute the version. 
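+  # (nvcc --version ends with a line such as
+  #    "Cuda compilation tools, release 12.1, V12.1.105"
+  # and the regular expressions below extract the major/minor release numbers;
+  # the version shown here is only illustrative.)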
+ execute_process(COMMAND ${CUDA_NVCC_EXECUTABLE} "--version" + OUTPUT_VARIABLE NVCC_OUT + RESULT_VARIABLE NVCC_RC) + if(NOT (${NVCC_RC} EQUAL 0)) + message(WARNING "Failed to execute '${CUDA_NVCC_EXECUTABLE} --version'") + set(CUDA_FOUND FALSE) + return() + endif() + string(REGEX REPLACE ".*release ([0-9]+)\\.([0-9]+).*" "\\1" CUDA_VERSION_MAJOR ${NVCC_OUT}) + string(REGEX REPLACE ".*release ([0-9]+)\\.([0-9]+).*" "\\2" CUDA_VERSION_MINOR ${NVCC_OUT}) + set(CUDA_VERSION "${CUDA_VERSION_MAJOR}.${CUDA_VERSION_MINOR}" CACHE STRING "Version of CUDA as computed from nvcc.") + mark_as_advanced(CUDA_VERSION) +else() + # Need to set these based off of the cached value + string(REGEX REPLACE "([0-9]+)\\.([0-9]+).*" "\\1" CUDA_VERSION_MAJOR "${CUDA_VERSION}") + string(REGEX REPLACE "([0-9]+)\\.([0-9]+).*" "\\2" CUDA_VERSION_MINOR "${CUDA_VERSION}") +endif() + +# Always set this convenience variable +set(CUDA_VERSION_STRING "${CUDA_VERSION}") + +# CUDA_TOOLKIT_INCLUDE +find_path(CUDA_TOOLKIT_INCLUDE + device_functions.h # Header included in toolkit + PATHS ${CUDA_TOOLKIT_TARGET_DIR} + ENV CUDA_PATH + ENV CUDA_INC_PATH + PATH_SUFFIXES include + NO_DEFAULT_PATH + ) +# Search default search paths, after we search our own set of paths. +find_path(CUDA_TOOLKIT_INCLUDE device_functions.h) +mark_as_advanced(CUDA_TOOLKIT_INCLUDE) + +set(CUDA_HAS_FP16 TRUE) + +# Set the user list of include dir to nothing to initialize it. +set (CUDA_NVCC_INCLUDE_DIRS_USER "") +set (CUDA_INCLUDE_DIRS ${CUDA_TOOLKIT_INCLUDE}) + +macro(cuda_find_library_local_first_with_path_ext _var _names _doc _path_ext ) + if(CMAKE_SIZEOF_VOID_P EQUAL 8) + # CUDA 3.2+ on Windows moved the library directories, so we need the new + # and old paths. + set(_cuda_64bit_lib_dir "${_path_ext}lib/x64" "${_path_ext}lib64" "${_path_ext}libx64" ) + endif() + # CUDA 3.2+ on Windows moved the library directories, so we need to new + # (lib/Win32) and the old path (lib). + find_library(${_var} + NAMES ${_names} + PATHS "${CUDA_TOOLKIT_TARGET_DIR}" + ENV CUDA_PATH + ENV CUDA_LIB_PATH + PATH_SUFFIXES ${_cuda_64bit_lib_dir} "${_path_ext}lib/Win32" "${_path_ext}lib" "${_path_ext}libWin32" + DOC ${_doc} + NO_DEFAULT_PATH + ) + if (NOT CMAKE_CROSSCOMPILING) + # Search default search paths, after we search our own set of paths. + find_library(${_var} + NAMES ${_names} + PATHS "/usr/lib/nvidia-current" + DOC ${_doc} + ) + endif() +endmacro() + +macro(cuda_find_library_local_first _var _names _doc) + cuda_find_library_local_first_with_path_ext( "${_var}" "${_names}" "${_doc}" "" ) +endmacro() + +macro(find_library_local_first _var _names _doc ) + cuda_find_library_local_first( "${_var}" "${_names}" "${_doc}" "" ) +endmacro() + + +# CUDA_LIBRARIES +cuda_find_library_local_first(CUDA_CUDART_LIBRARY cudart "\"cudart\" library") + +cuda_find_library_local_first(CUDA_cudart_static_LIBRARY cudart_static "static CUDA runtime library") +mark_as_advanced(CUDA_cudart_static_LIBRARY) + + +if(CUDA_cudart_static_LIBRARY) + # If static cudart available, use it by default, but provide a user-visible option to disable it. + option(CUDA_USE_STATIC_CUDA_RUNTIME "Use the static version of the CUDA runtime library if available" ON) +else() + # If not available, silently disable the option. 
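+  # (An INTERNAL cache entry is hidden from cmake-gui/ccmake, so the user is
+  # not shown an option that cannot take effect.)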
+ set(CUDA_USE_STATIC_CUDA_RUNTIME OFF CACHE INTERNAL "") +endif() + +if(CUDA_USE_STATIC_CUDA_RUNTIME) + set(CUDA_CUDART_LIBRARY_VAR CUDA_cudart_static_LIBRARY) +else() + set(CUDA_CUDART_LIBRARY_VAR CUDA_CUDART_LIBRARY) +endif() + +cuda_find_library_local_first(CUDA_cudadevrt_LIBRARY cudadevrt "\"cudadevrt\" library") +mark_as_advanced(CUDA_cudadevrt_LIBRARY) + +if(CUDA_USE_STATIC_CUDA_RUNTIME) + if(UNIX) + # Check for the dependent libraries. Here we look for pthreads. + if (DEFINED CMAKE_THREAD_PREFER_PTHREAD) + set(_cuda_cmake_thread_prefer_pthread ${CMAKE_THREAD_PREFER_PTHREAD}) + endif() + set(CMAKE_THREAD_PREFER_PTHREAD 1) + + # Many of the FindXYZ CMake comes with makes use of try_compile with int main(){return 0;} + # as the source file. Unfortunately this causes a warning with -Wstrict-prototypes and + # -Werror causes the try_compile to fail. We will just temporarily disable other flags + # when doing the find_package command here. + set(_cuda_cmake_c_flags ${CMAKE_C_FLAGS}) + set(CMAKE_C_FLAGS "-fPIC") + find_package(Threads REQUIRED) + set(CMAKE_C_FLAGS ${_cuda_cmake_c_flags}) + + if (DEFINED _cuda_cmake_thread_prefer_pthread) + set(CMAKE_THREAD_PREFER_PTHREAD ${_cuda_cmake_thread_prefer_pthread}) + unset(_cuda_cmake_thread_prefer_pthread) + else() + unset(CMAKE_THREAD_PREFER_PTHREAD) + endif() + + if(NOT APPLE) + #On Linux, you must link against librt when using the static cuda runtime. + find_library(CUDA_rt_LIBRARY rt) + if (NOT CUDA_rt_LIBRARY) + message(WARNING "Expecting to find librt for libcudart_static, but didn't find it.") + endif() + endif() + endif() +endif() + +cuda_find_library_local_first_with_path_ext(CUDA_cupti_LIBRARY cupti "\"cupti\" library" "extras/CUPTI/") +mark_as_advanced(CUDA_cupti_LIBRARY) + +# Set the CUDA_LIBRARIES variable. This is the set of stuff to link against if you are +# using the CUDA runtime. For the dynamic version of the runtime, most of the +# dependencies are brough in, but for the static version there are additional libraries +# and linker commands needed. +# Initialize to empty +set(CUDA_LIBRARIES) + +# If we are using emulation mode and we found the cudartemu library then use +# that one instead of cudart. +if(CUDA_BUILD_EMULATION AND CUDA_CUDARTEMU_LIBRARY) + list(APPEND CUDA_LIBRARIES ${CUDA_CUDARTEMU_LIBRARY}) +elseif(CUDA_USE_STATIC_CUDA_RUNTIME AND CUDA_cudart_static_LIBRARY) + list(APPEND CUDA_LIBRARIES ${CUDA_cudart_static_LIBRARY} ${CMAKE_THREAD_LIBS_INIT} ${CMAKE_DL_LIBS}) + if (CUDA_rt_LIBRARY) + list(APPEND CUDA_LIBRARIES ${CUDA_rt_LIBRARY}) + endif() + if(APPLE) + # We need to add the default path to the driver (libcuda.dylib) as an rpath, so that + # the static cuda runtime can find it at runtime. + list(APPEND CUDA_LIBRARIES -Wl,-rpath,/usr/local/cuda/lib) + endif() +else() + list(APPEND CUDA_LIBRARIES ${CUDA_CUDART_LIBRARY}) +endif() + +# 1.1 toolkit on linux doesn't appear to have a separate library on +# some platforms. +cuda_find_library_local_first(CUDA_CUDA_LIBRARY cuda "\"cuda\" library (older versions only).") + +mark_as_advanced( + CUDA_CUDA_LIBRARY + CUDA_CUDART_LIBRARY + ) + +####################### +# Look for some of the toolkit helper libraries +macro(FIND_CUDA_HELPER_LIBS _name) + cuda_find_library_local_first(CUDA_${_name}_LIBRARY ${_name} "\"${_name}\" library") + mark_as_advanced(CUDA_${_name}_LIBRARY) +endmacro() + +if(CUDA_BUILD_EMULATION) + message(FATAL_ERROR "CUDA_BUILD_EMULATION is not supported in version 3.1 and onwards. You must disable it to proceed. 
You have version ${CUDA_VERSION}.") +endif() + +find_cuda_helper_libs(cufft) +find_cuda_helper_libs(cublas) +find_cuda_helper_libs(cublasLt) +# cusparse showed up in version 3.2 +find_cuda_helper_libs(cusparse) +find_cuda_helper_libs(curand) +if (WIN32) + find_cuda_helper_libs(nvcuvenc) + find_cuda_helper_libs(nvcuvid) +endif() + +# In CUDA 9.0 NPP was nppi was removed +find_cuda_helper_libs(nppc) +find_cuda_helper_libs(nppial) +find_cuda_helper_libs(nppicc) +find_cuda_helper_libs(nppicom) +find_cuda_helper_libs(nppidei) +find_cuda_helper_libs(nppif) +find_cuda_helper_libs(nppig) +find_cuda_helper_libs(nppim) +find_cuda_helper_libs(nppist) +find_cuda_helper_libs(nppisu) +find_cuda_helper_libs(nppitc) +find_cuda_helper_libs(npps) +set(CUDA_npp_LIBRARY "${CUDA_nppc_LIBRARY};${CUDA_nppial_LIBRARY};${CUDA_nppicc_LIBRARY};${CUDA_nppicom_LIBRARY};${CUDA_nppidei_LIBRARY};${CUDA_nppif_LIBRARY};${CUDA_nppig_LIBRARY};${CUDA_nppim_LIBRARY};${CUDA_nppist_LIBRARY};${CUDA_nppisu_LIBRARY};${CUDA_nppitc_LIBRARY};${CUDA_npps_LIBRARY}") +# cusolver showed up in version 7.0 +find_cuda_helper_libs(cusolver) + +if (CUDA_BUILD_EMULATION) + set(CUDA_CUFFT_LIBRARIES ${CUDA_cufftemu_LIBRARY}) + set(CUDA_CUBLAS_LIBRARIES ${CUDA_cublasemu_LIBRARY}) +else() + set(CUDA_CUFFT_LIBRARIES ${CUDA_cufft_LIBRARY}) + set(CUDA_CUBLAS_LIBRARIES ${CUDA_cublas_LIBRARY} ${CUDA_cublas_device_LIBRARY} ${CUDA_cublasLt_LIBRARY}) +endif() + +######################## +# Look for the SDK stuff. As of CUDA 3.0 NVSDKCUDA_ROOT has been replaced with +# NVSDKCOMPUTE_ROOT with the old CUDA C contents moved into the C subdirectory +find_path(CUDA_SDK_ROOT_DIR common/inc/cutil.h + HINTS + "$ENV{NVSDKCOMPUTE_ROOT}/C" + ENV NVSDKCUDA_ROOT + "[HKEY_LOCAL_MACHINE\\SOFTWARE\\NVIDIA Corporation\\Installed Products\\NVIDIA SDK 10\\Compute;InstallDir]" + PATHS + "/Developer/GPU\ Computing/C" + ) + +# Keep the CUDA_SDK_ROOT_DIR first in order to be able to override the +# environment variables. +set(CUDA_SDK_SEARCH_PATH + "${CUDA_SDK_ROOT_DIR}" + "${CUDA_TOOLKIT_ROOT_DIR}/local/NVSDK0.2" + "${CUDA_TOOLKIT_ROOT_DIR}/NVSDK0.2" + "${CUDA_TOOLKIT_ROOT_DIR}/NV_CUDA_SDK" + "$ENV{HOME}/NVIDIA_CUDA_SDK" + "$ENV{HOME}/NVIDIA_CUDA_SDK_MACOSX" + "/Developer/CUDA" + ) + +# Example of how to find an include file from the CUDA_SDK_ROOT_DIR + +# find_path(CUDA_CUT_INCLUDE_DIR +# cutil.h +# PATHS ${CUDA_SDK_SEARCH_PATH} +# PATH_SUFFIXES "common/inc" +# DOC "Location of cutil.h" +# NO_DEFAULT_PATH +# ) +# # Now search system paths +# find_path(CUDA_CUT_INCLUDE_DIR cutil.h DOC "Location of cutil.h") + +# mark_as_advanced(CUDA_CUT_INCLUDE_DIR) + + +# Example of how to find a library in the CUDA_SDK_ROOT_DIR + +# # cutil library is called cutil64 for 64 bit builds on windows. We don't want +# # to get these confused, so we are setting the name based on the word size of +# # the build. 
+ +# if(CMAKE_SIZEOF_VOID_P EQUAL 8) +# set(cuda_cutil_name cutil64) +# else() +# set(cuda_cutil_name cutil32) +# endif() + +# find_library(CUDA_CUT_LIBRARY +# NAMES cutil ${cuda_cutil_name} +# PATHS ${CUDA_SDK_SEARCH_PATH} +# # The new version of the sdk shows up in common/lib, but the old one is in lib +# PATH_SUFFIXES "common/lib" "lib" +# DOC "Location of cutil library" +# NO_DEFAULT_PATH +# ) +# # Now search system paths +# find_library(CUDA_CUT_LIBRARY NAMES cutil ${cuda_cutil_name} DOC "Location of cutil library") +# mark_as_advanced(CUDA_CUT_LIBRARY) +# set(CUDA_CUT_LIBRARIES ${CUDA_CUT_LIBRARY}) + + + +############################# +# Check for required components +set(CUDA_FOUND TRUE) + +set(CUDA_TOOLKIT_ROOT_DIR_INTERNAL "${CUDA_TOOLKIT_ROOT_DIR}" CACHE INTERNAL + "This is the value of the last time CUDA_TOOLKIT_ROOT_DIR was set successfully." FORCE) +set(CUDA_TOOLKIT_TARGET_DIR_INTERNAL "${CUDA_TOOLKIT_TARGET_DIR}" CACHE INTERNAL + "This is the value of the last time CUDA_TOOLKIT_TARGET_DIR was set successfully." FORCE) +set(CUDA_SDK_ROOT_DIR_INTERNAL "${CUDA_SDK_ROOT_DIR}" CACHE INTERNAL + "This is the value of the last time CUDA_SDK_ROOT_DIR was set successfully." FORCE) + +include(${CMAKE_CURRENT_LIST_DIR}/FindPackageHandleStandardArgs.cmake) + +find_package_handle_standard_args(CUDA + REQUIRED_VARS + CUDA_TOOLKIT_ROOT_DIR + CUDA_NVCC_EXECUTABLE + CUDA_INCLUDE_DIRS + ${CUDA_CUDART_LIBRARY_VAR} + VERSION_VAR + CUDA_VERSION + ) + + + +############################################################################### +############################################################################### +# Macros +############################################################################### +############################################################################### + +############################################################################### +# Add include directories to pass to the nvcc command. 
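+# (Typical usage, before CUDA_ADD_LIBRARY / CUDA_ADD_EXECUTABLE wraps the
+# sources:
+#     cuda_include_directories(${CMAKE_CURRENT_SOURCE_DIR}/include)
+#  The directories accumulate in CUDA_NVCC_INCLUDE_DIRS_USER and end up on the
+#  nvcc command line as -I flags; the include path shown is only an example.)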
+macro(CUDA_INCLUDE_DIRECTORIES) + foreach(dir ${ARGN}) + list(APPEND CUDA_NVCC_INCLUDE_DIRS_USER ${dir}) + endforeach() +endmacro() + + +############################################################################## +cuda_find_helper_file(parse_cubin cmake) +cuda_find_helper_file(make2cmake cmake) +cuda_find_helper_file(run_nvcc cmake) +include("${CMAKE_CURRENT_LIST_DIR}/FindCUDA/select_compute_arch.cmake") + +############################################################################## +# Separate the OPTIONS out from the sources +# +macro(CUDA_GET_SOURCES_AND_OPTIONS _sources _cmake_options _options) + set( ${_sources} ) + set( ${_cmake_options} ) + set( ${_options} ) + set( _found_options FALSE ) + foreach(arg ${ARGN}) + if("x${arg}" STREQUAL "xOPTIONS") + set( _found_options TRUE ) + elseif( + "x${arg}" STREQUAL "xWIN32" OR + "x${arg}" STREQUAL "xMACOSX_BUNDLE" OR + "x${arg}" STREQUAL "xEXCLUDE_FROM_ALL" OR + "x${arg}" STREQUAL "xSTATIC" OR + "x${arg}" STREQUAL "xSHARED" OR + "x${arg}" STREQUAL "xMODULE" + ) + list(APPEND ${_cmake_options} ${arg}) + else() + if ( _found_options ) + list(APPEND ${_options} ${arg}) + else() + # Assume this is a file + list(APPEND ${_sources} ${arg}) + endif() + endif() + endforeach() +endmacro() + +############################################################################## +# Parse the OPTIONS from ARGN and set the variables prefixed by _option_prefix +# +macro(CUDA_PARSE_NVCC_OPTIONS _option_prefix) + set( _found_config ) + foreach(arg ${ARGN}) + # Determine if we are dealing with a perconfiguration flag + foreach(config ${CUDA_configuration_types}) + string(TOUPPER ${config} config_upper) + if (arg STREQUAL "${config_upper}") + set( _found_config _${arg}) + # Set arg to nothing to keep it from being processed further + set( arg ) + endif() + endforeach() + + if ( arg ) + list(APPEND ${_option_prefix}${_found_config} "${arg}") + endif() + endforeach() +endmacro() + +############################################################################## +# Helper to add the include directory for CUDA only once +function(CUDA_ADD_CUDA_INCLUDE_ONCE) + get_directory_property(_include_directories INCLUDE_DIRECTORIES) + set(_add TRUE) + if(_include_directories) + foreach(dir ${_include_directories}) + if("${dir}" STREQUAL "${CUDA_INCLUDE_DIRS}") + set(_add FALSE) + endif() + endforeach() + endif() + if(_add) + include_directories(${CUDA_INCLUDE_DIRS}) + endif() +endfunction() + +function(CUDA_BUILD_SHARED_LIBRARY shared_flag) + set(cmake_args ${ARGN}) + # If SHARED, MODULE, or STATIC aren't already in the list of arguments, then + # add SHARED or STATIC based on the value of BUILD_SHARED_LIBS. + list(FIND cmake_args SHARED _cuda_found_SHARED) + list(FIND cmake_args MODULE _cuda_found_MODULE) + list(FIND cmake_args STATIC _cuda_found_STATIC) + if( _cuda_found_SHARED GREATER -1 OR + _cuda_found_MODULE GREATER -1 OR + _cuda_found_STATIC GREATER -1) + set(_cuda_build_shared_libs) + else() + if (BUILD_SHARED_LIBS) + set(_cuda_build_shared_libs SHARED) + else() + set(_cuda_build_shared_libs STATIC) + endif() + endif() + set(${shared_flag} ${_cuda_build_shared_libs} PARENT_SCOPE) +endfunction() + +############################################################################## +# Helper to avoid clashes of files with the same basename but different paths. 
+# This doesn't attempt to do exactly what CMake internals do, which is to only
+# add this path when there is a conflict, since by the time a second collision
+# in names is detected it's already too late to fix the first one.  For
+# consistency's sake the relative path will be added to all files.
+function(CUDA_COMPUTE_BUILD_PATH path build_path)
+  #message("CUDA_COMPUTE_BUILD_PATH([${path}] ${build_path})")
+  # Only deal with CMake style paths from here on out
+  file(TO_CMAKE_PATH "${path}" bpath)
+  if (IS_ABSOLUTE "${bpath}")
+    # Absolute paths are generally unnecessary, especially if something like
+    # file(GLOB_RECURSE) is used to pick up the files.
+
+    string(FIND "${bpath}" "${CMAKE_CURRENT_BINARY_DIR}" _binary_dir_pos)
+    if (_binary_dir_pos EQUAL 0)
+      file(RELATIVE_PATH bpath "${CMAKE_CURRENT_BINARY_DIR}" "${bpath}")
+    else()
+      file(RELATIVE_PATH bpath "${CMAKE_CURRENT_SOURCE_DIR}" "${bpath}")
+    endif()
+  endif()
+
+  # This recipe is from cmLocalGenerator::CreateSafeUniqueObjectFileName in the
+  # CMake source.
+
+  # Remove leading /
+  string(REGEX REPLACE "^[/]+" "" bpath "${bpath}")
+  # Avoid absolute paths by removing ':'
+  string(REPLACE ":" "_" bpath "${bpath}")
+  # Avoid relative paths that go up the tree
+  string(REPLACE "../" "__/" bpath "${bpath}")
+  # Avoid spaces
+  string(REPLACE " " "_" bpath "${bpath}")
+
+  # Strip off the filename.  I wait until here to do it, since removing the
+  # basename can make a path that looked like path/../basename turn into
+  # path/.. (notice the trailing slash).
+  get_filename_component(bpath "${bpath}" PATH)
+
+  set(${build_path} "${bpath}" PARENT_SCOPE)
+  #message("${build_path} = ${bpath}")
+endfunction()
+
+##############################################################################
+# This helper macro populates the following variables and sets up custom
+# commands and targets to invoke the nvcc compiler to generate C or PTX source
+# dependent upon the format parameter.  The compiler is invoked once with -M
+# to generate a dependency file and a second time with -cuda or -ptx to generate
+# a .cpp or .ptx file.
+# INPUT:
+#   cuda_target         - Target name
+#   format              - PTX, CUBIN, FATBIN or OBJ
+#   FILE1 .. FILEN      - The remaining arguments are the sources to be wrapped.
+#   OPTIONS             - Extra options to NVCC
+# OUTPUT:
+#   generated_files     - List of generated files
+##############################################################################
+##############################################################################
+
+macro(CUDA_WRAP_SRCS cuda_target format generated_files)
+
+  # Put optional arguments in list.
+  set(_argn_list "${ARGN}")
+  # If one of the given optional arguments is "PHONY", make a note of it, then
+  # remove it from the list.
+  list(FIND _argn_list "PHONY" _phony_idx)
+  if("${_phony_idx}" GREATER "-1")
+    set(_target_is_phony true)
+    list(REMOVE_AT _argn_list ${_phony_idx})
+  else()
+    set(_target_is_phony false)
+  endif()
+
+  # If CMake doesn't support separable compilation, complain
+  if(CUDA_SEPARABLE_COMPILATION AND CMAKE_VERSION VERSION_LESS "2.8.10.1")
+    message(SEND_ERROR "CUDA_SEPARABLE_COMPILATION isn't supported for CMake versions less than 2.8.10.1")
+  endif()
+
+  # Set up all the command line flags here, so that they can be overridden on a per target basis.
+
+  set(nvcc_flags "")
+
+  # Emulation if the card isn't present.
+  if (CUDA_BUILD_EMULATION)
+    # Emulation.
+    set(nvcc_flags ${nvcc_flags} --device-emulation -D_DEVICEEMU -g)
+  else()
+    # Device mode.  No flags necessary.
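+    # For illustration only: CUDA_BUILD_EMULATION is an ordinary cache option,
+    # so a hypothetical configure line such as
+    #   cmake -DCUDA_BUILD_EMULATION=ON <path-to-source>
+    # takes the emulation branch above, compiles every wrapped source with
+    # --device-emulation, and selects the *emu variants of cuFFT/cuBLAS chosen
+    # earlier in this module.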
+  endif()
+
+  if(CUDA_HOST_COMPILATION_CPP)
+    set(CUDA_C_OR_CXX CXX)
+  else()
+    message(WARNING "--host-compilation flag is deprecated in CUDA version >= 3.0.  Removing --host-compilation C flag" )
+    set(CUDA_C_OR_CXX C)
+  endif()
+
+  set(generated_extension ${CMAKE_${CUDA_C_OR_CXX}_OUTPUT_EXTENSION})
+
+  if(CUDA_64_BIT_DEVICE_CODE)
+    set(nvcc_flags ${nvcc_flags} -m64)
+  else()
+    set(nvcc_flags ${nvcc_flags} -m32)
+  endif()
+
+  if(CUDA_TARGET_CPU_ARCH)
+    set(nvcc_flags ${nvcc_flags} "--target-cpu-architecture=${CUDA_TARGET_CPU_ARCH}")
+  endif()
+
+  # This needs to be passed in at this stage, because VS needs to fill out the
+  # various macros from within VS.  Note that CCBIN is only used if
+  # -ccbin or --compiler-bindir isn't used and CUDA_HOST_COMPILER matches
+  # _CUDA_MSVC_HOST_COMPILER
+  if(CMAKE_GENERATOR MATCHES "Visual Studio")
+    set(ccbin_flags -D "\"CCBIN:PATH=${_CUDA_MSVC_HOST_COMPILER}\"" )
+  else()
+    set(ccbin_flags)
+  endif()
+
+  # Figure out which configuration we will use and pass that in as an argument to
+  # the script.  We need to defer the decision until compilation time, because
+  # for VS projects we won't know if we are making a debug or release build
+  # until build time.
+  if(CMAKE_GENERATOR MATCHES "Visual Studio")
+    set( CUDA_build_configuration "$(ConfigurationName)" )
+  else()
+    set( CUDA_build_configuration "${CMAKE_BUILD_TYPE}")
+  endif()
+
+  # Initialize our list of includes with the user ones followed by the CUDA system ones.
+  set(CUDA_NVCC_INCLUDE_DIRS ${CUDA_NVCC_INCLUDE_DIRS_USER} "${CUDA_INCLUDE_DIRS}")
+  if(_target_is_phony)
+    # If the passed in target name isn't a real target (i.e., this is from a call to one of the
+    # cuda_compile_* functions), need to query directory properties to get include directories
+    # and compile definitions.
+    get_directory_property(_dir_include_dirs INCLUDE_DIRECTORIES)
+    get_directory_property(_dir_compile_defs COMPILE_DEFINITIONS)
+
+    list(APPEND CUDA_NVCC_INCLUDE_DIRS "${_dir_include_dirs}")
+    set(CUDA_NVCC_COMPILE_DEFINITIONS "${_dir_compile_defs}")
+  else()
+    # Append the include directories for this target via generator expression, which is
+    # expanded by the FILE(GENERATE) call below.  This generator expression captures all
+    # include dirs set by the user, whether via directory properties or target properties
+    list(APPEND CUDA_NVCC_INCLUDE_DIRS "$<TARGET_PROPERTY:${cuda_target},INCLUDE_DIRECTORIES>")
+
+    # Do the same thing with compile definitions
+    set(CUDA_NVCC_COMPILE_DEFINITIONS "$<TARGET_PROPERTY:${cuda_target},COMPILE_DEFINITIONS>")
+  endif()
+
+
+  # Reset these variables
+  set(CUDA_WRAP_OPTION_NVCC_FLAGS)
+  foreach(config ${CUDA_configuration_types})
+    string(TOUPPER ${config} config_upper)
+    set(CUDA_WRAP_OPTION_NVCC_FLAGS_${config_upper})
+  endforeach()
+
+  CUDA_GET_SOURCES_AND_OPTIONS(_cuda_wrap_sources _cuda_wrap_cmake_options _cuda_wrap_options ${_argn_list})
+  CUDA_PARSE_NVCC_OPTIONS(CUDA_WRAP_OPTION_NVCC_FLAGS ${_cuda_wrap_options})
+
+  # Figure out if we are building a shared library.  BUILD_SHARED_LIBS is
+  # respected in CUDA_ADD_LIBRARY.
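+  # Illustrative example (hypothetical target and file names): a call like
+  #   cuda_add_library(my_kernels SHARED kernels.cu)
+  # forwards SHARED through CUDA_GET_SOURCES_AND_OPTIONS into
+  # _cuda_wrap_cmake_options, so the checks below mark the build as shared and
+  # add CMAKE_SHARED_LIBRARY_<LANG>_FLAGS (e.g. -fPIC on many toolchains) to
+  # the host-side flags passed to nvcc.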
+  set(_cuda_build_shared_libs FALSE)
+  # SHARED, MODULE
+  list(FIND _cuda_wrap_cmake_options SHARED _cuda_found_SHARED)
+  list(FIND _cuda_wrap_cmake_options MODULE _cuda_found_MODULE)
+  if(_cuda_found_SHARED GREATER -1 OR _cuda_found_MODULE GREATER -1)
+    set(_cuda_build_shared_libs TRUE)
+  endif()
+  # STATIC
+  list(FIND _cuda_wrap_cmake_options STATIC _cuda_found_STATIC)
+  if(_cuda_found_STATIC GREATER -1)
+    set(_cuda_build_shared_libs FALSE)
+  endif()
+
+  # CUDA_HOST_FLAGS
+  if(_cuda_build_shared_libs)
+    # If we are setting up code for a shared library, then we need to add extra flags for
+    # compiling objects for shared libraries.
+    set(CUDA_HOST_SHARED_FLAGS ${CMAKE_SHARED_LIBRARY_${CUDA_C_OR_CXX}_FLAGS})
+  else()
+    set(CUDA_HOST_SHARED_FLAGS)
+  endif()
+
+  macro(_filter_blocklisted_host_flags CUDA_FLAGS)
+    string(REGEX REPLACE "[ \t]+" ";" ${CUDA_FLAGS} "${${CUDA_FLAGS}}")
+    foreach(_blacklisted ${CUDA_PROPAGATE_HOST_FLAGS_BLACKLIST})
+      list(REMOVE_ITEM ${CUDA_FLAGS} "${_blacklisted}")
+    endforeach()
+    string(REPLACE ";" " " ${CUDA_FLAGS} "${${CUDA_FLAGS}}")
+  endmacro()
+
+  # Only add the CMAKE_{C,CXX}_FLAGS if we are propagating host flags.  We
+  # always need to set the SHARED_FLAGS, though.
+  if(CUDA_PROPAGATE_HOST_FLAGS)
+    set(_cuda_C_FLAGS "${CMAKE_${CUDA_C_OR_CXX}_FLAGS}")
+    _filter_blocklisted_host_flags(_cuda_C_FLAGS)
+    set(_cuda_host_flags "set(CMAKE_HOST_FLAGS ${_cuda_C_FLAGS} ${CUDA_HOST_SHARED_FLAGS})")
+  else()
+    set(_cuda_host_flags "set(CMAKE_HOST_FLAGS ${CUDA_HOST_SHARED_FLAGS})")
+  endif()
+
+  set(_cuda_nvcc_flags_config "# Build specific configuration flags")
+  # Loop over all the configuration types to generate appropriate flags for run_nvcc.cmake
+  foreach(config ${CUDA_configuration_types})
+    string(TOUPPER ${config} config_upper)
+    # CMAKE_FLAGS are strings and not lists.  By not putting quotes around CMAKE_FLAGS
+    # we convert the strings to lists (like we want).
+
+    if(CUDA_PROPAGATE_HOST_FLAGS)
+      # nvcc chokes on -g3 in versions previous to 3.0, so replace it with -g
+      set(_cuda_fix_g3 FALSE)
+
+      set(_cuda_C_FLAGS "${CMAKE_${CUDA_C_OR_CXX}_FLAGS_${config_upper}}")
+      _filter_blocklisted_host_flags(_cuda_C_FLAGS)
+      if(_cuda_fix_g3)
+        string(REPLACE "-g3" "-g" _cuda_C_FLAGS "${_cuda_C_FLAGS}")
+      endif()
+
+      string(APPEND _cuda_host_flags "\nset(CMAKE_HOST_FLAGS_${config_upper} ${_cuda_C_FLAGS})")
+    endif()
+
+    # Note that if we ever want CUDA_NVCC_FLAGS_<CONFIG> to be a string (instead of a list
+    # like it is currently), we can remove the quotes around the
+    # ${CUDA_NVCC_FLAGS_${config_upper}} variable like the CMAKE_HOST_FLAGS_<CONFIG> variable.
+    string(APPEND _cuda_nvcc_flags_config "\nset(CUDA_NVCC_FLAGS_${config_upper} ${CUDA_NVCC_FLAGS_${config_upper}} ;; ${CUDA_WRAP_OPTION_NVCC_FLAGS_${config_upper}})")
+  endforeach()
+
+  # Process the C++14 flag.  If the host sets the flag, we need to add it to nvcc and
+  # remove it from the host.  This is because -Xcompiler -std=c++ will choke nvcc (it uses
+  # the C preprocessor).  In order to get this to work correctly, we need to use nvcc's
+  # specific c++14 flag.
+  if( "${_cuda_host_flags}" MATCHES "-std=c\\+\\+14")
+    # Add the c++14 flag to nvcc if it isn't already present.  Note that we only look at
+    # the main flag instead of the configuration specific flags.
+ if( NOT "${CUDA_NVCC_FLAGS}" MATCHES "-std=c\\+\\+14" ) + list(APPEND nvcc_flags --std c++14) + endif() + string(REGEX REPLACE "[-]+std=c\\+\\+14" "" _cuda_host_flags "${_cuda_host_flags}") + endif() + + if(_cuda_build_shared_libs) + list(APPEND nvcc_flags "-D${cuda_target}_EXPORTS") + endif() + + # Reset the output variable + set(_cuda_wrap_generated_files "") + + # Iterate over the macro arguments and create custom + # commands for all the .cu files. + foreach(file ${_argn_list}) + # Ignore any file marked as a HEADER_FILE_ONLY + get_source_file_property(_is_header ${file} HEADER_FILE_ONLY) + # Allow per source file overrides of the format. Also allows compiling non-.cu files. + get_source_file_property(_cuda_source_format ${file} CUDA_SOURCE_PROPERTY_FORMAT) + if((${file} MATCHES "\\.cu$" OR _cuda_source_format) AND NOT _is_header) + + if(NOT _cuda_source_format) + set(_cuda_source_format ${format}) + endif() + # If file isn't a .cu file, we need to tell nvcc to treat it as such. + if(NOT file MATCHES "\\.cu$") + set(cuda_language_flag -x=cu) + else() + set(cuda_language_flag) + endif() + + if( ${_cuda_source_format} MATCHES "OBJ") + set( cuda_compile_to_external_module OFF ) + else() + set( cuda_compile_to_external_module ON ) + if( ${_cuda_source_format} MATCHES "PTX" ) + set( cuda_compile_to_external_module_type "ptx" ) + elseif( ${_cuda_source_format} MATCHES "CUBIN") + set( cuda_compile_to_external_module_type "cubin" ) + elseif( ${_cuda_source_format} MATCHES "FATBIN") + set( cuda_compile_to_external_module_type "fatbin" ) + else() + message( FATAL_ERROR "Invalid format flag passed to CUDA_WRAP_SRCS or set with CUDA_SOURCE_PROPERTY_FORMAT file property for file '${file}': '${_cuda_source_format}'. Use OBJ, PTX, CUBIN or FATBIN.") + endif() + endif() + + if(cuda_compile_to_external_module) + # Don't use any of the host compilation flags for PTX targets. + set(CUDA_HOST_FLAGS) + set(CUDA_NVCC_FLAGS_CONFIG) + else() + set(CUDA_HOST_FLAGS ${_cuda_host_flags}) + set(CUDA_NVCC_FLAGS_CONFIG ${_cuda_nvcc_flags_config}) + endif() + + # Determine output directory + cuda_compute_build_path("${file}" cuda_build_path) + set(cuda_compile_intermediate_directory "${CMAKE_CURRENT_BINARY_DIR}/CMakeFiles/${cuda_target}.dir/${cuda_build_path}") + if(CUDA_GENERATED_OUTPUT_DIR) + set(cuda_compile_output_dir "${CUDA_GENERATED_OUTPUT_DIR}") + else() + if ( cuda_compile_to_external_module ) + set(cuda_compile_output_dir "${CMAKE_CURRENT_BINARY_DIR}") + else() + set(cuda_compile_output_dir "${cuda_compile_intermediate_directory}") + endif() + endif() + + # Add a custom target to generate a c or ptx file. ###################### + + get_filename_component( basename ${file} NAME ) + if( cuda_compile_to_external_module ) + set(generated_file_path "${cuda_compile_output_dir}") + set(generated_file_basename "${cuda_target}_generated_${basename}.${cuda_compile_to_external_module_type}") + set(format_flag "-${cuda_compile_to_external_module_type}") + file(MAKE_DIRECTORY "${cuda_compile_output_dir}") + else() + set(generated_file_path "${cuda_compile_output_dir}/${CMAKE_CFG_INTDIR}") + set(generated_file_basename "${cuda_target}_generated_${basename}${generated_extension}") + if(CUDA_SEPARABLE_COMPILATION) + set(format_flag "-dc") + else() + set(format_flag "-c") + endif() + endif() + + # Set all of our file names. 
Make sure that any filenames that have
+      # generated_file_path in them get passed in as a command line
+      # argument, so that the ${CMAKE_CFG_INTDIR} gets expanded at run time
+      # instead of configure time.
+      set(generated_file "${generated_file_path}/${generated_file_basename}")
+      set(cmake_dependency_file "${cuda_compile_intermediate_directory}/${generated_file_basename}.depend")
+      set(NVCC_generated_dependency_file "${cuda_compile_intermediate_directory}/${generated_file_basename}.NVCC-depend")
+      set(generated_cubin_file "${generated_file_path}/${generated_file_basename}.cubin.txt")
+      set(custom_target_script_pregen "${cuda_compile_intermediate_directory}/${generated_file_basename}.cmake.pre-gen")
+      set(custom_target_script "${cuda_compile_intermediate_directory}/${generated_file_basename}$<$<BOOL:$<CONFIG>>:.$<CONFIG>>.cmake")
+
+      # Setup properties for obj files:
+      if( NOT cuda_compile_to_external_module )
+        set_source_files_properties("${generated_file}"
+          PROPERTIES
+          EXTERNAL_OBJECT true # This is an object file not to be compiled, but only be linked.
+          )
+      endif()
+
+      # Don't add CMAKE_CURRENT_SOURCE_DIR if the path is already an absolute path.
+      get_filename_component(file_path "${file}" PATH)
+      if(IS_ABSOLUTE "${file_path}")
+        set(source_file "${file}")
+      else()
+        set(source_file "${CMAKE_CURRENT_SOURCE_DIR}/${file}")
+      endif()
+
+      if( NOT cuda_compile_to_external_module AND CUDA_SEPARABLE_COMPILATION)
+        list(APPEND ${cuda_target}_SEPARABLE_COMPILATION_OBJECTS "${generated_file}")
+      endif()
+
+      # Bring in the dependencies.  Creates a variable CUDA_NVCC_DEPEND #######
+      cuda_include_nvcc_dependencies(${cmake_dependency_file})
+
+      # Convenience string for output #########################################
+      if(CUDA_BUILD_EMULATION)
+        set(cuda_build_type "Emulation")
+      else()
+        set(cuda_build_type "Device")
+      endif()
+
+      # Build the NVCC made dependency file ###################################
+      set(build_cubin OFF)
+      if ( NOT CUDA_BUILD_EMULATION AND CUDA_BUILD_CUBIN )
+        if ( NOT cuda_compile_to_external_module )
+          set ( build_cubin ON )
+        endif()
+      endif()
+
+      # Configure the build script
+      configure_file("${CUDA_run_nvcc}" "${custom_target_script_pregen}" @ONLY)
+      file(GENERATE
+        OUTPUT "${custom_target_script}"
+        INPUT "${custom_target_script_pregen}"
+        )
+
+      # If a user specifies the same cuda file as input more than once, bad
+      # things can happen with dependencies.  Here we check an option
+      # to see if this is the behavior they want.
+      if(CUDA_ATTACH_VS_BUILD_RULE_TO_CUDA_FILE)
+        set(main_dep MAIN_DEPENDENCY ${source_file})
+      else()
+        set(main_dep DEPENDS ${source_file})
+      endif()
+
+      if(CUDA_VERBOSE_BUILD)
+        set(verbose_output ON)
+      elseif(CMAKE_GENERATOR MATCHES "Makefiles")
+        set(verbose_output "$(VERBOSE)")
+        # This condition lets us also turn on verbose output when someone
+        # specifies CMAKE_VERBOSE_MAKEFILE, even if the generator isn't
+        # the Makefiles generator (this is important for us, Ninja users.)
+ elseif(CMAKE_VERBOSE_MAKEFILE) + set(verbose_output ON) + else() + set(verbose_output OFF) + endif() + + # Create up the comment string + file(RELATIVE_PATH generated_file_relative_path "${CMAKE_BINARY_DIR}" "${generated_file}") + if(cuda_compile_to_external_module) + set(cuda_build_comment_string "Building NVCC ${cuda_compile_to_external_module_type} file ${generated_file_relative_path}") + else() + set(cuda_build_comment_string "Building NVCC (${cuda_build_type}) object ${generated_file_relative_path}") + endif() + + set(_verbatim VERBATIM) + if(ccbin_flags MATCHES "\\$\\(VCInstallDir\\)") + set(_verbatim "") + endif() + + # Build the generated file and dependency file ########################## + add_custom_command( + OUTPUT ${generated_file} + # These output files depend on the source_file and the contents of cmake_dependency_file + ${main_dep} + DEPENDS ${CUDA_NVCC_DEPEND} + DEPENDS ${custom_target_script} + # Make sure the output directory exists before trying to write to it. + COMMAND ${CMAKE_COMMAND} -E make_directory "${generated_file_path}" + COMMAND ${CMAKE_COMMAND} ARGS + -D verbose:BOOL=${verbose_output} + ${ccbin_flags} + -D build_configuration:STRING=${CUDA_build_configuration} + -D "generated_file:STRING=${generated_file}" + -D "generated_cubin_file:STRING=${generated_cubin_file}" + -P "${custom_target_script}" + WORKING_DIRECTORY "${cuda_compile_intermediate_directory}" + COMMENT "${cuda_build_comment_string}" + ${_verbatim} + ) + + # Make sure the build system knows the file is generated. + set_source_files_properties(${generated_file} PROPERTIES GENERATED TRUE) + + list(APPEND _cuda_wrap_generated_files ${generated_file}) + + # Add the other files that we want cmake to clean on a cleanup ########## + list(APPEND CUDA_ADDITIONAL_CLEAN_FILES "${cmake_dependency_file}") + list(REMOVE_DUPLICATES CUDA_ADDITIONAL_CLEAN_FILES) + set(CUDA_ADDITIONAL_CLEAN_FILES ${CUDA_ADDITIONAL_CLEAN_FILES} CACHE INTERNAL "List of intermediate files that are part of the cuda dependency scanning.") + + endif() + endforeach() + + # Set the return parameter + set(${generated_files} ${_cuda_wrap_generated_files}) +endmacro() + +function(_cuda_get_important_host_flags important_flags flag_string) + if(CMAKE_GENERATOR MATCHES "Visual Studio") + string(REGEX MATCHALL "/M[DT][d]?" flags "${flag_string}") + list(APPEND ${important_flags} ${flags}) + else() + string(REGEX MATCHALL "-fPIC" flags "${flag_string}") + list(APPEND ${important_flags} ${flags}) + endif() + set(${important_flags} ${${important_flags}} PARENT_SCOPE) +endfunction() + +############################################################################### +############################################################################### +# Separable Compilation Link +############################################################################### +############################################################################### + +# Compute the filename to be used by CUDA_LINK_SEPARABLE_COMPILATION_OBJECTS +function(CUDA_COMPUTE_SEPARABLE_COMPILATION_OBJECT_FILE_NAME output_file_var cuda_target object_files) + if (object_files) + set(generated_extension ${CMAKE_${CUDA_C_OR_CXX}_OUTPUT_EXTENSION}) + set(output_file "${CMAKE_CURRENT_BINARY_DIR}/CMakeFiles/${cuda_target}.dir/${CMAKE_CFG_INTDIR}/${cuda_target}_intermediate_link${generated_extension}") + else() + set(output_file) + endif() + + set(${output_file_var} "${output_file}" PARENT_SCOPE) +endfunction() + +# Setup the build rule for the separable compilation intermediate link file. 
+function(CUDA_LINK_SEPARABLE_COMPILATION_OBJECTS output_file cuda_target options object_files)
+  if (object_files)
+
+    set_source_files_properties("${output_file}"
+      PROPERTIES
+      EXTERNAL_OBJECT TRUE # This is an object file not to be compiled, but only
+                           # be linked.
+      GENERATED TRUE       # This file is generated during the build
+      )
+
+    # For now we are ignoring all the configuration specific flags.
+    set(nvcc_flags)
+    CUDA_PARSE_NVCC_OPTIONS(nvcc_flags ${options})
+    if(CUDA_64_BIT_DEVICE_CODE)
+      list(APPEND nvcc_flags -m64)
+    else()
+      list(APPEND nvcc_flags -m32)
+    endif()
+    # If -ccbin or --compiler-bindir has been specified, don't do anything.  Otherwise add it here.
+    list( FIND nvcc_flags "-ccbin" ccbin_found0 )
+    list( FIND nvcc_flags "--compiler-bindir" ccbin_found1 )
+    if( ccbin_found0 LESS 0 AND ccbin_found1 LESS 0 AND CUDA_HOST_COMPILER )
+      # Match VERBATIM check below.
+      if(CUDA_HOST_COMPILER MATCHES "\\$\\(VCInstallDir\\)")
+        list(APPEND nvcc_flags -ccbin "\"${CUDA_HOST_COMPILER}\"")
+      else()
+        list(APPEND nvcc_flags -ccbin "${CUDA_HOST_COMPILER}")
+      endif()
+    endif()
+
+    # Create a list of flags specified by CUDA_NVCC_FLAGS_${CONFIG} and CMAKE_${CUDA_C_OR_CXX}_FLAGS*
+    set(config_specific_flags)
+    set(flags)
+    foreach(config ${CUDA_configuration_types})
+      string(TOUPPER ${config} config_upper)
+      # Add config specific flags
+      foreach(f ${CUDA_NVCC_FLAGS_${config_upper}})
+        list(APPEND config_specific_flags $<$<CONFIG:${config}>:${f}>)
+      endforeach()
+      set(important_host_flags)
+      _cuda_get_important_host_flags(important_host_flags "${CMAKE_${CUDA_C_OR_CXX}_FLAGS_${config_upper}}")
+      foreach(f ${important_host_flags})
+        list(APPEND flags $<$<CONFIG:${config}>:-Xcompiler> $<$<CONFIG:${config}>:${f}>)
+      endforeach()
+    endforeach()
+    # Add CMAKE_${CUDA_C_OR_CXX}_FLAGS
+    set(important_host_flags)
+    _cuda_get_important_host_flags(important_host_flags "${CMAKE_${CUDA_C_OR_CXX}_FLAGS}")
+    foreach(f ${important_host_flags})
+      list(APPEND flags -Xcompiler ${f})
+    endforeach()
+
+    # Add our general CUDA_NVCC_FLAGS with the configuration specific flags
+    set(nvcc_flags ${CUDA_NVCC_FLAGS} ${config_specific_flags} ${nvcc_flags})
+
+    file(RELATIVE_PATH output_file_relative_path "${CMAKE_BINARY_DIR}" "${output_file}")
+
+    # Some generators don't handle the multiple levels of custom command
+    # dependencies correctly (obj1 depends on file1, obj2 depends on obj1), so
+    # we work around that issue by compiling the intermediate link object as a
+    # pre-link custom command in that situation.
+    set(do_obj_build_rule TRUE)
+    if (MSVC_VERSION GREATER 1599 AND MSVC_VERSION LESS 1800)
+      # VS 2010 and 2012 have this problem.
+ set(do_obj_build_rule FALSE) + endif() + + set(_verbatim VERBATIM) + if(nvcc_flags MATCHES "\\$\\(VCInstallDir\\)") + set(_verbatim "") + endif() + + if (do_obj_build_rule) + add_custom_command( + OUTPUT ${output_file} + DEPENDS ${object_files} + COMMAND ${CUDA_NVCC_EXECUTABLE} ${nvcc_flags} -dlink ${object_files} -o ${output_file} + ${flags} + COMMENT "Building NVCC intermediate link file ${output_file_relative_path}" + COMMAND_EXPAND_LISTS + ${_verbatim} + ) + else() + get_filename_component(output_file_dir "${output_file}" DIRECTORY) + add_custom_command( + TARGET ${cuda_target} + PRE_LINK + COMMAND ${CMAKE_COMMAND} -E echo "Building NVCC intermediate link file ${output_file_relative_path}" + COMMAND ${CMAKE_COMMAND} -E make_directory "${output_file_dir}" + COMMAND ${CUDA_NVCC_EXECUTABLE} ${nvcc_flags} ${flags} -dlink ${object_files} -o "${output_file}" + COMMAND_EXPAND_LISTS + ${_verbatim} + ) + endif() + endif() +endfunction() + +############################################################################### +############################################################################### +# ADD LIBRARY +############################################################################### +############################################################################### +macro(CUDA_ADD_LIBRARY cuda_target) + + CUDA_ADD_CUDA_INCLUDE_ONCE() + + # Separate the sources from the options + CUDA_GET_SOURCES_AND_OPTIONS(_sources _cmake_options _options ${ARGN}) + CUDA_BUILD_SHARED_LIBRARY(_cuda_shared_flag ${ARGN}) + # Create custom commands and targets for each file. + CUDA_WRAP_SRCS( ${cuda_target} OBJ _generated_files ${_sources} + ${_cmake_options} ${_cuda_shared_flag} + OPTIONS ${_options} ) + + # Compute the file name of the intermedate link file used for separable + # compilation. + CUDA_COMPUTE_SEPARABLE_COMPILATION_OBJECT_FILE_NAME(link_file ${cuda_target} "${${cuda_target}_SEPARABLE_COMPILATION_OBJECTS}") + + # Add the library. + add_library(${cuda_target} ${_cmake_options} + ${_generated_files} + ${_sources} + ${link_file} + ) + + # Add a link phase for the separable compilation if it has been enabled. If + # it has been enabled then the ${cuda_target}_SEPARABLE_COMPILATION_OBJECTS + # variable will have been defined. + CUDA_LINK_SEPARABLE_COMPILATION_OBJECTS("${link_file}" ${cuda_target} "${_options}" "${${cuda_target}_SEPARABLE_COMPILATION_OBJECTS}") + + target_link_libraries(${cuda_target} ${CUDA_LINK_LIBRARIES_KEYWORD} + ${CUDA_LIBRARIES} + ) + + if(CUDA_SEPARABLE_COMPILATION) + target_link_libraries(${cuda_target} ${CUDA_LINK_LIBRARIES_KEYWORD} + ${CUDA_cudadevrt_LIBRARY} + ) + endif() + + # We need to set the linker language based on what the expected generated file + # would be. CUDA_C_OR_CXX is computed based on CUDA_HOST_COMPILATION_CPP. + set_target_properties(${cuda_target} + PROPERTIES + LINKER_LANGUAGE ${CUDA_C_OR_CXX} + ) + +endmacro() + + +############################################################################### +############################################################################### +# ADD EXECUTABLE +############################################################################### +############################################################################### +macro(CUDA_ADD_EXECUTABLE cuda_target) + + CUDA_ADD_CUDA_INCLUDE_ONCE() + + # Separate the sources from the options + CUDA_GET_SOURCES_AND_OPTIONS(_sources _cmake_options _options ${ARGN}) + # Create custom commands and targets for each file. 
+  CUDA_WRAP_SRCS( ${cuda_target} OBJ _generated_files ${_sources} OPTIONS ${_options} )
+
+  # Compute the file name of the intermediate link file used for separable
+  # compilation.
+  CUDA_COMPUTE_SEPARABLE_COMPILATION_OBJECT_FILE_NAME(link_file ${cuda_target} "${${cuda_target}_SEPARABLE_COMPILATION_OBJECTS}")
+
+  # Add the executable.
+  add_executable(${cuda_target} ${_cmake_options}
+    ${_generated_files}
+    ${_sources}
+    ${link_file}
+    )
+
+  # Add a link phase for the separable compilation if it has been enabled.  If
+  # it has been enabled then the ${cuda_target}_SEPARABLE_COMPILATION_OBJECTS
+  # variable will have been defined.
+  CUDA_LINK_SEPARABLE_COMPILATION_OBJECTS("${link_file}" ${cuda_target} "${_options}" "${${cuda_target}_SEPARABLE_COMPILATION_OBJECTS}")
+
+  target_link_libraries(${cuda_target} ${CUDA_LINK_LIBRARIES_KEYWORD}
+    ${CUDA_LIBRARIES}
+    )
+
+  # We need to set the linker language based on what the expected generated file
+  # would be.  CUDA_C_OR_CXX is computed based on CUDA_HOST_COMPILATION_CPP.
+  set_target_properties(${cuda_target}
+    PROPERTIES
+    LINKER_LANGUAGE ${CUDA_C_OR_CXX}
+    )
+
+endmacro()
+
+
+###############################################################################
+###############################################################################
+# (Internal) helper for manually added cuda source files with specific targets
+###############################################################################
+###############################################################################
+macro(cuda_compile_base cuda_target format generated_files)
+  # Update a counter in this directory, to keep phony target names unique.
+  set(_cuda_target "${cuda_target}")
+  get_property(_counter DIRECTORY PROPERTY _cuda_internal_phony_counter)
+  if(_counter)
+    math(EXPR _counter "${_counter} + 1")
+  else()
+    set(_counter 1)
+  endif()
+  string(APPEND _cuda_target "_${_counter}")
+  set_property(DIRECTORY PROPERTY _cuda_internal_phony_counter ${_counter})
+
+  # Separate the sources from the options
+  CUDA_GET_SOURCES_AND_OPTIONS(_sources _cmake_options _options ${ARGN})
+
+  # Create custom commands and targets for each file.
+ CUDA_WRAP_SRCS( ${_cuda_target} ${format} _generated_files ${_sources} + ${_cmake_options} OPTIONS ${_options} PHONY) + + set( ${generated_files} ${_generated_files}) + +endmacro() + +############################################################################### +############################################################################### +# CUDA COMPILE +############################################################################### +############################################################################### +macro(CUDA_COMPILE generated_files) + cuda_compile_base(cuda_compile OBJ ${generated_files} ${ARGN}) +endmacro() + +############################################################################### +############################################################################### +# CUDA COMPILE PTX +############################################################################### +############################################################################### +macro(CUDA_COMPILE_PTX generated_files) + cuda_compile_base(cuda_compile_ptx PTX ${generated_files} ${ARGN}) +endmacro() + +############################################################################### +############################################################################### +# CUDA COMPILE FATBIN +############################################################################### +############################################################################### +macro(CUDA_COMPILE_FATBIN generated_files) + cuda_compile_base(cuda_compile_fatbin FATBIN ${generated_files} ${ARGN}) +endmacro() + +############################################################################### +############################################################################### +# CUDA COMPILE CUBIN +############################################################################### +############################################################################### +macro(CUDA_COMPILE_CUBIN generated_files) + cuda_compile_base(cuda_compile_cubin CUBIN ${generated_files} ${ARGN}) +endmacro() + + +############################################################################### +############################################################################### +# CUDA ADD CUFFT TO TARGET +############################################################################### +############################################################################### +macro(CUDA_ADD_CUFFT_TO_TARGET target) + if (CUDA_BUILD_EMULATION) + target_link_libraries(${target} ${CUDA_LINK_LIBRARIES_KEYWORD} ${CUDA_cufftemu_LIBRARY}) + else() + target_link_libraries(${target} ${CUDA_LINK_LIBRARIES_KEYWORD} ${CUDA_cufft_LIBRARY}) + endif() +endmacro() + +############################################################################### +############################################################################### +# CUDA ADD CUBLAS TO TARGET +############################################################################### +############################################################################### +macro(CUDA_ADD_CUBLAS_TO_TARGET target) + if (CUDA_BUILD_EMULATION) + target_link_libraries(${target} ${CUDA_LINK_LIBRARIES_KEYWORD} ${CUDA_cublasemu_LIBRARY}) + else() + target_link_libraries(${target} ${CUDA_LINK_LIBRARIES_KEYWORD} ${CUDA_cublas_LIBRARY} ${CUDA_cublas_device_LIBRARY} ${CUDA_cublasLt_LIBRARY}) + endif() +endmacro() + +############################################################################### +############################################################################### +# 
CUDA BUILD CLEAN TARGET
+###############################################################################
+###############################################################################
+macro(CUDA_BUILD_CLEAN_TARGET)
+  # Call this after you add all your CUDA targets, and you will get a
+  # convenience target.  You should also run "make clean" after building this
+  # target to get the build system to generate all the code again.
+
+  set(cuda_clean_target_name clean_cuda_depends)
+  if (CMAKE_GENERATOR MATCHES "Visual Studio")
+    string(TOUPPER ${cuda_clean_target_name} cuda_clean_target_name)
+  endif()
+  add_custom_target(${cuda_clean_target_name}
+    COMMAND ${CMAKE_COMMAND} -E remove ${CUDA_ADDITIONAL_CLEAN_FILES})
+
+  # Clear out the variable, so the next time we configure it will be empty.
+  # This is useful so that the files won't persist in the list after targets
+  # have been removed.
+  set(CUDA_ADDITIONAL_CLEAN_FILES "" CACHE INTERNAL "List of intermediate files that are part of the cuda dependency scanning.")
+endmacro()
diff --git a/llmeval-env/lib/python3.10/site-packages/torch/share/cmake/Caffe2/Modules_CUDA_fix/upstream/FindPackageHandleStandardArgs.cmake b/llmeval-env/lib/python3.10/site-packages/torch/share/cmake/Caffe2/Modules_CUDA_fix/upstream/FindPackageHandleStandardArgs.cmake
new file mode 100644
index 0000000000000000000000000000000000000000..67f6bd6f2bcd1a0313078a28a07cc584df7b885b
--- /dev/null
+++ b/llmeval-env/lib/python3.10/site-packages/torch/share/cmake/Caffe2/Modules_CUDA_fix/upstream/FindPackageHandleStandardArgs.cmake
@@ -0,0 +1,386 @@
+# Distributed under the OSI-approved BSD 3-Clause License.  See accompanying
+# file Copyright.txt or https://cmake.org/licensing for details.
+
+#[=======================================================================[.rst:
+FindPackageHandleStandardArgs
+-----------------------------
+
+This module provides a function intended to be used in :ref:`Find Modules`
+implementing :command:`find_package(<PackageName>)` calls.  It handles the
+``REQUIRED``, ``QUIET`` and version-related arguments of ``find_package``.
+It also sets the ``<PackageName>_FOUND`` variable.  The package is
+considered found if all variables listed contain valid results, e.g.
+valid filepaths.
+
+.. command:: find_package_handle_standard_args
+
+  There are two signatures::
+
+    find_package_handle_standard_args(<PackageName>
+      (DEFAULT_MSG|<custom-failure-message>)
+      <required-var>...
+      )
+
+    find_package_handle_standard_args(<PackageName>
+      [FOUND_VAR <result-var>]
+      [REQUIRED_VARS <required-var>...]
+      [VERSION_VAR <version-var>]
+      [HANDLE_COMPONENTS]
+      [CONFIG_MODE]
+      [FAIL_MESSAGE <custom-failure-message>]
+      )
+
+  The ``<PackageName>_FOUND`` variable will be set to ``TRUE`` if all
+  the variables ``<required-var>...`` are valid and any optional
+  constraints are satisfied, and ``FALSE`` otherwise.  A success or
+  failure message may be displayed based on the results and on
+  whether the ``REQUIRED`` and/or ``QUIET`` option was given to
+  the :command:`find_package` call.
+
+  The options are:
+
+  ``(DEFAULT_MSG|<custom-failure-message>)``
+    In the simple signature this specifies the failure message.
+    Use ``DEFAULT_MSG`` to ask for a default message to be computed
+    (recommended).  Not valid in the full signature.
+
+  ``FOUND_VAR <result-var>``
+    Obsolete.  Specifies either ``<PackageName>_FOUND`` or
+    ``<PACKAGENAME>_FOUND`` as the result variable.  This exists only
+    for compatibility with older versions of CMake and is now ignored.
+    Result variables of both names are always set for compatibility.
+
+  ``REQUIRED_VARS <required-var>...``
+    Specify the variables which are required for this package.
+    These may be named in the generated failure message asking the
+    user to set the missing variable values.
+    Therefore these should typically be cache entries such as ``FOO_LIBRARY``
+    and not output variables like ``FOO_LIBRARIES``.
+
+  ``VERSION_VAR <version-var>``
+    Specify the name of a variable that holds the version of the package
+    that has been found.  This version will be checked against the
+    (potentially) specified required version given to the
+    :command:`find_package` call, including its ``EXACT`` option.
+    The default messages include information about the required
+    version and the version which has been actually found, both
+    if the version is ok or not.
+
+  ``HANDLE_COMPONENTS``
+    Enable handling of package components.  In this case, the command
+    will report which components have been found and which are missing,
+    and the ``<PackageName>_FOUND`` variable will be set to ``FALSE``
+    if any of the required components (i.e. not the ones listed after
+    the ``OPTIONAL_COMPONENTS`` option of :command:`find_package`) are
+    missing.
+
+  ``CONFIG_MODE``
+    Specify that the calling find module is a wrapper around a
+    call to ``find_package(<PackageName> NO_MODULE)``.  This implies
+    a ``VERSION_VAR`` value of ``<PackageName>_VERSION``.  The command
+    will automatically check whether the package configuration file
+    was found.
+
+  ``FAIL_MESSAGE <custom-failure-message>``
+    Specify a custom failure message instead of using the default
+    generated message.  Not recommended.
+
+Example for the simple signature:
+
+.. code-block:: cmake
+
+  find_package_handle_standard_args(LibXml2 DEFAULT_MSG
+    LIBXML2_LIBRARY LIBXML2_INCLUDE_DIR)
+
+The ``LibXml2`` package is considered to be found if both
+``LIBXML2_LIBRARY`` and ``LIBXML2_INCLUDE_DIR`` are valid.
+Then also ``LibXml2_FOUND`` is set to ``TRUE``.  If it is not found
+and ``REQUIRED`` was used, it fails with a
+:command:`message(FATAL_ERROR)`, independent whether ``QUIET`` was
+used or not.  If it is found, success will be reported, including
+the content of the first ``<required-var>``.  On repeated CMake runs,
+the same message will not be printed again.
+
+Example for the full signature:
+
+.. code-block:: cmake
+
+  find_package_handle_standard_args(LibArchive
+    REQUIRED_VARS LibArchive_LIBRARY LibArchive_INCLUDE_DIR
+    VERSION_VAR LibArchive_VERSION)
+
+In this case, the ``LibArchive`` package is considered to be found if
+both ``LibArchive_LIBRARY`` and ``LibArchive_INCLUDE_DIR`` are valid.
+Also the version of ``LibArchive`` will be checked by using the version
+contained in ``LibArchive_VERSION``.  Since no ``FAIL_MESSAGE`` is given,
+the default messages will be printed.
+
+Another example for the full signature:
+
+.. code-block:: cmake
+
+  find_package(Automoc4 QUIET NO_MODULE HINTS /opt/automoc4)
+  find_package_handle_standard_args(Automoc4 CONFIG_MODE)
+
+In this case, a ``FindAutomoc4.cmake`` module wraps a call to
+``find_package(Automoc4 NO_MODULE)`` and adds an additional search
+directory for ``automoc4``.  Then the call to
+``find_package_handle_standard_args`` produces a proper success/failure
+message.
+#]=======================================================================] + +include(${CMAKE_CURRENT_LIST_DIR}/FindPackageMessage.cmake) + +# internal helper macro +macro(_FPHSA_FAILURE_MESSAGE _msg) + if (${_NAME}_FIND_REQUIRED) + message(FATAL_ERROR "${_msg}") + else () + if (NOT ${_NAME}_FIND_QUIETLY) + message(STATUS "${_msg}") + endif () + endif () +endmacro() + + +# internal helper macro to generate the failure message when used in CONFIG_MODE: +macro(_FPHSA_HANDLE_FAILURE_CONFIG_MODE) + # _CONFIG is set, but FOUND is false, this means that some other of the REQUIRED_VARS was not found: + if(${_NAME}_CONFIG) + _FPHSA_FAILURE_MESSAGE("${FPHSA_FAIL_MESSAGE}: missing:${MISSING_VARS} (found ${${_NAME}_CONFIG} ${VERSION_MSG})") + else() + # If _CONSIDERED_CONFIGS is set, the config-file has been found, but no suitable version. + # List them all in the error message: + if(${_NAME}_CONSIDERED_CONFIGS) + set(configsText "") + list(LENGTH ${_NAME}_CONSIDERED_CONFIGS configsCount) + math(EXPR configsCount "${configsCount} - 1") + foreach(currentConfigIndex RANGE ${configsCount}) + list(GET ${_NAME}_CONSIDERED_CONFIGS ${currentConfigIndex} filename) + list(GET ${_NAME}_CONSIDERED_VERSIONS ${currentConfigIndex} version) + string(APPEND configsText " ${filename} (version ${version})\n") + endforeach() + if (${_NAME}_NOT_FOUND_MESSAGE) + string(APPEND configsText " Reason given by package: ${${_NAME}_NOT_FOUND_MESSAGE}\n") + endif() + _FPHSA_FAILURE_MESSAGE("${FPHSA_FAIL_MESSAGE} ${VERSION_MSG}, checked the following files:\n${configsText}") + + else() + # Simple case: No Config-file was found at all: + _FPHSA_FAILURE_MESSAGE("${FPHSA_FAIL_MESSAGE}: found neither ${_NAME}Config.cmake nor ${_NAME_LOWER}-config.cmake ${VERSION_MSG}") + endif() + endif() +endmacro() + + +function(FIND_PACKAGE_HANDLE_STANDARD_ARGS _NAME _FIRST_ARG) + +# Set up the arguments for `cmake_parse_arguments`. 
+ set(options CONFIG_MODE HANDLE_COMPONENTS) + set(oneValueArgs FAIL_MESSAGE VERSION_VAR FOUND_VAR) + set(multiValueArgs REQUIRED_VARS) + +# Check whether we are in 'simple' or 'extended' mode: + set(_KEYWORDS_FOR_EXTENDED_MODE ${options} ${oneValueArgs} ${multiValueArgs} ) + list(FIND _KEYWORDS_FOR_EXTENDED_MODE "${_FIRST_ARG}" INDEX) + + if(${INDEX} EQUAL -1) + set(FPHSA_FAIL_MESSAGE ${_FIRST_ARG}) + set(FPHSA_REQUIRED_VARS ${ARGN}) + set(FPHSA_VERSION_VAR) + else() + cmake_parse_arguments(FPHSA "${options}" "${oneValueArgs}" "${multiValueArgs}" ${_FIRST_ARG} ${ARGN}) + + if(FPHSA_UNPARSED_ARGUMENTS) + message(FATAL_ERROR "Unknown keywords given to FIND_PACKAGE_HANDLE_STANDARD_ARGS(): \"${FPHSA_UNPARSED_ARGUMENTS}\"") + endif() + + if(NOT FPHSA_FAIL_MESSAGE) + set(FPHSA_FAIL_MESSAGE "DEFAULT_MSG") + endif() + + # In config-mode, we rely on the variable _CONFIG, which is set by find_package() + # when it successfully found the config-file, including version checking: + if(FPHSA_CONFIG_MODE) + list(INSERT FPHSA_REQUIRED_VARS 0 ${_NAME}_CONFIG) + list(REMOVE_DUPLICATES FPHSA_REQUIRED_VARS) + set(FPHSA_VERSION_VAR ${_NAME}_VERSION) + endif() + + if(NOT FPHSA_REQUIRED_VARS) + message(FATAL_ERROR "No REQUIRED_VARS specified for FIND_PACKAGE_HANDLE_STANDARD_ARGS()") + endif() + endif() + +# now that we collected all arguments, process them + + if("x${FPHSA_FAIL_MESSAGE}" STREQUAL "xDEFAULT_MSG") + set(FPHSA_FAIL_MESSAGE "Could NOT find ${_NAME}") + endif() + + list(GET FPHSA_REQUIRED_VARS 0 _FIRST_REQUIRED_VAR) + + string(TOUPPER ${_NAME} _NAME_UPPER) + string(TOLOWER ${_NAME} _NAME_LOWER) + + if(FPHSA_FOUND_VAR) + if(FPHSA_FOUND_VAR MATCHES "^${_NAME}_FOUND$" OR FPHSA_FOUND_VAR MATCHES "^${_NAME_UPPER}_FOUND$") + set(_FOUND_VAR ${FPHSA_FOUND_VAR}) + else() + message(FATAL_ERROR "The argument for FOUND_VAR is \"${FPHSA_FOUND_VAR}\", but only \"${_NAME}_FOUND\" and \"${_NAME_UPPER}_FOUND\" are valid names.") + endif() + else() + set(_FOUND_VAR ${_NAME_UPPER}_FOUND) + endif() + + # collect all variables which were not found, so they can be printed, so the + # user knows better what went wrong (#6375) + set(MISSING_VARS "") + set(DETAILS "") + # check if all passed variables are valid + set(FPHSA_FOUND_${_NAME} TRUE) + foreach(_CURRENT_VAR ${FPHSA_REQUIRED_VARS}) + if(NOT ${_CURRENT_VAR}) + set(FPHSA_FOUND_${_NAME} FALSE) + string(APPEND MISSING_VARS " ${_CURRENT_VAR}") + else() + string(APPEND DETAILS "[${${_CURRENT_VAR}}]") + endif() + endforeach() + if(FPHSA_FOUND_${_NAME}) + set(${_NAME}_FOUND TRUE) + set(${_NAME_UPPER}_FOUND TRUE) + else() + set(${_NAME}_FOUND FALSE) + set(${_NAME_UPPER}_FOUND FALSE) + endif() + + # component handling + unset(FOUND_COMPONENTS_MSG) + unset(MISSING_COMPONENTS_MSG) + + if(FPHSA_HANDLE_COMPONENTS) + foreach(comp ${${_NAME}_FIND_COMPONENTS}) + if(${_NAME}_${comp}_FOUND) + + if(NOT DEFINED FOUND_COMPONENTS_MSG) + set(FOUND_COMPONENTS_MSG "found components: ") + endif() + string(APPEND FOUND_COMPONENTS_MSG " ${comp}") + + else() + + if(NOT DEFINED MISSING_COMPONENTS_MSG) + set(MISSING_COMPONENTS_MSG "missing components: ") + endif() + string(APPEND MISSING_COMPONENTS_MSG " ${comp}") + + if(${_NAME}_FIND_REQUIRED_${comp}) + set(${_NAME}_FOUND FALSE) + string(APPEND MISSING_VARS " ${comp}") + endif() + + endif() + endforeach() + set(COMPONENT_MSG "${FOUND_COMPONENTS_MSG} ${MISSING_COMPONENTS_MSG}") + string(APPEND DETAILS "[c${COMPONENT_MSG}]") + endif() + + # version handling: + set(VERSION_MSG "") + set(VERSION_OK TRUE) + + # check with DEFINED here as the requested 
or found version may be "0" + if (DEFINED ${_NAME}_FIND_VERSION) + if(DEFINED ${FPHSA_VERSION_VAR}) + set(_FOUND_VERSION ${${FPHSA_VERSION_VAR}}) + + if(${_NAME}_FIND_VERSION_EXACT) # exact version required + # count the dots in the version string + string(REGEX REPLACE "[^.]" "" _VERSION_DOTS "${_FOUND_VERSION}") + # add one dot because there is one dot more than there are components + string(LENGTH "${_VERSION_DOTS}." _VERSION_DOTS) + if (_VERSION_DOTS GREATER ${_NAME}_FIND_VERSION_COUNT) + # Because of the C++ implementation of find_package() ${_NAME}_FIND_VERSION_COUNT + # is at most 4 here. Therefore a simple lookup table is used. + if (${_NAME}_FIND_VERSION_COUNT EQUAL 1) + set(_VERSION_REGEX "[^.]*") + elseif (${_NAME}_FIND_VERSION_COUNT EQUAL 2) + set(_VERSION_REGEX "[^.]*\\.[^.]*") + elseif (${_NAME}_FIND_VERSION_COUNT EQUAL 3) + set(_VERSION_REGEX "[^.]*\\.[^.]*\\.[^.]*") + else () + set(_VERSION_REGEX "[^.]*\\.[^.]*\\.[^.]*\\.[^.]*") + endif () + string(REGEX REPLACE "^(${_VERSION_REGEX})\\..*" "\\1" _VERSION_HEAD "${_FOUND_VERSION}") + unset(_VERSION_REGEX) + if (NOT ${_NAME}_FIND_VERSION VERSION_EQUAL _VERSION_HEAD) + set(VERSION_MSG "Found unsuitable version \"${_FOUND_VERSION}\", but required is exact version \"${${_NAME}_FIND_VERSION}\"") + set(VERSION_OK FALSE) + else () + set(VERSION_MSG "(found suitable exact version \"${_FOUND_VERSION}\")") + endif () + unset(_VERSION_HEAD) + else () + if (NOT ${_NAME}_FIND_VERSION VERSION_EQUAL _FOUND_VERSION) + set(VERSION_MSG "Found unsuitable version \"${_FOUND_VERSION}\", but required is exact version \"${${_NAME}_FIND_VERSION}\"") + set(VERSION_OK FALSE) + else () + set(VERSION_MSG "(found suitable exact version \"${_FOUND_VERSION}\")") + endif () + endif () + unset(_VERSION_DOTS) + + else() # minimum version specified: + if (${_NAME}_FIND_VERSION VERSION_GREATER _FOUND_VERSION) + set(VERSION_MSG "Found unsuitable version \"${_FOUND_VERSION}\", but required is at least \"${${_NAME}_FIND_VERSION}\"") + set(VERSION_OK FALSE) + else () + set(VERSION_MSG "(found suitable version \"${_FOUND_VERSION}\", minimum required is \"${${_NAME}_FIND_VERSION}\")") + endif () + endif() + + else() + + # if the package was not found, but a version was given, add that to the output: + if(${_NAME}_FIND_VERSION_EXACT) + set(VERSION_MSG "(Required is exact version \"${${_NAME}_FIND_VERSION}\")") + else() + set(VERSION_MSG "(Required is at least version \"${${_NAME}_FIND_VERSION}\")") + endif() + + endif() + else () + # Check with DEFINED as the found version may be 0. 
+ if(DEFINED ${FPHSA_VERSION_VAR}) + set(VERSION_MSG "(found version \"${${FPHSA_VERSION_VAR}}\")") + endif() + endif () + + if(VERSION_OK) + string(APPEND DETAILS "[v${${FPHSA_VERSION_VAR}}(${${_NAME}_FIND_VERSION})]") + else() + set(${_NAME}_FOUND FALSE) + endif() + + + # print the result: + if (${_NAME}_FOUND) + FIND_PACKAGE_MESSAGE(${_NAME} "Found ${_NAME}: ${${_FIRST_REQUIRED_VAR}} ${VERSION_MSG} ${COMPONENT_MSG}" "${DETAILS}") + else () + + if(FPHSA_CONFIG_MODE) + _FPHSA_HANDLE_FAILURE_CONFIG_MODE() + else() + if(NOT VERSION_OK) + _FPHSA_FAILURE_MESSAGE("${FPHSA_FAIL_MESSAGE}: ${VERSION_MSG} (found ${${_FIRST_REQUIRED_VAR}})") + else() + _FPHSA_FAILURE_MESSAGE("${FPHSA_FAIL_MESSAGE} (missing:${MISSING_VARS}) ${VERSION_MSG}") + endif() + endif() + + endif () + + set(${_NAME}_FOUND ${${_NAME}_FOUND} PARENT_SCOPE) + set(${_NAME_UPPER}_FOUND ${${_NAME}_FOUND} PARENT_SCOPE) +endfunction() diff --git a/llmeval-env/lib/python3.10/site-packages/torch/share/cmake/Caffe2/Modules_CUDA_fix/upstream/FindPackageMessage.cmake b/llmeval-env/lib/python3.10/site-packages/torch/share/cmake/Caffe2/Modules_CUDA_fix/upstream/FindPackageMessage.cmake new file mode 100644 index 0000000000000000000000000000000000000000..6821cee4f77a9d84c74f2c140870a2163ae5a5f0 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/share/cmake/Caffe2/Modules_CUDA_fix/upstream/FindPackageMessage.cmake @@ -0,0 +1,47 @@ +# Distributed under the OSI-approved BSD 3-Clause License. See accompanying +# file Copyright.txt or https://cmake.org/licensing for details. + +#.rst: +# FindPackageMessage +# ------------------ +# +# +# +# FIND_PACKAGE_MESSAGE( "message for user" "find result details") +# +# This macro is intended to be used in FindXXX.cmake modules files. It +# will print a message once for each unique find result. This is useful +# for telling the user where a package was found. The first argument +# specifies the name (XXX) of the package. The second argument +# specifies the message to display. The third argument lists details +# about the find result so that if they change the message will be +# displayed again. The macro also obeys the QUIET argument to the +# find_package command. +# +# Example: +# +# :: +# +# if(X11_FOUND) +# FIND_PACKAGE_MESSAGE(X11 "Found X11: ${X11_X11_LIB}" +# "[${X11_X11_LIB}][${X11_INCLUDE_DIR}]") +# else() +# ... +# endif() + +function(FIND_PACKAGE_MESSAGE pkg msg details) + # Avoid printing a message repeatedly for the same find result. + if(NOT ${pkg}_FIND_QUIETLY) + string(REPLACE "\n" "" details "${details}") + set(DETAILS_VAR FIND_PACKAGE_MESSAGE_DETAILS_${pkg}) + if(NOT "${details}" STREQUAL "${${DETAILS_VAR}}") + # The message has not yet been printed. + message(STATUS "${msg}") + + # Save the find details in the cache to avoid printing the same + # message again. 
+      set("${DETAILS_VAR}" "${details}"
+        CACHE INTERNAL "Details about finding ${pkg}")
+    endif()
+  endif()
+endfunction()
diff --git a/llmeval-env/lib/python3.10/site-packages/torch/share/cmake/Caffe2/public/LoadHIP.cmake b/llmeval-env/lib/python3.10/site-packages/torch/share/cmake/Caffe2/public/LoadHIP.cmake
new file mode 100644
index 0000000000000000000000000000000000000000..f6ca263c5e5b8ec08a9a3400d4590f9e8c0b22e9
--- /dev/null
+++ b/llmeval-env/lib/python3.10/site-packages/torch/share/cmake/Caffe2/public/LoadHIP.cmake
@@ -0,0 +1,301 @@
+set(PYTORCH_FOUND_HIP FALSE)
+
+if(NOT DEFINED ENV{ROCM_PATH})
+  set(ROCM_PATH /opt/rocm)
+else()
+  set(ROCM_PATH $ENV{ROCM_PATH})
+endif()
+if(NOT DEFINED ENV{ROCM_INCLUDE_DIRS})
+  set(ROCM_INCLUDE_DIRS ${ROCM_PATH}/include)
+else()
+  set(ROCM_INCLUDE_DIRS $ENV{ROCM_INCLUDE_DIRS})
+endif()
+
+if(NOT EXISTS ${ROCM_PATH})
+  return()
+endif()
+
+# MAGMA_HOME
+if(NOT DEFINED ENV{MAGMA_HOME})
+  set(MAGMA_HOME ${ROCM_PATH}/magma)
+  set(ENV{MAGMA_HOME} ${ROCM_PATH}/magma)
+else()
+  set(MAGMA_HOME $ENV{MAGMA_HOME})
+endif()
+
+torch_hip_get_arch_list(PYTORCH_ROCM_ARCH)
+if(PYTORCH_ROCM_ARCH STREQUAL "")
+  message(FATAL_ERROR "No GPU arch specified for ROCm build. Please use the PYTORCH_ROCM_ARCH environment variable to specify GPU archs to build for.")
+endif()
+message("Building PyTorch for GPU arch: ${PYTORCH_ROCM_ARCH}")
+
+# Add HIP to the CMAKE Module Path
+set(CMAKE_MODULE_PATH ${ROCM_PATH}/lib/cmake/hip ${CMAKE_MODULE_PATH})
+
+macro(find_package_and_print_version PACKAGE_NAME)
+  find_package("${PACKAGE_NAME}" ${ARGN})
+  message("${PACKAGE_NAME} VERSION: ${${PACKAGE_NAME}_VERSION}")
+endmacro()
+
+# Find the HIP Package
+find_package_and_print_version(HIP 1.0)
+
+if(HIP_FOUND)
+  set(PYTORCH_FOUND_HIP TRUE)
+  set(FOUND_ROCM_VERSION_H FALSE)
+
+  set(PROJECT_RANDOM_BINARY_DIR "${PROJECT_BINARY_DIR}")
+  set(file "${PROJECT_BINARY_DIR}/detect_rocm_version.cc")
+
+  # Find the ROCm version for checks.
+  # ROCm 5.0 and later provide a header API for version management.
+  if(EXISTS ${ROCM_INCLUDE_DIRS}/rocm_version.h)
+    set(FOUND_ROCM_VERSION_H TRUE)
+    file(WRITE ${file} ""
+      "#include <rocm_version.h>\n"
+      )
+  elseif(EXISTS ${ROCM_INCLUDE_DIRS}/rocm-core/rocm_version.h)
+    set(FOUND_ROCM_VERSION_H TRUE)
+    file(WRITE ${file} ""
+      "#include <rocm-core/rocm_version.h>\n"
+      )
+  else()
+    message("********************* rocm_version.h couldn't be found ******************\n")
+  endif()
+
+  if(FOUND_ROCM_VERSION_H)
+    file(APPEND ${file} ""
+      "#include <cstdio>\n"
+
+      "#ifndef ROCM_VERSION_PATCH\n"
+      "#define ROCM_VERSION_PATCH 0\n"
+      "#endif\n"
+      "#define STRINGIFYHELPER(x) #x\n"
+      "#define STRINGIFY(x) STRINGIFYHELPER(x)\n"
+      "int main() {\n"
+      "  printf(\"%d.%d.%s\", ROCM_VERSION_MAJOR, ROCM_VERSION_MINOR, STRINGIFY(ROCM_VERSION_PATCH));\n"
+      "  return 0;\n"
+      "}\n"
+      )
+
+    try_run(run_result compile_result ${PROJECT_RANDOM_BINARY_DIR} ${file}
+      CMAKE_FLAGS "-DINCLUDE_DIRECTORIES=${ROCM_INCLUDE_DIRS}"
+      RUN_OUTPUT_VARIABLE rocm_version_from_header
+      COMPILE_OUTPUT_VARIABLE output_var
+      )
+    # We expect the compile to be successful if the include directory exists.
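+    # Illustrative note: the tiny program generated above just prints the version
+    # triple from rocm_version.h, e.g. "6.0.2" (a made-up value); try_run captures
+    # it in rocm_version_from_header, and the code below parses it into
+    # ROCM_VERSION_DEV_MAJOR/MINOR/PATCH plus the integer ROCM_VERSION_DEV_INT.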
+ if(NOT compile_result) + message(FATAL_ERROR "Caffe2: Couldn't determine version from header: " ${output_var}) + endif() + message(STATUS "Caffe2: Header version is: " ${rocm_version_from_header}) + set(ROCM_VERSION_DEV_RAW ${rocm_version_from_header}) + message("\n***** ROCm version from rocm_version.h ****\n") + endif() + + string(REGEX MATCH "^([0-9]+)\.([0-9]+)\.([0-9]+).*$" ROCM_VERSION_DEV_MATCH ${ROCM_VERSION_DEV_RAW}) + + if(ROCM_VERSION_DEV_MATCH) + set(ROCM_VERSION_DEV_MAJOR ${CMAKE_MATCH_1}) + set(ROCM_VERSION_DEV_MINOR ${CMAKE_MATCH_2}) + set(ROCM_VERSION_DEV_PATCH ${CMAKE_MATCH_3}) + set(ROCM_VERSION_DEV "${ROCM_VERSION_DEV_MAJOR}.${ROCM_VERSION_DEV_MINOR}.${ROCM_VERSION_DEV_PATCH}") + math(EXPR ROCM_VERSION_DEV_INT "(${ROCM_VERSION_DEV_MAJOR}*10000) + (${ROCM_VERSION_DEV_MINOR}*100) + ${ROCM_VERSION_DEV_PATCH}") + endif() + + message("ROCM_VERSION_DEV: ${ROCM_VERSION_DEV}") + message("ROCM_VERSION_DEV_MAJOR: ${ROCM_VERSION_DEV_MAJOR}") + message("ROCM_VERSION_DEV_MINOR: ${ROCM_VERSION_DEV_MINOR}") + message("ROCM_VERSION_DEV_PATCH: ${ROCM_VERSION_DEV_PATCH}") + message("ROCM_VERSION_DEV_INT: ${ROCM_VERSION_DEV_INT}") + + math(EXPR TORCH_HIP_VERSION "(${HIP_VERSION_MAJOR} * 100) + ${HIP_VERSION_MINOR}") + message("HIP_VERSION_MAJOR: ${HIP_VERSION_MAJOR}") + message("HIP_VERSION_MINOR: ${HIP_VERSION_MINOR}") + message("TORCH_HIP_VERSION: ${TORCH_HIP_VERSION}") + + message("\n***** Library versions from dpkg *****\n") + execute_process(COMMAND dpkg -l COMMAND grep rocm-dev COMMAND awk "{print $2 \" VERSION: \" $3}") + execute_process(COMMAND dpkg -l COMMAND grep rocm-libs COMMAND awk "{print $2 \" VERSION: \" $3}") + execute_process(COMMAND dpkg -l COMMAND grep hsakmt-roct COMMAND awk "{print $2 \" VERSION: \" $3}") + execute_process(COMMAND dpkg -l COMMAND grep rocr-dev COMMAND awk "{print $2 \" VERSION: \" $3}") + execute_process(COMMAND dpkg -l COMMAND grep -w hcc COMMAND awk "{print $2 \" VERSION: \" $3}") + execute_process(COMMAND dpkg -l COMMAND grep hip-base COMMAND awk "{print $2 \" VERSION: \" $3}") + execute_process(COMMAND dpkg -l COMMAND grep hip_hcc COMMAND awk "{print $2 \" VERSION: \" $3}") + + message("\n***** Library versions from cmake find_package *****\n") + + set(CMAKE_HIP_CLANG_FLAGS_DEBUG ${CMAKE_CXX_FLAGS_DEBUG}) + set(CMAKE_HIP_CLANG_FLAGS_RELEASE ${CMAKE_CXX_FLAGS_RELEASE}) + ### Remove setting of Flags when FindHIP.CMake PR #558 is accepted.### + + set(hip_DIR ${ROCM_PATH}/lib/cmake/hip) + set(hsa-runtime64_DIR ${ROCM_PATH}/lib/cmake/hsa-runtime64) + set(AMDDeviceLibs_DIR ${ROCM_PATH}/lib/cmake/AMDDeviceLibs) + set(amd_comgr_DIR ${ROCM_PATH}/lib/cmake/amd_comgr) + set(rocrand_DIR ${ROCM_PATH}/lib/cmake/rocrand) + set(hiprand_DIR ${ROCM_PATH}/lib/cmake/hiprand) + set(rocblas_DIR ${ROCM_PATH}/lib/cmake/rocblas) + set(hipblas_DIR ${ROCM_PATH}/lib/cmake/hipblas) + set(hipblaslt_DIR ${ROCM_PATH}/lib/cmake/hipblaslt) + set(miopen_DIR ${ROCM_PATH}/lib/cmake/miopen) + set(rocfft_DIR ${ROCM_PATH}/lib/cmake/rocfft) + set(hipfft_DIR ${ROCM_PATH}/lib/cmake/hipfft) + set(hipsparse_DIR ${ROCM_PATH}/lib/cmake/hipsparse) + set(rccl_DIR ${ROCM_PATH}/lib/cmake/rccl) + set(rocprim_DIR ${ROCM_PATH}/lib/cmake/rocprim) + set(hipcub_DIR ${ROCM_PATH}/lib/cmake/hipcub) + set(rocthrust_DIR ${ROCM_PATH}/lib/cmake/rocthrust) + set(hipsolver_DIR ${ROCM_PATH}/lib/cmake/hipsolver) + + + find_package_and_print_version(hip REQUIRED) + find_package_and_print_version(hsa-runtime64 REQUIRED) + find_package_and_print_version(amd_comgr REQUIRED) + find_package_and_print_version(rocrand 
REQUIRED)
+  find_package_and_print_version(hiprand REQUIRED)
+  find_package_and_print_version(rocblas REQUIRED)
+  find_package_and_print_version(hipblas REQUIRED)
+  if(ROCM_VERSION_DEV VERSION_GREATER_EQUAL "5.7.0")
+    find_package_and_print_version(hipblaslt REQUIRED)
+  endif()
+  find_package_and_print_version(miopen REQUIRED)
+  if(ROCM_VERSION_DEV VERSION_GREATER_EQUAL "4.1.0")
+    find_package_and_print_version(hipfft REQUIRED)
+  else()
+    find_package_and_print_version(rocfft REQUIRED)
+  endif()
+  find_package_and_print_version(hipsparse REQUIRED)
+  find_package_and_print_version(rccl)
+  find_package_and_print_version(rocprim REQUIRED)
+  find_package_and_print_version(hipcub REQUIRED)
+  find_package_and_print_version(rocthrust REQUIRED)
+  find_package_and_print_version(hipsolver REQUIRED)
+
+
+  find_library(PYTORCH_HIP_LIBRARIES amdhip64 HINTS ${ROCM_PATH}/lib)
+  # TODO: miopen_LIBRARIES should return fullpath to the library file,
+  # however currently it's just the lib name
+  if(TARGET ${miopen_LIBRARIES})
+    set(PYTORCH_MIOPEN_LIBRARIES ${miopen_LIBRARIES})
+  else()
+    find_library(PYTORCH_MIOPEN_LIBRARIES ${miopen_LIBRARIES} HINTS ${ROCM_PATH}/lib)
+  endif()
+  # TODO: rccl_LIBRARIES should return fullpath to the library file,
+  # however currently it's just the lib name
+  if(TARGET ${rccl_LIBRARIES})
+    set(PYTORCH_RCCL_LIBRARIES ${rccl_LIBRARIES})
+  else()
+    find_library(PYTORCH_RCCL_LIBRARIES ${rccl_LIBRARIES} HINTS ${ROCM_PATH}/lib)
+  endif()
+  find_library(ROCM_HIPRTC_LIB hiprtc HINTS ${ROCM_PATH}/lib)
+  # roctx is part of roctracer
+  find_library(ROCM_ROCTX_LIB roctx64 HINTS ${ROCM_PATH}/lib)
+
+  if(ROCM_VERSION_DEV VERSION_GREATER_EQUAL "5.7.0")
+    # check whether hipblaslt is using its own datatype
+    set(file "${PROJECT_BINARY_DIR}/hipblaslt_test_data_type.cc")
+    file(WRITE ${file} ""
+      "#include <hipblaslt/hipblaslt.h>\n"
+      "int main() {\n"
+      "  hipblasltDatatype_t bar = HIPBLASLT_R_16F;\n"
+      "  return 0;\n"
+      "}\n"
+      )
+
+    try_compile(hipblaslt_compile_result_custom_datatype ${PROJECT_RANDOM_BINARY_DIR} ${file}
+      CMAKE_FLAGS "-DINCLUDE_DIRECTORIES=${ROCM_INCLUDE_DIRS}"
+      COMPILE_DEFINITIONS -D__HIP_PLATFORM_AMD__ -D__HIP_PLATFORM_HCC__
+      OUTPUT_VARIABLE hipblaslt_compile_output)
+
+    if(hipblaslt_compile_result_custom_datatype)
+      set(HIPBLASLT_CUSTOM_DATA_TYPE ON)
+      #message("hipblaslt is using custom data type: ${hipblaslt_compile_output}")
+      message("hipblaslt is using custom data type")
+    else()
+      set(HIPBLASLT_CUSTOM_DATA_TYPE OFF)
+      #message("hipblaslt is NOT using custom data type: ${hipblaslt_compile_output}")
+      message("hipblaslt is NOT using custom data type")
+    endif()
+
+    # check whether hipblaslt is using its own compute type
+    set(file "${PROJECT_BINARY_DIR}/hipblaslt_test_compute_type.cc")
+    file(WRITE ${file} ""
+      "#include <hipblaslt/hipblaslt.h>\n"
+      "int main() {\n"
+      "  hipblasLtComputeType_t baz = HIPBLASLT_COMPUTE_F32;\n"
+      "  return 0;\n"
+      "}\n"
+      )
+
+    try_compile(hipblaslt_compile_result_custom_compute_type ${PROJECT_RANDOM_BINARY_DIR} ${file}
+      CMAKE_FLAGS "-DINCLUDE_DIRECTORIES=${ROCM_INCLUDE_DIRS}"
+      COMPILE_DEFINITIONS -D__HIP_PLATFORM_AMD__ -D__HIP_PLATFORM_HCC__
+      OUTPUT_VARIABLE hipblaslt_compile_output)
+
+    if(hipblaslt_compile_result_custom_compute_type)
+      set(HIPBLASLT_CUSTOM_COMPUTE_TYPE ON)
+      #message("hipblaslt is using custom compute type: ${hipblaslt_compile_output}")
+      message("hipblaslt is using custom compute type")
+    else()
+      set(HIPBLASLT_CUSTOM_COMPUTE_TYPE OFF)
+      #message("hipblaslt is NOT using custom compute type: ${hipblaslt_compile_output}")
+      message("hipblaslt is NOT using custom compute type")
+    endif()
+
+    # check whether hipblaslt provides getIndexFromAlgo
+    set(file "${PROJECT_BINARY_DIR}/hipblaslt_test_getIndexFromAlgo.cc")
+    file(WRITE ${file} ""
+      "#include <hipblaslt/hipblaslt.h>\n"
+      "#include <hipblaslt/hipblaslt-ext.hpp>\n"
+      "int main() {\n"
+      "  hipblasLtMatmulAlgo_t algo;\n"
+      "  return hipblaslt_ext::getIndexFromAlgo(algo);\n"
+      "  return 0;\n"
+      "}\n"
+      )
+
+    try_compile(hipblaslt_compile_result_getindexfromalgo ${PROJECT_RANDOM_BINARY_DIR} ${file}
+      CMAKE_FLAGS
+      "-DINCLUDE_DIRECTORIES=${ROCM_INCLUDE_DIRS}"
+      "-DLINK_DIRECTORIES=${ROCM_PATH}/lib"
+      LINK_LIBRARIES ${hipblaslt_LIBRARIES}
+      COMPILE_DEFINITIONS -D__HIP_PLATFORM_AMD__ -D__HIP_PLATFORM_HCC__
+      OUTPUT_VARIABLE hipblaslt_compile_output)
+
+    if(hipblaslt_compile_result_getindexfromalgo)
+      set(HIPBLASLT_HAS_GETINDEXFROMALGO ON)
+      #message("hipblaslt provides getIndexFromAlgo: ${hipblaslt_compile_output}")
+      message("hipblaslt provides getIndexFromAlgo")
+    else()
+      set(HIPBLASLT_HAS_GETINDEXFROMALGO OFF)
+      #message("hipblaslt does not provide getIndexFromAlgo: ${hipblaslt_compile_output}")
+      message("hipblaslt does not provide getIndexFromAlgo")
+    endif()
+  endif()
+
+  # check whether HIP declares new types
+  set(file "${PROJECT_BINARY_DIR}/hip_new_types.cc")
+  file(WRITE ${file} ""
+    "#include <hip/library_types.h>\n"
+    "int main() {\n"
+    "  hipDataType baz = HIP_R_8F_E4M3_FNUZ;\n"
+    "  return 0;\n"
+    "}\n"
+    )
+
+  try_compile(hipblaslt_compile_result ${PROJECT_RANDOM_BINARY_DIR} ${file}
+    CMAKE_FLAGS "-DINCLUDE_DIRECTORIES=${ROCM_INCLUDE_DIRS}"
+    COMPILE_DEFINITIONS -D__HIP_PLATFORM_AMD__ -D__HIP_PLATFORM_HCC__
+    OUTPUT_VARIABLE hipblaslt_compile_output)
+
+  if(hipblaslt_compile_result)
+    set(HIP_NEW_TYPE_ENUMS ON)
+    #message("HIP is using new type enums: ${hipblaslt_compile_output}")
+    message("HIP is using new type enums")
+  else()
+    set(HIP_NEW_TYPE_ENUMS OFF)
+    #message("HIP is NOT using new type enums: ${hipblaslt_compile_output}")
+    message("HIP is NOT using new type enums")
+  endif()
+
+endif()
diff --git a/llmeval-env/lib/python3.10/site-packages/torch/share/cmake/Caffe2/public/cuda.cmake b/llmeval-env/lib/python3.10/site-packages/torch/share/cmake/Caffe2/public/cuda.cmake
new file mode 100644
index 0000000000000000000000000000000000000000..8160b5e1fa88b83ca1eb4c2fb416571e99f54884
--- /dev/null
+++ b/llmeval-env/lib/python3.10/site-packages/torch/share/cmake/Caffe2/public/cuda.cmake
@@ -0,0 +1,398 @@
+# ---[ cuda
+
+# Poor man's include guard
+if(TARGET torch::cudart)
+  return()
+endif()
+
+# sccache is only supported in CMake master and not in the newest official
+# release (3.11.3) yet. Hence we need our own Modules_CUDA_fix to enable sccache.
+list(APPEND CMAKE_MODULE_PATH ${CMAKE_CURRENT_LIST_DIR}/../Modules_CUDA_fix)
+
+# We don't want to statically link cudart, because we rely on its dynamic linkage in
+# python (follow along torch/cuda/__init__.py and usage of cudaGetErrorName).
+# Technically, we can link cudart here statically, and link libtorch_python.so
+# to a dynamic libcudart.so, but that's just wasteful.
+# However, on Windows, if this one gets switched off, the error "cuda: unknown error"
+# will be raised when running the following code:
+# >>> import torch
+# >>> torch.cuda.is_available()
+# >>> torch.cuda.current_device()
+# More details can be found in the following links.
+# https://github.com/pytorch/pytorch/issues/20635
+# https://github.com/pytorch/pytorch/issues/17108
+if(NOT MSVC)
+  set(CUDA_USE_STATIC_CUDA_RUNTIME OFF CACHE INTERNAL "")
+endif()
+
+# Find CUDA.
+find_package(CUDA)
+if(NOT CUDA_FOUND)
+  message(WARNING
+    "Caffe2: CUDA cannot be found. Depending on whether you are building "
+    "Caffe2 or a Caffe2 dependent library, the next warning / error will "
+    "give you more info.")
+  set(CAFFE2_USE_CUDA OFF)
+  return()
+endif()
+
+# Enable CUDA language support
+set(CUDAToolkit_ROOT "${CUDA_TOOLKIT_ROOT_DIR}")
+# Pass clang as host compiler, which according to the docs
+# must be done before the CUDA language is enabled, see
+# https://cmake.org/cmake/help/v3.15/variable/CMAKE_CUDA_HOST_COMPILER.html
+if("${CMAKE_CXX_COMPILER_ID}" MATCHES "Clang")
+  set(CMAKE_CUDA_HOST_COMPILER "${CMAKE_C_COMPILER}")
+endif()
+enable_language(CUDA)
+if("X${CMAKE_CUDA_STANDARD}" STREQUAL "X")
+  set(CMAKE_CUDA_STANDARD ${CMAKE_CXX_STANDARD})
+endif()
+set(CMAKE_CUDA_STANDARD_REQUIRED ON)
+
+# CMP0074 - find_package will respect <PackageName>_ROOT variables
+cmake_policy(PUSH)
+if(CMAKE_VERSION VERSION_GREATER_EQUAL 3.12.0)
+  cmake_policy(SET CMP0074 NEW)
+endif()
+
+find_package(CUDAToolkit REQUIRED)
+
+cmake_policy(POP)
+
+if(NOT CMAKE_CUDA_COMPILER_VERSION VERSION_EQUAL CUDAToolkit_VERSION)
+  message(FATAL_ERROR "Found two conflicting CUDA versions:\n"
+    "V${CMAKE_CUDA_COMPILER_VERSION} in '${CUDA_INCLUDE_DIRS}' and\n"
+    "V${CUDAToolkit_VERSION} in '${CUDAToolkit_INCLUDE_DIRS}'")
+endif()
+
+if(NOT TARGET CUDA::nvToolsExt)
+  message(FATAL_ERROR "Failed to find nvToolsExt")
+endif()
+
+message(STATUS "Caffe2: CUDA detected: " ${CUDA_VERSION})
+message(STATUS "Caffe2: CUDA nvcc is: " ${CUDA_NVCC_EXECUTABLE})
+message(STATUS "Caffe2: CUDA toolkit directory: " ${CUDA_TOOLKIT_ROOT_DIR})
+if(CUDA_VERSION VERSION_LESS 11.0)
+  message(FATAL_ERROR "PyTorch requires CUDA 11.0 or above.")
+endif()
+
+if(CUDA_FOUND)
+  # Sometimes, we may mismatch nvcc with the CUDA headers we are
+  # compiling with, e.g., if a ccache nvcc is fed to us by CUDA_NVCC_EXECUTABLE
+  # but the PATH is not consistent with CUDA_HOME. It's better safe
+  # than sorry: make sure everything is consistent.
+  if(MSVC AND CMAKE_GENERATOR MATCHES "Visual Studio")
+    # When using Visual Studio, it attempts to lock the whole binary dir when
+    # `try_run` is called, which will cause the build to fail.
+    string(RANDOM BUILD_SUFFIX)
+    set(PROJECT_RANDOM_BINARY_DIR "${PROJECT_BINARY_DIR}/${BUILD_SUFFIX}")
+  else()
+    set(PROJECT_RANDOM_BINARY_DIR "${PROJECT_BINARY_DIR}")
+  endif()
+  set(file "${PROJECT_BINARY_DIR}/detect_cuda_version.cc")
+  file(WRITE ${file} ""
+    "#include <cuda.h>\n"
+    "#include <cstdio>\n"
+    "int main() {\n"
+    "  printf(\"%d.%d\", CUDA_VERSION / 1000, (CUDA_VERSION / 10) % 100);\n"
+    "  return 0;\n"
+    "}\n"
+    )
+  if(NOT CMAKE_CROSSCOMPILING)
+    try_run(run_result compile_result ${PROJECT_RANDOM_BINARY_DIR} ${file}
+      CMAKE_FLAGS "-DINCLUDE_DIRECTORIES=${CUDA_INCLUDE_DIRS}"
+      LINK_LIBRARIES ${CUDA_LIBRARIES}
+      RUN_OUTPUT_VARIABLE cuda_version_from_header
+      COMPILE_OUTPUT_VARIABLE output_var
+      )
+    if(NOT compile_result)
+      message(FATAL_ERROR "Caffe2: Couldn't determine version from header: " ${output_var})
+    endif()
+    message(STATUS "Caffe2: Header version is: " ${cuda_version_from_header})
+    if(NOT cuda_version_from_header STREQUAL ${CUDA_VERSION_STRING})
+      # Force CUDA to be processed again next time
+      # TODO: I'm not sure if this counts as an implementation detail of
+      # FindCUDA
+      set(cuda_version_from_findcuda ${CUDA_VERSION_STRING})
+      unset(CUDA_TOOLKIT_ROOT_DIR_INTERNAL CACHE)
+      # Not strictly necessary, but for good luck.
+ unset(CUDA_VERSION CACHE) + # Error out + message(FATAL_ERROR "FindCUDA says CUDA version is ${cuda_version_from_findcuda} (usually determined by nvcc), " + "but the CUDA headers say the version is ${cuda_version_from_header}. This often occurs " + "when you set both CUDA_HOME and CUDA_NVCC_EXECUTABLE to " + "non-standard locations, without also setting PATH to point to the correct nvcc. " + "Perhaps, try re-running this command again with PATH=${CUDA_TOOLKIT_ROOT_DIR}/bin:$PATH. " + "See above log messages for more diagnostics, and see https://github.com/pytorch/pytorch/issues/8092 for more details.") + endif() + endif() +endif() + +# Optionally, find TensorRT +if(CAFFE2_USE_TENSORRT) + find_path(TENSORRT_INCLUDE_DIR NvInfer.h + HINTS ${TENSORRT_ROOT} ${CUDA_TOOLKIT_ROOT_DIR} + PATH_SUFFIXES include) + find_library(TENSORRT_LIBRARY nvinfer + HINTS ${TENSORRT_ROOT} ${CUDA_TOOLKIT_ROOT_DIR} + PATH_SUFFIXES lib lib64 lib/x64) + find_package_handle_standard_args( + TENSORRT DEFAULT_MSG TENSORRT_INCLUDE_DIR TENSORRT_LIBRARY) + if(TENSORRT_FOUND) + execute_process(COMMAND /bin/sh -c "[ -r \"${TENSORRT_INCLUDE_DIR}/NvInferVersion.h\" ] && awk '/^\#define NV_TENSORRT_MAJOR/ {print $3}' \"${TENSORRT_INCLUDE_DIR}/NvInferVersion.h\"" OUTPUT_VARIABLE TENSORRT_VERSION_MAJOR) + execute_process(COMMAND /bin/sh -c "[ -r \"${TENSORRT_INCLUDE_DIR}/NvInferVersion.h\" ] && awk '/^\#define NV_TENSORRT_MINOR/ {print $3}' \"${TENSORRT_INCLUDE_DIR}/NvInferVersion.h\"" OUTPUT_VARIABLE TENSORRT_VERSION_MINOR) + if(TENSORRT_VERSION_MAJOR) + string(STRIP ${TENSORRT_VERSION_MAJOR} TENSORRT_VERSION_MAJOR) + string(STRIP ${TENSORRT_VERSION_MINOR} TENSORRT_VERSION_MINOR) + set(TENSORRT_VERSION "${TENSORRT_VERSION_MAJOR}.${TENSORRT_VERSION_MINOR}") + #CAFFE2_USE_TRT is set in Dependencies + set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -DTENSORRT_VERSION_MAJOR=${TENSORRT_VERSION_MAJOR}") + set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -DTENSORRT_VERSION_MINOR=${TENSORRT_VERSION_MINOR}") + else() + message(WARNING "Caffe2: Cannot find ${TENSORRT_INCLUDE_DIR}/NvInferVersion.h. Assuming TRT 5.0 which is no longer supported. Turning the option off.") + set(CAFFE2_USE_TENSORRT OFF) + endif() + else() + message(WARNING + "Caffe2: Cannot find TensorRT library. Turning the option off.") + set(CAFFE2_USE_TENSORRT OFF) + endif() +endif() + +# ---[ CUDA libraries wrapper + +# find libcuda.so and lbnvrtc.so +# For libcuda.so, we will find it under lib, lib64, and then the +# stubs folder, in case we are building on a system that does not +# have cuda driver installed. On windows, we also search under the +# folder lib/x64. +set(CUDA_CUDA_LIB "${CUDA_cuda_driver_LIBRARY}" CACHE FILEPATH "") +set(CUDA_NVRTC_LIB "${CUDA_nvrtc_LIBRARY}" CACHE FILEPATH "") +if(CUDA_NVRTC_LIB AND NOT CUDA_NVRTC_SHORTHASH) + if("${PYTHON_EXECUTABLE}" STREQUAL "") + set(_python_exe "python") + else() + set(_python_exe "${PYTHON_EXECUTABLE}") + endif() + execute_process( + COMMAND "${_python_exe}" -c + "import hashlib;hash=hashlib.sha256();hash.update(open('${CUDA_NVRTC_LIB}','rb').read());print(hash.hexdigest()[:8])" + RESULT_VARIABLE _retval + OUTPUT_VARIABLE CUDA_NVRTC_SHORTHASH) + if(NOT _retval EQUAL 0) + message(WARNING "Failed to compute shorthash for libnvrtc.so") + set(CUDA_NVRTC_SHORTHASH "XXXXXXXX") + else() + string(STRIP "${CUDA_NVRTC_SHORTHASH}" CUDA_NVRTC_SHORTHASH) + message(STATUS "${CUDA_NVRTC_LIB} shorthash is ${CUDA_NVRTC_SHORTHASH}") + endif() +endif() + +# Create new style imported libraries. 
+# Several of these libraries have a hardcoded path if CAFFE2_STATIC_LINK_CUDA +# is set. This path is where sane CUDA installations have their static +# libraries installed. This flag should only be used for binary builds, so +# end-users should never have this flag set. + +# cuda +add_library(caffe2::cuda INTERFACE IMPORTED) +set_property( + TARGET caffe2::cuda PROPERTY INTERFACE_LINK_LIBRARIES + CUDA::cuda_driver) + +# cudart +add_library(torch::cudart INTERFACE IMPORTED) +if(CAFFE2_STATIC_LINK_CUDA) + set_property( + TARGET torch::cudart PROPERTY INTERFACE_LINK_LIBRARIES + CUDA::cudart_static) +else() + set_property( + TARGET torch::cudart PROPERTY INTERFACE_LINK_LIBRARIES + CUDA::cudart) +endif() + +# nvToolsExt +add_library(torch::nvtoolsext INTERFACE IMPORTED) +set_property( + TARGET torch::nvtoolsext PROPERTY INTERFACE_LINK_LIBRARIES + CUDA::nvToolsExt) + +# cublas +add_library(caffe2::cublas INTERFACE IMPORTED) +if(CAFFE2_STATIC_LINK_CUDA AND NOT WIN32) + set_property( + TARGET caffe2::cublas PROPERTY INTERFACE_LINK_LIBRARIES + # NOTE: cublas is always linked dynamically + CUDA::cublas CUDA::cublasLt) + set_property( + TARGET caffe2::cublas APPEND PROPERTY INTERFACE_LINK_LIBRARIES + CUDA::cudart_static rt) +else() + set_property( + TARGET caffe2::cublas PROPERTY INTERFACE_LINK_LIBRARIES + CUDA::cublas CUDA::cublasLt) +endif() + +# cudnn interface +# static linking is handled by USE_STATIC_CUDNN environment variable +if(CAFFE2_USE_CUDNN) + if(USE_STATIC_CUDNN) + set(CUDNN_STATIC ON CACHE BOOL "") + else() + set(CUDNN_STATIC OFF CACHE BOOL "") + endif() + + find_package(CUDNN) + + if(NOT CUDNN_FOUND) + message(WARNING + "Cannot find cuDNN library. Turning the option off") + set(CAFFE2_USE_CUDNN OFF) + else() + if(CUDNN_VERSION VERSION_LESS "8.1.0") + message(FATAL_ERROR "PyTorch requires cuDNN 8.1 and above.") + endif() + endif() + + add_library(torch::cudnn INTERFACE IMPORTED) + target_include_directories(torch::cudnn INTERFACE ${CUDNN_INCLUDE_PATH}) + if(CUDNN_STATIC AND NOT WIN32) + target_link_options(torch::cudnn INTERFACE + "-Wl,--exclude-libs,libcudnn_static.a") + else() + target_link_libraries(torch::cudnn INTERFACE ${CUDNN_LIBRARY_PATH}) + endif() +else() + message(STATUS "USE_CUDNN is set to 0. Compiling without cuDNN support") +endif() + +if(CAFFE2_USE_CUSPARSELT) + find_package(CUSPARSELT) + + if(NOT CUSPARSELT_FOUND) + message(WARNING + "Cannot find cuSPARSELt library. Turning the option off") + set(CAFFE2_USE_CUSPARSELT OFF) + else() + add_library(torch::cusparselt INTERFACE IMPORTED) + target_include_directories(torch::cusparselt INTERFACE ${CUSPARSELT_INCLUDE_PATH}) + target_link_libraries(torch::cusparselt INTERFACE ${CUSPARSELT_LIBRARY_PATH}) + endif() +else() + message(STATUS "USE_CUSPARSELT is set to 0. 
Compiling without cuSPARSELt support") +endif() + +# curand +add_library(caffe2::curand INTERFACE IMPORTED) +if(CAFFE2_STATIC_LINK_CUDA AND NOT WIN32) + set_property( + TARGET caffe2::curand PROPERTY INTERFACE_LINK_LIBRARIES + CUDA::curand_static) +else() + set_property( + TARGET caffe2::curand PROPERTY INTERFACE_LINK_LIBRARIES + CUDA::curand) +endif() + +# cufft +add_library(caffe2::cufft INTERFACE IMPORTED) +if(CAFFE2_STATIC_LINK_CUDA AND NOT WIN32) + set_property( + TARGET caffe2::cufft PROPERTY INTERFACE_LINK_LIBRARIES + CUDA::cufft_static_nocallback) +else() + set_property( + TARGET caffe2::cufft PROPERTY INTERFACE_LINK_LIBRARIES + CUDA::cufft) +endif() + +# TensorRT +if(CAFFE2_USE_TENSORRT) + add_library(caffe2::tensorrt UNKNOWN IMPORTED) + set_property( + TARGET caffe2::tensorrt PROPERTY IMPORTED_LOCATION + ${TENSORRT_LIBRARY}) + set_property( + TARGET caffe2::tensorrt PROPERTY INTERFACE_INCLUDE_DIRECTORIES + ${TENSORRT_INCLUDE_DIR}) +endif() + +# nvrtc +add_library(caffe2::nvrtc INTERFACE IMPORTED) +set_property( + TARGET caffe2::nvrtc PROPERTY INTERFACE_LINK_LIBRARIES + CUDA::nvrtc) + +# Add onnx namepsace definition to nvcc +if(ONNX_NAMESPACE) + list(APPEND CUDA_NVCC_FLAGS "-DONNX_NAMESPACE=${ONNX_NAMESPACE}") +else() + list(APPEND CUDA_NVCC_FLAGS "-DONNX_NAMESPACE=onnx_c2") +endif() + +# Don't activate VC env again for Ninja generators with MSVC on Windows if CUDAHOSTCXX is not defined +# by adding --use-local-env. +if(MSVC AND CMAKE_GENERATOR STREQUAL "Ninja" AND NOT DEFINED ENV{CUDAHOSTCXX}) + list(APPEND CUDA_NVCC_FLAGS "--use-local-env") +endif() + +# setting nvcc arch flags +torch_cuda_get_nvcc_gencode_flag(NVCC_FLAGS_EXTRA) +# CMake 3.18 adds integrated support for architecture selection, but we can't rely on it +set(CMAKE_CUDA_ARCHITECTURES OFF) +list(APPEND CUDA_NVCC_FLAGS ${NVCC_FLAGS_EXTRA}) +message(STATUS "Added CUDA NVCC flags for: ${NVCC_FLAGS_EXTRA}") + +# disable some nvcc diagnostic that appears in boost, glog, glags, opencv, etc. 
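+# For illustration only (the exact rendering can vary with the nvcc version):
+# the loop below collects one --diag_suppress entry per diagnostic and then
+# joins them with commas, so nvcc ends up being invoked with a single argument
+# pair roughly of the form
+#   -Xcudafe --diag_suppress=cc_clobber_ignored,--diag_suppress=bad_friend_decl,...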
+foreach(diag cc_clobber_ignored + field_without_dll_interface + base_class_has_different_dll_interface + dll_interface_conflict_none_assumed + dll_interface_conflict_dllexport_assumed + bad_friend_decl) + list(APPEND SUPPRESS_WARNING_FLAGS --diag_suppress=${diag}) +endforeach() +string(REPLACE ";" "," SUPPRESS_WARNING_FLAGS "${SUPPRESS_WARNING_FLAGS}") +list(APPEND CUDA_NVCC_FLAGS -Xcudafe ${SUPPRESS_WARNING_FLAGS}) + +set(CUDA_PROPAGATE_HOST_FLAGS_BLOCKLIST "-Werror") +if(MSVC) + list(APPEND CUDA_NVCC_FLAGS "--Werror" "cross-execution-space-call") + list(APPEND CUDA_NVCC_FLAGS "--no-host-device-move-forward") +endif() + +# Debug and Release symbol support +if(MSVC) + if(${CAFFE2_USE_MSVC_STATIC_RUNTIME}) + string(APPEND CMAKE_CUDA_FLAGS_DEBUG " -Xcompiler /MTd") + string(APPEND CMAKE_CUDA_FLAGS_MINSIZEREL " -Xcompiler /MT") + string(APPEND CMAKE_CUDA_FLAGS_RELEASE " -Xcompiler /MT") + string(APPEND CMAKE_CUDA_FLAGS_RELWITHDEBINFO " -Xcompiler /MT") + else() + string(APPEND CMAKE_CUDA_FLAGS_DEBUG " -Xcompiler /MDd") + string(APPEND CMAKE_CUDA_FLAGS_MINSIZEREL " -Xcompiler /MD") + string(APPEND CMAKE_CUDA_FLAGS_RELEASE " -Xcompiler /MD") + string(APPEND CMAKE_CUDA_FLAGS_RELWITHDEBINFO " -Xcompiler /MD") + endif() + if(CUDA_NVCC_FLAGS MATCHES "Zi") + list(APPEND CUDA_NVCC_FLAGS "-Xcompiler" "-FS") + endif() +elseif(CUDA_DEVICE_DEBUG) + list(APPEND CUDA_NVCC_FLAGS "-g" "-G") # -G enables device code debugging symbols +endif() + +# Set expt-relaxed-constexpr to suppress Eigen warnings +list(APPEND CUDA_NVCC_FLAGS "--expt-relaxed-constexpr") + +# Set expt-extended-lambda to support lambda on device +list(APPEND CUDA_NVCC_FLAGS "--expt-extended-lambda") + +foreach(FLAG ${CUDA_NVCC_FLAGS}) + string(FIND "${FLAG}" " " flag_space_position) + if(NOT flag_space_position EQUAL -1) + message(FATAL_ERROR "Found spaces in CUDA_NVCC_FLAGS entry '${FLAG}'") + endif() + string(APPEND CMAKE_CUDA_FLAGS " ${FLAG}") +endforeach() diff --git a/llmeval-env/lib/python3.10/site-packages/torch/share/cmake/Caffe2/public/gflags.cmake b/llmeval-env/lib/python3.10/site-packages/torch/share/cmake/Caffe2/public/gflags.cmake new file mode 100644 index 0000000000000000000000000000000000000000..186cda1a909ab79431114d1c61de895069255389 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/share/cmake/Caffe2/public/gflags.cmake @@ -0,0 +1,83 @@ +# ---[ gflags + +# We will try to use the config mode first, and then manual find. +find_package(gflags CONFIG QUIET) +if(NOT TARGET gflags) + find_package(gflags MODULE QUIET) +endif() + +if(TARGET gflags) + message(STATUS "Caffe2: Found gflags with new-style gflags target.") +elseif(GFLAGS_FOUND) + message(STATUS "Caffe2: Found gflags with old-style gflag starget.") + add_library(gflags UNKNOWN IMPORTED) + set_property( + TARGET gflags PROPERTY IMPORTED_LOCATION ${GFLAGS_LIBRARY}) + set_property( + TARGET gflags PROPERTY INTERFACE_INCLUDE_DIRECTORIES + ${GFLAGS_INCLUDE_DIR}) +else() + message(STATUS + "Caffe2: Cannot find gflags automatically. Using legacy find.") + + # - Try to find GFLAGS in the legacy way. 
+ # + # The following variables are optionally searched for defaults + # GFLAGS_ROOT_DIR: Base directory where all GFLAGS components are found + # + # The following are set after configuration is done: + # GFLAGS_FOUND + # GFLAGS_INCLUDE_DIRS + # GFLAGS_LIBRARIES + # GFLAGS_LIBRARYRARY_DIRS + include(FindPackageHandleStandardArgs) + set(GFLAGS_ROOT_DIR "" CACHE PATH "Folder contains Gflags") + + # We are testing only a couple of files in the include directories + if(WIN32) + find_path(GFLAGS_INCLUDE_DIR gflags/gflags.h + PATHS ${GFLAGS_ROOT_DIR}/src/windows) + else() + find_path(GFLAGS_INCLUDE_DIR gflags/gflags.h + PATHS ${GFLAGS_ROOT_DIR}) + endif() + + if(WIN32) + find_library(GFLAGS_LIBRARY_RELEASE + NAMES libgflags + PATHS ${GFLAGS_ROOT_DIR} + PATH_SUFFIXES Release) + + find_library(GFLAGS_LIBRARY_DEBUG + NAMES libgflags-debug + PATHS ${GFLAGS_ROOT_DIR} + PATH_SUFFIXES Debug) + set(GFLAGS_LIBRARY optimized ${GFLAGS_LIBRARY_RELEASE} debug ${GFLAGS_LIBRARY_DEBUG}) + else() + find_library(GFLAGS_LIBRARY gflags) + endif() + + find_package_handle_standard_args( + gflags DEFAULT_MSG GFLAGS_INCLUDE_DIR GFLAGS_LIBRARY) + + if(GFLAGS_FOUND) + message( + STATUS + "Caffe2: Found gflags (include: ${GFLAGS_INCLUDE_DIR}, " + "library: ${GFLAGS_LIBRARY})") + add_library(gflags UNKNOWN IMPORTED) + set_property( + TARGET gflags PROPERTY IMPORTED_LOCATION ${GFLAGS_LIBRARY}) + set_property( + TARGET gflags PROPERTY INTERFACE_INCLUDE_DIRECTORIES + ${GFLAGS_INCLUDE_DIR}) + endif() +endif() + +# After above, we should have the gflags target now. +if(NOT TARGET gflags) + message(WARNING + "Caffe2: gflags cannot be found. Depending on whether you are building " + "Caffe2 or a Caffe2 dependent library, the next warning / error will " + "give you more info.") +endif() diff --git a/llmeval-env/lib/python3.10/site-packages/torch/share/cmake/Caffe2/public/glog.cmake b/llmeval-env/lib/python3.10/site-packages/torch/share/cmake/Caffe2/public/glog.cmake new file mode 100644 index 0000000000000000000000000000000000000000..bb03e81f29e3afed43ba95260cc5c298be881f72 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/share/cmake/Caffe2/public/glog.cmake @@ -0,0 +1,70 @@ +# ---[ glog + +# We will try to use the config mode first, and then manual find. +find_package(glog CONFIG QUIET) +if(NOT TARGET glog::glog) + find_package(glog MODULE QUIET) +endif() + +if(TARGET glog::glog) + message(STATUS "Caffe2: Found glog with new-style glog target.") +elseif(GLOG_FOUND) + message( + STATUS + "Caffe2: Found glog with old-style glog starget. Glog never shipped " + "old style glog targets, so somewhere in your cmake path there might " + "be a custom Findglog.cmake file that got triggered. We will make a " + "best effort to create the new style glog target for you.") + add_library(glog::glog UNKNOWN IMPORTED) + set_property( + TARGET glog::glog PROPERTY IMPORTED_LOCATION ${GLOG_LIBRARY}) + set_property( + TARGET glog::glog PROPERTY INTERFACE_INCLUDE_DIRECTORIES + ${GLOG_INCLUDE_DIR}) +else() + message(STATUS "Caffe2: Cannot find glog automatically. 
Using legacy find.") + + # - Try to find Glog + # + # The following variables are optionally searched for defaults + # GLOG_ROOT_DIR: Base directory where all GLOG components are found + # + # The following are set after configuration is done: + # GLOG_FOUND + # GLOG_INCLUDE_DIRS + # GLOG_LIBRARIES + # GLOG_LIBRARYRARY_DIRS + + include(FindPackageHandleStandardArgs) + set(GLOG_ROOT_DIR "" CACHE PATH "Folder contains Google glog") + if(NOT WIN32) + find_path(GLOG_INCLUDE_DIR glog/logging.h + PATHS ${GLOG_ROOT_DIR}) + endif() + + find_library(GLOG_LIBRARY glog + PATHS ${GLOG_ROOT_DIR} + PATH_SUFFIXES lib lib64) + + find_package_handle_standard_args(glog DEFAULT_MSG GLOG_INCLUDE_DIR GLOG_LIBRARY) + + if(GLOG_FOUND) + message(STATUS + "Caffe2: Found glog (include: ${GLOG_INCLUDE_DIR}, " + "library: ${GLOG_LIBRARY})") + add_library(glog::glog UNKNOWN IMPORTED) + set_property( + TARGET glog::glog PROPERTY IMPORTED_LOCATION ${GLOG_LIBRARY}) + set_property( + TARGET glog::glog PROPERTY INTERFACE_INCLUDE_DIRECTORIES + ${GLOG_INCLUDE_DIR}) + endif() +endif() + +# After above, we should have the glog::glog target now. +if(NOT TARGET glog::glog) + message(WARNING + "Caffe2: glog cannot be found. Depending on whether you are building " + "Caffe2 or a Caffe2 dependent library, the next warning / error will " + "give you more info.") +endif() diff --git a/llmeval-env/lib/python3.10/site-packages/torch/share/cmake/Caffe2/public/mkl.cmake b/llmeval-env/lib/python3.10/site-packages/torch/share/cmake/Caffe2/public/mkl.cmake new file mode 100644 index 0000000000000000000000000000000000000000..68bf1b9dc9382c3717d4d017d9bd07d8d7c23c4c --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/share/cmake/Caffe2/public/mkl.cmake @@ -0,0 +1,23 @@ +find_package(MKL QUIET) + +if(TARGET caffe2::mkl) + return() +endif() + +add_library(caffe2::mkl INTERFACE IMPORTED) +target_include_directories(caffe2::mkl INTERFACE ${MKL_INCLUDE_DIR}) +target_link_libraries(caffe2::mkl INTERFACE ${MKL_LIBRARIES}) +foreach(MKL_LIB IN LISTS MKL_LIBRARIES) + if(EXISTS "${MKL_LIB}") + get_filename_component(MKL_LINK_DIR "${MKL_LIB}" DIRECTORY) + if(IS_DIRECTORY "${MKL_LINK_DIR}") + target_link_directories(caffe2::mkl INTERFACE "${MKL_LINK_DIR}") + endif() + endif() +endforeach() + +# TODO: This is a hack, it will not pick up architecture dependent +# MKL libraries correctly; see https://github.com/pytorch/pytorch/issues/73008 +set_property( + TARGET caffe2::mkl PROPERTY INTERFACE_LINK_DIRECTORIES + ${MKL_ROOT}/lib ${MKL_ROOT}/lib/intel64 ${MKL_ROOT}/lib/intel64_win ${MKL_ROOT}/lib/win-x64) diff --git a/llmeval-env/lib/python3.10/site-packages/torch/share/cmake/Caffe2/public/mkldnn.cmake b/llmeval-env/lib/python3.10/site-packages/torch/share/cmake/Caffe2/public/mkldnn.cmake new file mode 100644 index 0000000000000000000000000000000000000000..87935625f9bfb543d1cdc7f2b59f11e8d4a709e7 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/share/cmake/Caffe2/public/mkldnn.cmake @@ -0,0 +1,18 @@ +set(MKLDNN_USE_NATIVE_ARCH ${USE_NATIVE_ARCH}) + +if(CPU_AARCH64) + include(${CMAKE_CURRENT_LIST_DIR}/ComputeLibrary.cmake) +endif() + +find_package(MKLDNN QUIET) + +if(NOT TARGET caffe2::mkldnn) + add_library(caffe2::mkldnn INTERFACE IMPORTED) +endif() + +set_property( + TARGET caffe2::mkldnn PROPERTY INTERFACE_INCLUDE_DIRECTORIES + ${MKLDNN_INCLUDE_DIR}) +set_property( + TARGET caffe2::mkldnn PROPERTY INTERFACE_LINK_LIBRARIES + ${MKLDNN_LIBRARIES}) diff --git 
a/llmeval-env/lib/python3.10/site-packages/torch/share/cmake/Caffe2/public/protobuf.cmake b/llmeval-env/lib/python3.10/site-packages/torch/share/cmake/Caffe2/public/protobuf.cmake new file mode 100644 index 0000000000000000000000000000000000000000..77ec3622b132dc7a7817716dd24ef986e6ac030d --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/share/cmake/Caffe2/public/protobuf.cmake @@ -0,0 +1,92 @@ +# ---[ Protobuf + +# We will try to use the config mode first, and then manual find. +find_package(Protobuf CONFIG QUIET) +if(NOT Protobuf_FOUND) + find_package(Protobuf MODULE QUIET) +endif() + +if((TARGET protobuf::libprotobuf OR TARGET protobuf::libprotobuf-lite) AND TARGET protobuf::protoc) + # Hooray. This is the most ideal situation, meaning that you either have a + # Protobuf config file installed (like on Windows), or you are using a + # modern CMake that ships with a FindProtobuf.cmake file that produces + # modern targets. + message(STATUS "Caffe2: Found protobuf with new-style protobuf targets.") +elseif(Protobuf_FOUND OR PROTOBUF_FOUND) + # If the modern targets are not present, we will generate them for you for + # backward compatibility. This is backported from CMake's new FindProtobuf.cmake + # content. + if((NOT PROTOBUF_LIBRARY) AND (NOT PROTOBUF_LITE_LIBRARY)) + message(FATAL_ERROR + "Caffe2: Found protobuf with old style targets, but could not find targets." + " PROTOBUF_LIBRARY: " ${PROTOBUF_LIBRARY} + " PROTOBUF_LITE_LIBRARY: " ${PROTOBUF_LITE_LIBRARY} + " Protobuf_LIBRARY: " ${Protobuf_LIBRARY} + " Protobuf_LITE_LIBRARY: " ${Protobuf_LITE_LIBRARY}) + endif() + message(STATUS "Caffe2: Found protobuf with old-style protobuf targets.") + + if(PROTOBUF_LIBRARY) + if(NOT TARGET protobuf::libprotobuf) + add_library(protobuf::libprotobuf UNKNOWN IMPORTED) + set_target_properties(protobuf::libprotobuf PROPERTIES + INTERFACE_INCLUDE_DIRECTORIES "${PROTOBUF_INCLUDE_DIRS}") + endif() + if(EXISTS "${PROTOBUF_LIBRARY}") + set_target_properties(protobuf::libprotobuf PROPERTIES + IMPORTED_LOCATION "${PROTOBUF_LIBRARY}") + endif() + if(EXISTS "${PROTOBUF_LIBRARY_RELEASE}") + set_property(TARGET protobuf::libprotobuf APPEND PROPERTY + IMPORTED_CONFIGURATIONS RELEASE) + set_target_properties(protobuf::libprotobuf PROPERTIES + IMPORTED_LOCATION_RELEASE "${PROTOBUF_LIBRARY_RELEASE}") + endif() + if(EXISTS "${PROTOBUF_LIBRARY_DEBUG}") + set_property(TARGET protobuf::libprotobuf APPEND PROPERTY + IMPORTED_CONFIGURATIONS DEBUG) + set_target_properties(protobuf::libprotobuf PROPERTIES + IMPORTED_LOCATION_DEBUG "${PROTOBUF_LIBRARY_DEBUG}") + endif() + endif() + + if(PROTOBUF_LITE_LIBRARY) + if(NOT TARGET protobuf::libprotobuf-lite) + add_library(protobuf::libprotobuf-lite UNKNOWN IMPORTED) + set_target_properties(protobuf::libprotobuf-lite PROPERTIES + INTERFACE_INCLUDE_DIRECTORIES "${PROTOBUF_INCLUDE_DIRS}") + endif() + if(EXISTS "${PROTOBUF_LITE_LIBRARY}") + set_target_properties(protobuf::libprotobuf-lite PROPERTIES + IMPORTED_LOCATION "${PROTOBUF_LITE_LIBRARY}") + endif() + if(EXISTS "${PROTOBUF_LITE_LIBRARY_RELEASE}") + set_property(TARGET protobuf::libprotobuf-lite APPEND PROPERTY + IMPORTED_CONFIGURATIONS RELEASE) + set_target_properties(protobuf::libprotobuf-lite PROPERTIES + IMPORTED_LOCATION_RELEASE "${PROTOBUF_LITE_LIBRARY_RELEASE}") + endif() + if(EXISTS "${PROTOBUF_LITE_LIBRARY_DEBUG}") + set_property(TARGET protobuf::libprotobuf-lite APPEND PROPERTY + IMPORTED_CONFIGURATIONS DEBUG) + set_target_properties(protobuf::libprotobuf-lite PROPERTIES + IMPORTED_LOCATION_DEBUG 
"${PROTOBUF_LITE_LIBRARY_DEBUG}") + endif() + endif() + + if(PROTOBUF_PROTOC_EXECUTABLE) + if(NOT TARGET protobuf::protoc) + add_executable(protobuf::protoc IMPORTED) + endif() + set_property(TARGET protobuf::protoc PROPERTY + IMPORTED_LOCATION ${PROTOBUF_PROTOC_EXECUTABLE}) + endif() +endif() + +# After above, we should have the protobuf related target now. +if((NOT TARGET protobuf::libprotobuf) AND (NOT TARGET protobuf::libprotobuf-lite)) + message(WARNING + "Protobuf cannot be found. Depending on whether you are building Caffe2 " + "or a Caffe2 dependent library, the next warning / error will give you " + "more info.") +endif() diff --git a/llmeval-env/lib/python3.10/site-packages/torch/share/cmake/Caffe2/public/utils.cmake b/llmeval-env/lib/python3.10/site-packages/torch/share/cmake/Caffe2/public/utils.cmake new file mode 100644 index 0000000000000000000000000000000000000000..78a90dbc587ca70f1d4d8593b81cc33eeec517e8 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/share/cmake/Caffe2/public/utils.cmake @@ -0,0 +1,559 @@ +################################################################################################ +# Exclude and prepend functionalities +function(exclude OUTPUT INPUT) +set(EXCLUDES ${ARGN}) +foreach(EXCLUDE ${EXCLUDES}) + list(REMOVE_ITEM INPUT "${EXCLUDE}") +endforeach() +set(${OUTPUT} ${INPUT} PARENT_SCOPE) +endfunction(exclude) + +function(prepend OUTPUT PREPEND) +set(OUT "") +foreach(ITEM ${ARGN}) + list(APPEND OUT "${PREPEND}${ITEM}") +endforeach() +set(${OUTPUT} ${OUT} PARENT_SCOPE) +endfunction(prepend) + + +################################################################################################ +# Clears variables from list +# Usage: +# caffe_clear_vars() +macro(caffe_clear_vars) + foreach(_var ${ARGN}) + unset(${_var}) + endforeach() +endmacro() + +################################################################################################ +# Prints list element per line +# Usage: +# caffe_print_list() +function(caffe_print_list) + foreach(e ${ARGN}) + message(STATUS ${e}) + endforeach() +endfunction() + +################################################################################################ +# Reads set of version defines from the header file +# Usage: +# caffe_parse_header( ..) 
+macro(caffe_parse_header FILENAME FILE_VAR) + set(vars_regex "") + set(__parnet_scope OFF) + set(__add_cache OFF) + foreach(name ${ARGN}) + if("${name}" STREQUAL "PARENT_SCOPE") + set(__parnet_scope ON) + elseif("${name}" STREQUAL "CACHE") + set(__add_cache ON) + elseif(vars_regex) + set(vars_regex "${vars_regex}|${name}") + else() + set(vars_regex "${name}") + endif() + endforeach() + if(EXISTS "${FILENAME}") + file(STRINGS "${FILENAME}" ${FILE_VAR} REGEX "#define[ \t]+(${vars_regex})[ \t]+[0-9]+" ) + else() + unset(${FILE_VAR}) + endif() + foreach(name ${ARGN}) + if(NOT "${name}" STREQUAL "PARENT_SCOPE" AND NOT "${name}" STREQUAL "CACHE") + if(${FILE_VAR}) + if(${FILE_VAR} MATCHES ".+[ \t]${name}[ \t]+([0-9]+).*") + string(REGEX REPLACE ".+[ \t]${name}[ \t]+([0-9]+).*" "\\1" ${name} "${${FILE_VAR}}") + else() + set(${name} "") + endif() + if(__add_cache) + set(${name} ${${name}} CACHE INTERNAL "${name} parsed from ${FILENAME}" FORCE) + elseif(__parnet_scope) + set(${name} "${${name}}" PARENT_SCOPE) + endif() + else() + unset(${name} CACHE) + endif() + endif() + endforeach() +endmacro() + +################################################################################################ +# Parses a version string that might have values beyond major, minor, and patch +# and set version variables for the library. +# Usage: +# caffe2_parse_version_str( ) +function(caffe2_parse_version_str LIBNAME VERSIONSTR) + string(REGEX REPLACE "^([0-9]+).*$" "\\1" ${LIBNAME}_VERSION_MAJOR "${VERSIONSTR}") + string(REGEX REPLACE "^[0-9]+\\.([0-9]+).*$" "\\1" ${LIBNAME}_VERSION_MINOR "${VERSIONSTR}") + string(REGEX REPLACE "[0-9]+\\.[0-9]+\\.([0-9]+).*$" "\\1" ${LIBNAME}_VERSION_PATCH "${VERSIONSTR}") + set(${LIBNAME}_VERSION_MAJOR ${${LIBNAME}_VERSION_MAJOR} ${ARGN} PARENT_SCOPE) + set(${LIBNAME}_VERSION_MINOR ${${LIBNAME}_VERSION_MINOR} ${ARGN} PARENT_SCOPE) + set(${LIBNAME}_VERSION_PATCH ${${LIBNAME}_VERSION_PATCH} ${ARGN} PARENT_SCOPE) + set(${LIBNAME}_VERSION "${${LIBNAME}_VERSION_MAJOR}.${${LIBNAME}_VERSION_MINOR}.${${LIBNAME}_VERSION_PATCH}" PARENT_SCOPE) +endfunction() + +### +# Removes common indentation from a block of text to produce code suitable for +# setting to `python -c`, or using with pycmd. This allows multiline code to be +# nested nicely in the surrounding code structure. +# +# This function respsects PYTHON_EXECUTABLE if it defined, otherwise it uses +# `python` and hopes for the best. An error will be thrown if it is not found. 
+# +# Args: +# outvar : variable that will hold the stdout of the python command +# text : text to remove indentation from +# +function(dedent outvar text) + # Use PYTHON_EXECUTABLE if it is defined, otherwise default to python + if("${PYTHON_EXECUTABLE}" STREQUAL "") + set(_python_exe "python") + else() + set(_python_exe "${PYTHON_EXECUTABLE}") + endif() + set(_fixup_cmd "import sys; from textwrap import dedent; print(dedent(sys.stdin.read()))") + file(WRITE "${CMAKE_BINARY_DIR}/indented.txt" "${text}") + execute_process( + COMMAND "${_python_exe}" -c "${_fixup_cmd}" + INPUT_FILE "${CMAKE_BINARY_DIR}/indented.txt" + RESULT_VARIABLE _dedent_exitcode + OUTPUT_VARIABLE _dedent_text) + if(NOT _dedent_exitcode EQUAL 0) + message(ERROR " Failed to remove indentation from: \n\"\"\"\n${text}\n\"\"\" + Python dedent failed with error code: ${_dedent_exitcode}") + message(FATAL_ERROR " Python dedent failed with error code: ${_dedent_exitcode}") + endif() + # Remove supurflous newlines (artifacts of print) + string(STRIP "${_dedent_text}" _dedent_text) + set(${outvar} "${_dedent_text}" PARENT_SCOPE) +endfunction() + + +function(pycmd_no_exit outvar exitcode cmd) + # Use PYTHON_EXECUTABLE if it is defined, otherwise default to python + if("${PYTHON_EXECUTABLE}" STREQUAL "") + set(_python_exe "python") + else() + set(_python_exe "${PYTHON_EXECUTABLE}") + endif() + # run the actual command + execute_process( + COMMAND "${_python_exe}" -c "${cmd}" + RESULT_VARIABLE _exitcode + OUTPUT_VARIABLE _output) + # Remove supurflous newlines (artifacts of print) + string(STRIP "${_output}" _output) + set(${outvar} "${_output}" PARENT_SCOPE) + set(${exitcode} "${_exitcode}" PARENT_SCOPE) +endfunction() + + +### +# Helper function to run `python -c ""` and capture the results of stdout +# +# Runs a python command and populates an outvar with the result of stdout. +# Common indentation in the text of `cmd` is removed before the command is +# executed, so the caller does not need to worry about indentation issues. +# +# This function respsects PYTHON_EXECUTABLE if it defined, otherwise it uses +# `python` and hopes for the best. An error will be thrown if it is not found. +# +# Args: +# outvar : variable that will hold the stdout of the python command +# cmd : text representing a (possibly multiline) block of python code +# +function(pycmd outvar cmd) + dedent(_dedent_cmd "${cmd}") + pycmd_no_exit(_output _exitcode "${_dedent_cmd}") + + if(NOT _exitcode EQUAL 0) + message(ERROR " Failed when running python code: \"\"\"\n${_dedent_cmd}\n\"\"\"") + message(FATAL_ERROR " Python command failed with error code: ${_exitcode}") + endif() + # Remove supurflous newlines (artifacts of print) + string(STRIP "${_output}" _output) + set(${outvar} "${_output}" PARENT_SCOPE) +endfunction() + + +############################################################################## +# Macro to update cached options. +macro(caffe2_update_option variable value) + if(CAFFE2_CMAKE_BUILDING_WITH_MAIN_REPO) + get_property(__help_string CACHE ${variable} PROPERTY HELPSTRING) + set(${variable} ${value} CACHE BOOL ${__help_string} FORCE) + else() + set(${variable} ${value}) + endif() +endmacro() + + +############################################################################## +# Add an interface library definition that is dependent on the source. +# +# It's probably easiest to explain why this macro exists, by describing +# what things would look like if we didn't have this macro. +# +# Let's suppose we want to statically link against torch. 
+# a library in cmake called torch, and we might think that we just
+# target_link_libraries(my-app PUBLIC torch).  This will result in a
+# linker argument 'libtorch.a' getting passed to the linker.
+#
+# Unfortunately, this link command is wrong!  We have static
+# initializers in libtorch.a that would get improperly pruned by
+# the default link settings.  What we actually need is for you
+# to do -Wl,--whole-archive,libtorch.a -Wl,--no-whole-archive to ensure
+# that we keep all symbols, even if they are (seemingly) not used.
+#
+# What caffe2_interface_library does is create an interface library
+# that indirectly depends on the real library, but sets up the link
+# arguments so that you get all of the extra link settings you need.
+# The result is not a "real" library, and so we have to manually
+# copy over necessary properties from the original target.
+#
+# (The discussion above is about static libraries, but a similar
+# situation occurs for dynamic libraries: if no symbols are used from
+# a dynamic library, it will be pruned unless you are --no-as-needed)
+macro(caffe2_interface_library SRC DST)
+  add_library(${DST} INTERFACE)
+  add_dependencies(${DST} ${SRC})
+  # Depending on the nature of the source library as well as the compiler,
+  # determine the needed compilation flags.
+  get_target_property(__src_target_type ${SRC} TYPE)
+  # Depending on the type of the source library, we will set up the
+  # link command for the specific SRC library.
+  if(${__src_target_type} STREQUAL "STATIC_LIBRARY")
+    # In the case of static library, we will need to add whole-static flags.
+    if(APPLE)
+      target_link_libraries(
+        ${DST} INTERFACE -Wl,-force_load,\"$<TARGET_FILE:${SRC}>\")
+    elseif(MSVC)
+      # In MSVC, we will add whole archive in default.
+      target_link_libraries(
+        ${DST} INTERFACE "$<TARGET_FILE:${SRC}>")
+      target_link_options(
+        ${DST} INTERFACE "-WHOLEARCHIVE:$<TARGET_FILE:${SRC}>")
+    else()
+      # Assume everything else is like gcc
+      target_link_libraries(${DST} INTERFACE
+        "-Wl,--whole-archive,\"$<TARGET_FILE:${SRC}>\" -Wl,--no-whole-archive")
+    endif()
+    # Link all interface link libraries of the src target as well.
+    # For static library, we need to explicitly depend on all the libraries
+    # that are the dependent library of the source library. Note that we cannot
+    # use the populated INTERFACE_LINK_LIBRARIES property, because if one of the
+    # dependent library is not a target, cmake creates a $<LINK_ONLY:src> wrapper
+    # and then one is not able to find target "src". For more discussions, check
+    # https://gitlab.kitware.com/cmake/cmake/issues/15415
+    # https://cmake.org/pipermail/cmake-developers/2013-May/019019.html
+    # Specifically the following quote
+    #
+    # """
+    # For STATIC libraries we can define that the PUBLIC/PRIVATE/INTERFACE keys
+    # are ignored for linking and that it always populates both LINK_LIBRARIES
+    # LINK_INTERFACE_LIBRARIES. Note that for STATIC libraries the
+    # LINK_LIBRARIES property will not be used for anything except build-order
+    # dependencies.
+    # """
+    target_link_libraries(${DST} INTERFACE
+      $<TARGET_PROPERTY:${SRC},LINK_LIBRARIES>)
+  elseif(${__src_target_type} STREQUAL "SHARED_LIBRARY")
+    if("${CMAKE_CXX_COMPILER_ID}" MATCHES "GNU")
+      target_link_libraries(${DST} INTERFACE
+        "-Wl,--no-as-needed,\"$<TARGET_FILE:${SRC}>\" -Wl,--as-needed")
+    else()
+      target_link_libraries(${DST} INTERFACE ${SRC})
+    endif()
+    # Link all interface link libraries of the src target as well.
+    # For shared libraries, we can simply depend on the INTERFACE_LINK_LIBRARIES
+    # property of the target.
+    target_link_libraries(${DST} INTERFACE
+      $<TARGET_PROPERTY:${SRC},INTERFACE_LINK_LIBRARIES>)
+  else()
+    message(FATAL_ERROR
+      "You made a CMake build file error: target " ${SRC}
+      " must be of type either STATIC_LIBRARY or SHARED_LIBRARY. However, "
+      "I got " ${__src_target_type} ".")
+  endif()
+  # For all other interface properties, manually inherit from the source target.
+  set_target_properties(${DST} PROPERTIES
+    INTERFACE_COMPILE_DEFINITIONS
+    $<TARGET_PROPERTY:${SRC},INTERFACE_COMPILE_DEFINITIONS>
+    INTERFACE_COMPILE_OPTIONS
+    $<TARGET_PROPERTY:${SRC},INTERFACE_COMPILE_OPTIONS>
+    INTERFACE_INCLUDE_DIRECTORIES
+    $<TARGET_PROPERTY:${SRC},INTERFACE_INCLUDE_DIRECTORIES>
+    INTERFACE_SYSTEM_INCLUDE_DIRECTORIES
+    $<TARGET_PROPERTY:${SRC},INTERFACE_SYSTEM_INCLUDE_DIRECTORIES>)
+endmacro()
+
+
+##############################################################################
+# Creating a Caffe2 binary target with sources specified with relative paths.
+# Usage:
+#   caffe2_binary_target(target_name_or_src [<src1>] [<src2>] ...)
+# If only target_name_or_src is specified, this target is built with one single
+# source file and the target name is auto-generated from the filename. Otherwise,
+# the target name is given by the first argument and the rest are the source
+# files to build the target.
+function(caffe2_binary_target target_name_or_src)
+  # https://cmake.org/cmake/help/latest/command/function.html
+  # Checking that ARGC is greater than # is the only way to ensure
+  # that ARGV# was passed to the function as an extra argument.
+  if(ARGC GREATER 1)
+    set(__target ${target_name_or_src})
+    prepend(__srcs "${CMAKE_CURRENT_SOURCE_DIR}/" "${ARGN}")
+  else()
+    get_filename_component(__target ${target_name_or_src} NAME_WE)
+    prepend(__srcs "${CMAKE_CURRENT_SOURCE_DIR}/" "${target_name_or_src}")
+  endif()
+  add_executable(${__target} ${__srcs})
+  target_link_libraries(${__target} torch_library)
+  # If we have Caffe2_MODULES defined, we will also link with the modules.
+  if(DEFINED Caffe2_MODULES)
+    target_link_libraries(${__target} ${Caffe2_MODULES})
+  endif()
+  if(USE_TBB AND NOT USE_SYSTEM_TBB)
+    target_include_directories(${__target} PUBLIC ${TBB_INCLUDE_DIR})
+  endif()
+  install(TARGETS ${__target} DESTINATION bin)
+endfunction()
+
+function(caffe2_hip_binary_target target_name_or_src)
+  if(ARGC GREATER 1)
+    set(__target ${target_name_or_src})
+    prepend(__srcs "${CMAKE_CURRENT_SOURCE_DIR}/" "${ARGN}")
+  else()
+    get_filename_component(__target ${target_name_or_src} NAME_WE)
+    prepend(__srcs "${CMAKE_CURRENT_SOURCE_DIR}/" "${target_name_or_src}")
+  endif()
+
+  caffe2_binary_target(${target_name_or_src})
+
+  target_compile_options(${__target} PRIVATE ${HIP_CXX_FLAGS})
+  target_include_directories(${__target} PRIVATE ${Caffe2_HIP_INCLUDE})
+endfunction()
+
+
+##############################################################################
+# Multiplex between adding libraries for CUDA versus HIP (AMD Software Stack).
+# Usage:
+#   torch_cuda_based_add_library(cuda_target)
+#
+macro(torch_cuda_based_add_library cuda_target)
+  if(USE_ROCM)
+    hip_add_library(${cuda_target} ${ARGN})
+  elseif(USE_CUDA)
+    add_library(${cuda_target} ${ARGN})
+  else()
+  endif()
+endmacro()
+
+##############################################################################
+# Get the HIP arch flags specified by PYTORCH_ROCM_ARCH.
+# Usage:
+#   torch_hip_get_arch_list(variable_to_store_flags)
+#
+macro(torch_hip_get_arch_list store_var)
+  if(DEFINED ENV{PYTORCH_ROCM_ARCH})
+    set(_TMP $ENV{PYTORCH_ROCM_ARCH})
+  else()
+    # Use arch of installed GPUs as default
+    execute_process(COMMAND "rocm_agent_enumerator" COMMAND bash "-c" "grep -v gfx000 | sort -u | xargs | tr -d '\n'"
+      RESULT_VARIABLE ROCM_AGENT_ENUMERATOR_RESULT
+      OUTPUT_VARIABLE ROCM_ARCH_INSTALLED)
+    if(NOT ROCM_AGENT_ENUMERATOR_RESULT EQUAL 0)
+      message(FATAL_ERROR " Could not detect ROCm arch for GPUs on machine. Result: '${ROCM_AGENT_ENUMERATOR_RESULT}'")
+    endif()
+    set(_TMP ${ROCM_ARCH_INSTALLED})
+  endif()
+  string(REPLACE " " ";" ${store_var} "${_TMP}")
+endmacro()
+
+##############################################################################
+# Get the NVCC arch flags specified by TORCH_CUDA_ARCH_LIST and CUDA_ARCH_NAME.
+# Usage:
+#   torch_cuda_get_nvcc_gencode_flag(variable_to_store_flags)
+#
+macro(torch_cuda_get_nvcc_gencode_flag store_var)
+  # setting nvcc arch flags
+  if((NOT DEFINED TORCH_CUDA_ARCH_LIST) AND (DEFINED ENV{TORCH_CUDA_ARCH_LIST}))
+    message(WARNING
+      "In the future we will require one to explicitly pass "
+      "TORCH_CUDA_ARCH_LIST to cmake instead of implicitly setting it as an "
+      "env variable. This will become a FATAL_ERROR in a future version of "
+      "pytorch.")
+    set(TORCH_CUDA_ARCH_LIST $ENV{TORCH_CUDA_ARCH_LIST})
+  endif()
+  if(DEFINED CUDA_ARCH_NAME)
+    message(WARNING
+      "CUDA_ARCH_NAME is no longer used. Use TORCH_CUDA_ARCH_LIST instead. "
+      "Right now, CUDA_ARCH_NAME is ${CUDA_ARCH_NAME} and "
+      "TORCH_CUDA_ARCH_LIST is ${TORCH_CUDA_ARCH_LIST}.")
+    set(TORCH_CUDA_ARCH_LIST ${TORCH_CUDA_ARCH_LIST} ${CUDA_ARCH_NAME})
+  endif()
+
+  # Invoke cuda_select_nvcc_arch_flags from proper cmake FindCUDA.
+  cuda_select_nvcc_arch_flags(${store_var} ${TORCH_CUDA_ARCH_LIST})
+endmacro()
+
+
+##############################################################################
+# Add standard compile options.
+# Usage:
+#   torch_compile_options(lib_name)
+function(torch_compile_options libname)
+  set_property(TARGET ${libname} PROPERTY CXX_STANDARD 17)
+  set(private_compile_options "")
+
+  # ---[ Check if warnings should be errors.
+  if(WERROR)
+    list(APPEND private_compile_options -Werror)
+  endif()
+
+  # until they can be unified, keep these lists synced with setup.py
+  if(MSVC)
+
+    if(MSVC_Z7_OVERRIDE)
+      set(MSVC_DEBINFO_OPTION "/Z7")
+    else()
+      set(MSVC_DEBINFO_OPTION "/Zi")
+    endif()
+
+    target_compile_options(${libname} PUBLIC
+      $<$<COMPILE_LANGUAGE:CXX>:
+        ${MSVC_RUNTIME_LIBRARY_OPTION}
+        $<$<OR:$<CONFIG:Debug>,$<CONFIG:RelWithDebInfo>>:${MSVC_DEBINFO_OPTION}>
+        /EHsc
+        /bigobj>
+      )
+  else()
+    list(APPEND private_compile_options
+      -Wall
+      -Wextra
+      -Wdeprecated
+      -Wno-unused-parameter
+      -Wno-unused-function
+      -Wno-missing-field-initializers
+      -Wno-unknown-pragmas
+      -Wno-type-limits
+      -Wno-array-bounds
+      -Wno-unknown-pragmas
+      -Wno-strict-overflow
+      -Wno-strict-aliasing
+      )
+    if(NOT "${CMAKE_CXX_COMPILER_ID}" MATCHES "Clang")
+      list(APPEND private_compile_options
+        # Considered to be flaky.  See the discussion at
+        # https://github.com/pytorch/pytorch/pull/9608
+        -Wno-maybe-uninitialized)
+    endif()
+
+  endif()
+
+  if(MSVC)
+  elseif(WERROR)
+    list(APPEND private_compile_options -Wno-strict-overflow)
+  endif()
+
+  target_compile_options(${libname} PRIVATE
+    $<$<COMPILE_LANGUAGE:CXX>:${private_compile_options}>)
+  if(USE_CUDA)
+    string(FIND "${private_compile_options}" " " space_position)
+    if(NOT space_position EQUAL -1)
+      message(FATAL_ERROR "Found spaces in private_compile_options='${private_compile_options}'")
+    endif()
+    # Convert CMake list to comma-separated list
+    string(REPLACE ";" "," private_compile_options "${private_compile_options}")
+    target_compile_options(${libname} PRIVATE
+      $<$<COMPILE_LANGUAGE:CUDA>:-Xcompiler=${private_compile_options}>)
+  endif()
+
+  if(NOT WIN32 AND NOT USE_ASAN)
+    # Enable hidden visibility by default to make it easier to debug issues with
+    # TORCH_API annotations. Hidden visibility with selective default visibility
+    # behaves close enough to Windows' dllimport/dllexport.
+    #
+    # Unfortunately, hidden visibility messes up some ubsan warnings because
+    # templated classes crossing library boundary get duplicated (but identical)
+    # definitions. It's easier to just disable it.
+    target_compile_options(${libname} PRIVATE
+      $<$<COMPILE_LANGUAGE:CXX>: -fvisibility=hidden>)
+  endif()
+
+  # Use -O2 for release builds (-O3 doesn't improve perf, and -Os results in perf regression)
+  target_compile_options(${libname} PRIVATE
+    $<$<AND:$<COMPILE_LANGUAGE:CXX>,$<OR:$<CONFIG:Release>,$<CONFIG:RelWithDebInfo>>>:-O2>)
+
+endfunction()
+
+##############################################################################
+# Set old-style FindCuda.cmake compile flags from modern CMake cuda flags.
+# Usage:
+#   torch_update_find_cuda_flags()
+function(torch_update_find_cuda_flags)
+  # Convert -O2 -Xcompiler="-O2 -Wall" to "-O2;-Xcompiler=-O2,-Wall"
+  if(USE_CUDA)
+    separate_arguments(FLAGS UNIX_COMMAND "${CMAKE_CUDA_FLAGS}")
+    string(REPLACE " " "," FLAGS "${FLAGS}")
+    set(CUDA_NVCC_FLAGS ${FLAGS} PARENT_SCOPE)
+
+    separate_arguments(FLAGS_DEBUG UNIX_COMMAND "${CMAKE_CUDA_FLAGS_DEBUG}")
+    string(REPLACE " " "," FLAGS_DEBUG "${FLAGS_DEBUG}")
+    set(CUDA_NVCC_FLAGS_DEBUG "${FLAGS_DEBUG}" PARENT_SCOPE)
+
+    separate_arguments(FLAGS_RELEASE UNIX_COMMAND "${CMAKE_CUDA_FLAGS_RELEASE}")
+    string(REPLACE " " "," FLAGS_RELEASE "${FLAGS_RELEASE}")
+    set(CUDA_NVCC_FLAGS_RELEASE "${FLAGS_RELEASE}" PARENT_SCOPE)
+
+    separate_arguments(FLAGS_MINSIZEREL UNIX_COMMAND "${CMAKE_CUDA_FLAGS_MINSIZEREL}")
+    string(REPLACE " " "," FLAGS_MINSIZEREL "${FLAGS_MINSIZEREL}")
+    set(CUDA_NVCC_FLAGS_MINSIZEREL "${FLAGS_MINSIZEREL}" PARENT_SCOPE)
+
+    separate_arguments(FLAGS_RELWITHDEBINFO UNIX_COMMAND "${CMAKE_CUDA_FLAGS_RELWITHDEBINFO}")
+    string(REPLACE " " "," FLAGS_RELWITHDEBINFO "${FLAGS_RELWITHDEBINFO}")
+    set(CUDA_NVCC_FLAGS_RELWITHDEBINFO "${FLAGS_RELWITHDEBINFO}" PARENT_SCOPE)
+
+    message(STATUS "Converting CMAKE_CUDA_FLAGS to CUDA_NVCC_FLAGS:\n"
+      "    CUDA_NVCC_FLAGS                = ${FLAGS}\n"
+      "    CUDA_NVCC_FLAGS_DEBUG          = ${FLAGS_DEBUG}\n"
+      "    CUDA_NVCC_FLAGS_RELEASE        = ${FLAGS_RELEASE}\n"
+      "    CUDA_NVCC_FLAGS_RELWITHDEBINFO = ${FLAGS_RELWITHDEBINFO}\n"
+      "    CUDA_NVCC_FLAGS_MINSIZEREL     = ${FLAGS_MINSIZEREL}")
+  endif()
+endfunction()
+
+include(CheckCXXCompilerFlag)
+
+##############################################################################
+# Check if given flag is supported and append it to provided outputvar
+# Also define HAS_UPPER_CASE_FLAG_NAME variable
+# Usage:
+#   append_cxx_flag_if_supported("-Werror" CMAKE_CXX_FLAGS)
+function(append_cxx_flag_if_supported flag outputvar)
+  string(TOUPPER "HAS${flag}" _FLAG_NAME)
+  string(REGEX REPLACE "[=-]" "_" _FLAG_NAME "${_FLAG_NAME}")
+  # GCC silently ignores unknown -Wno-XXX flags, so we detect the corresponding -WXXX.
+  if(CMAKE_CXX_COMPILER_ID STREQUAL "GNU")
+    string(REGEX REPLACE "Wno-" "W" new_flag "${flag}")
+  else()
+    set(new_flag ${flag})
+  endif()
+  check_cxx_compiler_flag("${new_flag}" ${_FLAG_NAME})
+  if(${_FLAG_NAME})
+    string(APPEND ${outputvar} " ${flag}")
+    set(${outputvar} "${${outputvar}}" PARENT_SCOPE)
+  endif()
+endfunction()
+
+function(target_compile_options_if_supported target flag)
+  set(_compile_options "")
+  append_cxx_flag_if_supported("${flag}" _compile_options)
+  if(NOT "${_compile_options}" STREQUAL "")
+    target_compile_options(${target} PRIVATE ${flag})
+  endif()
+endfunction()
diff --git a/llmeval-env/lib/python3.10/site-packages/torch/share/cmake/Caffe2/public/xpu.cmake b/llmeval-env/lib/python3.10/site-packages/torch/share/cmake/Caffe2/public/xpu.cmake
new file mode 100644
index 0000000000000000000000000000000000000000..d1a442f8efd419afb8c8236cf8c9b880cb1d8b0b
--- /dev/null
+++ b/llmeval-env/lib/python3.10/site-packages/torch/share/cmake/Caffe2/public/xpu.cmake
@@ -0,0 +1,30 @@
+# ---[ xpu
+
+# Poor man's include guard
+if(TARGET torch::xpurt)
+  return()
+endif()
+
+# Find SYCL library.
+find_package(SYCLToolkit REQUIRED)
+if(NOT SYCL_FOUND)
+  set(PYTORCH_FOUND_XPU FALSE)
+  return()
+endif()
+set(PYTORCH_FOUND_XPU TRUE)
+
+# SYCL library interface
+add_library(torch::sycl INTERFACE IMPORTED)
+
+set_property(
+  TARGET torch::sycl PROPERTY INTERFACE_INCLUDE_DIRECTORIES
+  ${SYCL_INCLUDE_DIR})
+set_property(
+  TARGET torch::sycl PROPERTY INTERFACE_LINK_LIBRARIES
+  ${SYCL_LIBRARY})
+
+# xpurt
+add_library(torch::xpurt INTERFACE IMPORTED)
+set_property(
+  TARGET torch::xpurt PROPERTY INTERFACE_LINK_LIBRARIES
+  torch::sycl)
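+
+# Illustrative usage sketch (not part of the generated file; the target name is
+# hypothetical): a consumer project that wants the SYCL runtime on its link
+# line would typically link the imported target, e.g.
+#   add_executable(my_xpu_app main.cpp)
+#   target_link_libraries(my_xpu_app PRIVATE torch::xpurt)
+# which transitively pulls in torch::sycl and therefore the SYCL include
+# directory and library recorded above.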