applied-ai-018 committed
Commit 7d2720a · verified · 1 Parent(s): 1583dd5

Add files using upload-large-folder tool

This view is limited to 50 files because it contains too many changes. See raw diff.
Files changed (50)
  1. env-llmeval/lib/python3.10/site-packages/torch/_dynamo/__pycache__/bytecode_transformation.cpython-310.pyc +0 -0
  2. env-llmeval/lib/python3.10/site-packages/torch/_dynamo/__pycache__/code_context.cpython-310.pyc +0 -0
  3. env-llmeval/lib/python3.10/site-packages/torch/_dynamo/__pycache__/convert_frame.cpython-310.pyc +0 -0
  4. env-llmeval/lib/python3.10/site-packages/torch/_dynamo/__pycache__/debug_utils.cpython-310.pyc +0 -0
  5. env-llmeval/lib/python3.10/site-packages/torch/_dynamo/__pycache__/decorators.cpython-310.pyc +0 -0
  6. env-llmeval/lib/python3.10/site-packages/torch/_dynamo/__pycache__/exc.cpython-310.pyc +0 -0
  7. env-llmeval/lib/python3.10/site-packages/torch/_dynamo/__pycache__/logging.cpython-310.pyc +0 -0
  8. env-llmeval/lib/python3.10/site-packages/torch/_dynamo/__pycache__/mutation_guard.cpython-310.pyc +0 -0
  9. env-llmeval/lib/python3.10/site-packages/torch/_dynamo/__pycache__/replay_record.cpython-310.pyc +0 -0
  10. env-llmeval/lib/python3.10/site-packages/torch/_dynamo/__pycache__/side_effects.cpython-310.pyc +0 -0
  11. env-llmeval/lib/python3.10/site-packages/torch/_dynamo/__pycache__/symbolic_convert.cpython-310.pyc +0 -0
  12. env-llmeval/lib/python3.10/site-packages/torch/_dynamo/__pycache__/test_case.cpython-310.pyc +0 -0
  13. env-llmeval/lib/python3.10/site-packages/torch/_dynamo/__pycache__/test_minifier_common.cpython-310.pyc +0 -0
  14. env-llmeval/lib/python3.10/site-packages/torch/_prims_common/__init__.py +1944 -0
  15. env-llmeval/lib/python3.10/site-packages/torch/_prims_common/__pycache__/__init__.cpython-310.pyc +0 -0
  16. env-llmeval/lib/python3.10/site-packages/torch/_prims_common/__pycache__/wrappers.cpython-310.pyc +0 -0
  17. env-llmeval/lib/python3.10/site-packages/torch/_prims_common/wrappers.py +399 -0
  18. env-llmeval/lib/python3.10/site-packages/torch/cuda/__init__.py +1421 -0
  19. env-llmeval/lib/python3.10/site-packages/torch/cuda/__pycache__/__init__.cpython-310.pyc +0 -0
  20. env-llmeval/lib/python3.10/site-packages/torch/cuda/__pycache__/_memory_viz.cpython-310.pyc +0 -0
  21. env-llmeval/lib/python3.10/site-packages/torch/cuda/__pycache__/_sanitizer.cpython-310.pyc +0 -0
  22. env-llmeval/lib/python3.10/site-packages/torch/cuda/__pycache__/_utils.cpython-310.pyc +0 -0
  23. env-llmeval/lib/python3.10/site-packages/torch/cuda/__pycache__/error.cpython-310.pyc +0 -0
  24. env-llmeval/lib/python3.10/site-packages/torch/cuda/__pycache__/graphs.cpython-310.pyc +0 -0
  25. env-llmeval/lib/python3.10/site-packages/torch/cuda/__pycache__/jiterator.cpython-310.pyc +0 -0
  26. env-llmeval/lib/python3.10/site-packages/torch/cuda/__pycache__/memory.cpython-310.pyc +0 -0
  27. env-llmeval/lib/python3.10/site-packages/torch/cuda/__pycache__/nccl.cpython-310.pyc +0 -0
  28. env-llmeval/lib/python3.10/site-packages/torch/cuda/__pycache__/nvtx.cpython-310.pyc +0 -0
  29. env-llmeval/lib/python3.10/site-packages/torch/cuda/__pycache__/profiler.cpython-310.pyc +0 -0
  30. env-llmeval/lib/python3.10/site-packages/torch/cuda/__pycache__/random.cpython-310.pyc +0 -0
  31. env-llmeval/lib/python3.10/site-packages/torch/cuda/__pycache__/sparse.cpython-310.pyc +0 -0
  32. env-llmeval/lib/python3.10/site-packages/torch/cuda/__pycache__/streams.cpython-310.pyc +0 -0
  33. env-llmeval/lib/python3.10/site-packages/torch/cuda/_memory_viz.py +626 -0
  34. env-llmeval/lib/python3.10/site-packages/torch/cuda/_sanitizer.py +622 -0
  35. env-llmeval/lib/python3.10/site-packages/torch/cuda/_utils.py +54 -0
  36. env-llmeval/lib/python3.10/site-packages/torch/cuda/amp/__init__.py +9 -0
  37. env-llmeval/lib/python3.10/site-packages/torch/cuda/amp/__pycache__/__init__.cpython-310.pyc +0 -0
  38. env-llmeval/lib/python3.10/site-packages/torch/cuda/amp/__pycache__/autocast_mode.cpython-310.pyc +0 -0
  39. env-llmeval/lib/python3.10/site-packages/torch/cuda/amp/__pycache__/common.cpython-310.pyc +0 -0
  40. env-llmeval/lib/python3.10/site-packages/torch/cuda/amp/__pycache__/grad_scaler.cpython-310.pyc +0 -0
  41. env-llmeval/lib/python3.10/site-packages/torch/cuda/amp/autocast_mode.py +144 -0
  42. env-llmeval/lib/python3.10/site-packages/torch/cuda/amp/common.py +9 -0
  43. env-llmeval/lib/python3.10/site-packages/torch/cuda/amp/grad_scaler.py +679 -0
  44. env-llmeval/lib/python3.10/site-packages/torch/cuda/comm.py +18 -0
  45. env-llmeval/lib/python3.10/site-packages/torch/cuda/graphs.py +476 -0
  46. env-llmeval/lib/python3.10/site-packages/torch/cuda/jiterator.py +185 -0
  47. env-llmeval/lib/python3.10/site-packages/torch/cuda/memory.py +914 -0
  48. env-llmeval/lib/python3.10/site-packages/torch/cuda/nccl.py +137 -0
  49. env-llmeval/lib/python3.10/site-packages/torch/cuda/nvtx.py +91 -0
  50. env-llmeval/lib/python3.10/site-packages/torch/cuda/profiler.py +61 -0
env-llmeval/lib/python3.10/site-packages/torch/_dynamo/__pycache__/bytecode_transformation.cpython-310.pyc ADDED
Binary file (31.2 kB)

env-llmeval/lib/python3.10/site-packages/torch/_dynamo/__pycache__/code_context.cpython-310.pyc ADDED
Binary file (1.29 kB)

env-llmeval/lib/python3.10/site-packages/torch/_dynamo/__pycache__/convert_frame.cpython-310.pyc ADDED
Binary file (19.1 kB)

env-llmeval/lib/python3.10/site-packages/torch/_dynamo/__pycache__/debug_utils.cpython-310.pyc ADDED
Binary file (19.1 kB)

env-llmeval/lib/python3.10/site-packages/torch/_dynamo/__pycache__/decorators.cpython-310.pyc ADDED
Binary file (8.63 kB)

env-llmeval/lib/python3.10/site-packages/torch/_dynamo/__pycache__/exc.cpython-310.pyc ADDED
Binary file (10.5 kB)

env-llmeval/lib/python3.10/site-packages/torch/_dynamo/__pycache__/logging.cpython-310.pyc ADDED
Binary file (1.14 kB)

env-llmeval/lib/python3.10/site-packages/torch/_dynamo/__pycache__/mutation_guard.cpython-310.pyc ADDED
Binary file (4.11 kB)

env-llmeval/lib/python3.10/site-packages/torch/_dynamo/__pycache__/replay_record.cpython-310.pyc ADDED
Binary file (4.5 kB)

env-llmeval/lib/python3.10/site-packages/torch/_dynamo/__pycache__/side_effects.cpython-310.pyc ADDED
Binary file (15.4 kB)

env-llmeval/lib/python3.10/site-packages/torch/_dynamo/__pycache__/symbolic_convert.cpython-310.pyc ADDED
Binary file (73.2 kB)

env-llmeval/lib/python3.10/site-packages/torch/_dynamo/__pycache__/test_case.cpython-310.pyc ADDED
Binary file (2.29 kB)

env-llmeval/lib/python3.10/site-packages/torch/_dynamo/__pycache__/test_minifier_common.cpython-310.pyc ADDED
Binary file (7.16 kB)
 
env-llmeval/lib/python3.10/site-packages/torch/_prims_common/__init__.py ADDED
@@ -0,0 +1,1944 @@
1
+ from __future__ import annotations
2
+
3
+ import operator
4
+ import warnings
5
+ import weakref
6
+
7
+ from contextlib import nullcontext
8
+ from enum import Enum
9
+ from functools import cmp_to_key, reduce
10
+ from typing import (
11
+ Any,
12
+ Callable,
13
+ cast,
14
+ List,
15
+ Optional,
16
+ overload,
17
+ Sequence,
18
+ Tuple,
19
+ Type,
20
+ TYPE_CHECKING,
21
+ Union,
22
+ )
23
+
24
+
25
+ if TYPE_CHECKING:
26
+ # Import the following modules during type checking to enable code intelligence features,
27
+ # such as auto-completion in tools like pylance, even when these modules are not explicitly
28
+ # imported in user code.
29
+
30
+ import sympy
31
+
32
+ import torch
33
+ from torch import sym_float, sym_int, sym_max
34
+
35
+
36
+ ShapeType = Union[torch.Size, List[int], Tuple[int, ...]]
37
+ StrideType = Union[List[int], Tuple[int, ...]]
38
+ DimsType = Union[int, List[int], Tuple[int, ...]]
39
+ DimsSequenceType = Union[List[int], Tuple[int, ...]]
40
+ # TODO: Type[torch.SymInt], Type[torch.SymFloat]
41
+ NumberTypeType = Union[Type[bool], Type[int], Type[float], Type[complex]]
42
+ # TODO: This needs a lot more type annotations
43
+ # NumberType = Union[bool, int, float, complex, torch.SymInt, torch.SymFloat]
44
+ NumberType = Union[bool, int, float, complex]
45
+ RealNumberType = Union[bool, int, float]
46
+
47
+ Number = (bool, int, float, complex, torch.SymInt, torch.SymFloat)
48
+ # I don't call it Integral because numbers.Integral includes bool, but IntLike
49
+ # does not
50
+ Dim = int
51
+ IntLike = (int, torch.SymInt)
52
+ FloatLike = (float, torch.SymFloat)
53
+ IntWithoutSymInt = int
54
+ FloatWithoutSymFloat = float
55
+ DeviceLikeType = Union[str, torch.device, int]
56
+ Tensor = torch.Tensor
57
+
58
+
59
+ torch_function_passthrough = {
60
+ torch.device,
61
+ torch.sym_not,
62
+ torch.sym_float,
63
+ torch.sym_int,
64
+ torch.sym_max,
65
+ torch.sym_min,
66
+ torch.sym_sqrt,
67
+ torch.sym_ite,
68
+ torch.Tensor.dim,
69
+ torch.Tensor.ndim.__get__, # type: ignore[attr-defined]
70
+ torch.Tensor.numel,
71
+ torch.Tensor.size,
72
+ torch.Tensor.storage_offset,
73
+ torch.Tensor.stride,
74
+ torch.Tensor.dtype.__get__, # type: ignore[attr-defined]
75
+ torch.Tensor.is_sparse.__get__, # type: ignore[attr-defined]
76
+ torch.Tensor.shape.__get__, # type: ignore[attr-defined]
77
+ torch.Tensor.device.__get__, # type: ignore[attr-defined]
78
+ torch.Tensor.requires_grad.__get__, # type: ignore[attr-defined]
79
+ torch.Tensor.layout.__get__, # type: ignore[attr-defined]
80
+ torch.Tensor.is_contiguous,
81
+ # For TorchRefsMode only
82
+ torch.Tensor.__format__,
83
+ torch.Tensor.__repr__,
84
+ torch.Tensor.requires_grad.__get__, # type: ignore[attr-defined]
85
+ }
86
+
87
+
88
+ TensorLikeType = torch.Tensor
89
+ TensorLike = torch.Tensor
90
+ TensorSequenceType = Union[List[TensorLikeType], Tuple[TensorLikeType, ...]]
91
+ TensorOrNumberLikeType = Union[TensorLikeType, NumberType]
92
+
93
+ CustomOutParamAnnotation = "__custom_out_param__"
94
+
95
+
96
+ def same_shape(a: ShapeType, b: ShapeType, *, allow_rhs_unbacked=False) -> bool:
97
+ if len(a) != len(b):
98
+ return False
99
+
100
+ for x, y in zip(a, b):
101
+ if allow_rhs_unbacked:
102
+ # TODO: We should check that the symbols are consistent
103
+ # with each other
104
+ if isinstance(y, torch.SymInt):
105
+ continue
106
+ if x != y:
107
+ return False
108
+
109
+ return True
110
+
111
+
112
+ def _maybe_get_pytype(t):
113
+ if t is torch.SymFloat:
114
+ return float
115
+ elif t is torch.SymInt:
116
+ return int
117
+ elif t is torch.SymBool:
118
+ return bool
119
+ else:
120
+ return t
121
+
122
+
123
+ # TODO: look at using torch.testing.assert_close instead with an option
124
+ # to just compare metadata
125
+ def compare_tensor_meta(
126
+ a: TensorLikeType,
127
+ b: TensorLikeType,
128
+ check_strides=False,
129
+ *,
130
+ allow_rhs_unbacked=False,
131
+ ):
132
+ """
133
+ Checks that two tensor likes have the same shape,
134
+ dtype and device.
135
+
136
+ In the future this will validate additional metadata, like
137
+ strides.
138
+ """
139
+ assert isinstance(a, TensorLike)
140
+ assert isinstance(b, TensorLike)
141
+
142
+ if not same_shape(a.shape, b.shape, allow_rhs_unbacked=allow_rhs_unbacked):
143
+ msg = f"Shapes {a.shape} and {b.shape} are not equal!"
144
+ raise AssertionError(msg)
145
+
146
+ if a.dtype != b.dtype:
147
+ msg = f"Dtypes {a.dtype} and {b.dtype} are not equal!"
148
+ raise AssertionError(msg)
149
+
150
+ if a.device != b.device:
151
+ # Handles special cuda:0 vs cuda case
152
+ # TODO: we should review why this happens and see about fixing it
153
+ if (str(a.device) == "cuda:0" or str(a.device) == "cuda") and (
154
+ str(b.device) == "cuda:0" or str(b.device) == "cuda"
155
+ ):
156
+ pass
157
+ else:
158
+ msg = f"Devices {a.device} and {b.device} are not equal!"
159
+ raise AssertionError(msg)
160
+
161
+ # Stride checking is currently disabled, see https://github.com/pytorch/pytorch/issues/78050
162
+ if check_strides:
163
+ same_strides, idx = check_significant_strides(a, b)
164
+ if not same_strides:
165
+ msg = f"Stride mismatch! Strides are {a.stride()} and {b.stride()} (mismatched at {idx})!"
166
+ raise RuntimeError(msg)
167
+
168
+ if a.storage_offset() != b.storage_offset():
169
+ msg = f"Storage offset mismatch! Storage offsets are {a.storage_offset()} and {b.storage_offset()}!"
170
+ raise RuntimeError(msg)
171
+
172
+ if a.is_conj() != b.is_conj():
173
+ raise RuntimeError(
174
+ f"Conj mismatch! is_conj is set to {a.is_conj()} and {b.is_conj()}"
175
+ )
176
+
177
+ if a.is_neg() != b.is_neg():
178
+ raise RuntimeError(
179
+ f"Neg mismatch! is_neg is set to {a.is_neg()} and {b.is_neg()}"
180
+ )
181
+
182
+
183
+ def _check_strides_helper(
184
+ a: TensorLikeType, b: TensorLikeType, *, only_cuda=True, significant_only=True
185
+ ) -> Tuple[bool, Optional[int]]:
186
+ # NOTE: only on CUDA because CPU elementwise strides are incorrect in PyTorch
187
+ # See https://github.com/pytorch/pytorch/issues/77553
188
+ # Only compares strides that are "meaningful" -- strides for dimensions with length > 1
189
+ # and for tensors with more than one element
190
+ if (
191
+ not only_cuda or a.device.type == "cuda" or b.device.type == "cuda"
192
+ ) and a.numel() > 0:
193
+ for idx in range(a.ndim):
194
+ check = not significant_only or a.shape[idx] > 1
195
+ if a.stride()[idx] != b.stride()[idx] and check:
196
+ return False, idx
197
+
198
+ return True, None
199
+
200
+
201
+ def check_significant_strides(
202
+ a: TensorLikeType, b: TensorLikeType, *, only_cuda=True
203
+ ) -> Tuple[bool, Optional[int]]:
204
+ return _check_strides_helper(a, b, only_cuda=only_cuda, significant_only=True)
205
+
206
+
207
+ def check_all_strides(
208
+ a: TensorLikeType, b: TensorLikeType, *, only_cuda=True
209
+ ) -> Tuple[bool, Optional[int]]:
210
+ return _check_strides_helper(a, b, only_cuda=only_cuda, significant_only=False)
211
+
212
+
213
+ # This function is equivalent to compute_contiguous() from TensorImpl.cpp
214
+ def is_contiguous(a: TensorLikeType) -> bool:
215
+ """
216
+ Tests whether a tensor is contiguous or not.
217
+
218
+ Tensors are contiguous when they have no elements,
219
+ one element, or when they have "nested" strides.
220
+ """
221
+ if a.numel() < 2:
222
+ return True
223
+
224
+ expected_stride = 1
225
+ for x, y in reversed(tuple(zip(a.shape, a.stride()))):
226
+ # Skips checking strides when a dimension has length 1
227
+ if x == 1:
228
+ continue
229
+
230
+ if y != expected_stride:
231
+ return False
232
+ expected_stride = expected_stride * x
233
+
234
+ return True
235
+
236
+
237
+ # This function is equivalent to compute_channels_last_contiguous_2d() in TensorImpl.cpp
238
+ def is_channels_last_contiguous_2d(a: Tensor) -> bool:
239
+ # NHWC or not channels last 2D contiguous
240
+ if a.ndim != 4:
241
+ return False
242
+
243
+ expected_stride = 1
244
+ for idx in (1, 3, 2, 0):
245
+ length = a.shape[idx]
246
+ if length == 1:
247
+ continue
248
+
249
+ stride = a.stride()[idx]
250
+ if stride != expected_stride:
251
+ return False
252
+
253
+ expected_stride *= length
254
+
255
+ return True
256
+
257
+
258
+ def is_channels_last_contiguous_3d(a: Tensor) -> bool:
259
+ # NDHWC or not channels last 3D contiguous
260
+ if a.ndim != 5:
261
+ return False
262
+
263
+ expected_stride = 1
264
+ for idx in (1, 4, 3, 2, 0):
265
+ length = a.shape[idx]
266
+ if length == 1:
267
+ continue
268
+
269
+ stride = a.stride()[idx]
270
+ if stride != expected_stride:
271
+ return False
272
+
273
+ expected_stride *= length
274
+
275
+ return True
276
+
277
+
278
+ _memory_formats = {
279
+ torch.contiguous_format,
280
+ torch.preserve_format,
281
+ torch.channels_last,
282
+ torch.channels_last_3d,
283
+ }
284
+
285
+
286
+ def validate_memory_format(memory_format: torch.memory_format):
287
+ torch._check(
288
+ memory_format in _memory_formats,
289
+ lambda: f"Received unknown memory format {memory_format}!",
290
+ )
291
+
292
+
293
+ def is_contiguous_for_memory_format( # type: ignore[return]
294
+ a: Tensor, *, memory_format: torch.memory_format
295
+ ) -> bool:
296
+ validate_memory_format(memory_format)
297
+
298
+ if memory_format == torch.contiguous_format:
299
+ return is_contiguous(a)
300
+ if memory_format == torch.channels_last:
301
+ return is_channels_last_contiguous_2d(a)
302
+ if memory_format == torch.channels_last_3d:
303
+ return is_channels_last_contiguous_3d(a)
304
+
305
+ torch._check(
306
+ False,
307
+ lambda: f"is_contiguous received unsupported memory format {memory_format}",
308
+ )
309
+
310
+
311
+ # NOTE: that tensors with no elements and channels last is ???
312
+ def is_channels_last_contiguous(a: Tensor) -> bool:
313
+ """
314
+ True when a tensor is channels-last contiguous.
315
+
316
+ This requires that:
317
+
318
+ - the tensor is conceptually either 4 (NHWC) or 5 (NDHWC) dimensions
319
+ - if we name the tensor's dimensions NCHW or NCDHW, then the strides are such that the
320
+ stride of the 'C' dimension (Cs) is 1 and the strides corresponding to
321
+ each dimension (Xs) can be ordered Cs <= Ws <= Hs <= (Ds) <= Ns and are
322
+ "nested" -- so Ws = Cs * Cl, where Cl is the length of the 'C' dimension,
323
+ for example.
324
+ """
325
+ return is_channels_last_contiguous_2d(a) or is_channels_last_contiguous_3d(a)
326
+
327
+
328
+ def is_non_overlapping_and_dense(a: Tensor) -> bool:
329
+ """
330
+ True when a tensor is non-overlapping and dense.
331
+
332
+ A tensor is non-overlapping and dense when there exists a permutation of
333
+ its dimensions that is contiguous.
334
+ """
335
+
336
+ if a.is_sparse:
337
+ return False
338
+
339
+ # Short-circuits if the tensor is already contiguous or channels-last contiguous
340
+ if is_contiguous(a) or is_channels_last_contiguous(a):
341
+ return True
342
+
343
+ # The following is equivalent to compute_non_overlapping_and_dense in TensorImpl.cpp
344
+
345
+ # Short-circuits for tensors of rank one, which are
346
+ # non-overlapping and "dense" if their stride is one
347
+ if a.ndim == 1:
348
+ return a.stride()[0] == 1
349
+
350
+ # Checks that there exists a permutation of the strides s.t. the tensor would be contiguous
351
+ # Sorts (length, stride) pairs by stride
352
+ lengths_and_strides = sorted(zip(a.shape, a.stride()), key=operator.itemgetter(1))
353
+
354
+ expected_stride = 1
355
+ for length, stride in lengths_and_strides:
356
+ if length == 1:
357
+ continue
358
+
359
+ if stride != expected_stride:
360
+ return False
361
+
362
+ expected_stride *= length
363
+
364
+ return True
365
+
366
+
367
+ # NOTE: Based on the implementation in TensorIterator.cpp, but note that
368
+ # the note [Computing output strides] is incorrect, because it
369
+ # says that strides will be preserved even if they are not
370
+ # "non overlapping and dense", but this is incorrect. The
371
+ # output of elementwise operations are always given
372
+ # non overlapping and dense strides.
373
+ # This is also INCORRECT because it does not model TensorIterator's
374
+ # short-circuit, which can cause different strides.
375
+ def compute_elementwise_output_logical_to_physical_perm(
376
+ *tensors, _skip_checks=False
377
+ ) -> List[int]:
378
+ if not _skip_checks and len(tensors) == 0:
379
+ msg = "Can't compute elementwise output strides for zero tensors!"
380
+ raise ValueError(msg)
381
+
382
+ if not _skip_checks:
383
+ check_same_shape(*tensors, allow_cpu_scalar_tensors=True)
384
+
385
+ # Filters the tensors to actual tensors
386
+ if not _skip_checks:
387
+ tensors = tuple(
388
+ a
389
+ for a in tensors
390
+ if isinstance(a, TensorLike) and not is_cpu_scalar_tensor(a)
391
+ )
392
+
393
+ # Short-circuits for CPU scalar case
394
+ if len(tensors) == 0:
395
+ return []
396
+
397
+ # Short-circuits for shapes with zero or one dimensions
398
+ # TODO: are these necessary?
399
+ ndim = tensors[0].ndim
400
+ if ndim == 0:
401
+ return []
402
+ if ndim == 1:
403
+ return [0]
404
+
405
+ # Short-circuits if contiguous, following the fake fast path.
406
+ # This reduces the number of guards we end up making
407
+ # TODO: do channels last too
408
+ is_contiguous = True
409
+ for t in tensors:
410
+ is_contiguous = is_contiguous and t.is_contiguous(
411
+ memory_format=torch.contiguous_format
412
+ )
413
+
414
+ if is_contiguous:
415
+ return list(range(ndim))
416
+
417
+ shape = tensors[0].shape
418
+
419
+ def should_swap(idx_a, idx_b):
420
+ for tensor in tensors:
421
+ stride_a = tensor.stride()[idx_a]
422
+ stride_b = tensor.stride()[idx_b]
423
+
424
+ if stride_a == 0 or stride_b == 0:
425
+ continue
426
+
427
+ if stride_a < stride_b:
428
+ return -1
429
+
430
+ if stride_a > stride_b:
431
+ return 1
432
+
433
+ # stride_a == stride_b
434
+ if shape[idx_a] > shape[idx_b]:
435
+ return 1
436
+
437
+ # Note: this case is hit if all strides are zero,
438
+ # or all strides are equal and all dimensions have the same length
439
+ return 0
440
+
441
+ # The "sort" order for the permutation is back-to-front, but
442
+ # the natural order for permutations is front-to-back. Do the
443
+ # sorting back-to-front and then reverse it on output.
444
+ #
445
+ # also, note this returns the logical to physical shape permutation
446
+ perm = list(reversed(range(ndim)))
447
+
448
+ # insertion sort with support for ambiguous comparisons
449
+ for i in range(1, ndim):
450
+ dim1 = i
451
+ for dim0 in reversed(range(i)):
452
+ comparison = should_swap(perm[dim0], perm[dim1])
453
+ if comparison > 0:
454
+ perm[dim0], perm[dim1] = perm[dim1], perm[dim0]
455
+ dim1 = dim0
456
+ elif comparison < 0:
457
+ break
458
+
459
+ return list(reversed(perm))
460
+
461
+
462
+ def compute_elementwise_output_strides(*tensors) -> Tuple[int, ...]:
463
+ """
464
+ Computes the output strides for elementwise operations.
465
+ """
466
+ if len(tensors) == 0:
467
+ msg = "Can't compute elementwise output strides for zero tensors!"
468
+ raise ValueError(msg)
469
+
470
+ check_same_shape(*tensors, allow_cpu_scalar_tensors=True)
471
+
472
+ # Filters the tensors to actual tensors
473
+ tensors = tuple(
474
+ a for a in tensors if isinstance(a, TensorLike) and not is_cpu_scalar_tensor(a)
475
+ )
476
+
477
+ # Short-circuits for CPU scalar case
478
+ if len(tensors) == 0:
479
+ return ()
480
+
481
+ ndim = tensors[0].ndim
482
+ shape = tensors[0].shape
483
+
484
+ if ndim == 0:
485
+ return ()
486
+ if ndim == 1:
487
+ return (1,)
488
+
489
+ logical_to_physical_perm = compute_elementwise_output_logical_to_physical_perm(
490
+ *tensors, _skip_checks=True
491
+ )
492
+ permuted_shape = apply_perm(shape, logical_to_physical_perm) # to physical
493
+
494
+ new_strides = make_contiguous_strides_for(permuted_shape)
495
+ permuted_strides = apply_perm(
496
+ new_strides, invert_perm(logical_to_physical_perm)
497
+ ) # to logical
498
+
499
+ return tuple(permuted_strides)
500
+
501
+
502
+ # Identity permutation is [0, 1, 2]
503
+ def apply_perm(inp, perm):
504
+ ndim = len(inp)
505
+ permuted_inp = [-1] * ndim
506
+ for idx, x in enumerate(perm):
507
+ permuted_inp[idx] = inp[x]
508
+ return permuted_inp
509
+
510
+
511
+ def invert_perm(perm):
512
+ ndim = len(perm)
513
+ new_perm = [-1] * ndim
514
+ for idx, x in enumerate(perm):
515
+ new_perm[x] = idx
516
+ return new_perm
517
+
518
+
519
+ #
520
+ # Common helper functions
521
+ #
522
+
523
+
524
+ def validate_dim_length(length: int):
525
+ """
526
+ Validates that an object represents a valid
527
+ dimension length.
528
+ """
529
+
530
+ if isinstance(length, (int, torch.SymInt)):
531
+ torch._check_is_size(length)
532
+ else:
533
+ # sometimes called with sympy expression by inductor
534
+ assert length >= 0
535
+
536
+
537
+ def validate_shape(shape: ShapeType):
538
+ """
539
+ Validates that a sequence represents a valid shape.
540
+ """
541
+
542
+ assert isinstance(shape, Sequence), type(shape)
543
+ for l in shape:
544
+ validate_dim_length(l)
545
+
546
+
547
+ def validate_strides(strides: StrideType):
548
+ """
549
+ Verifies the object specifies valid strides.
550
+ """
551
+
552
+ assert isinstance(strides, Sequence)
553
+ for stride in strides:
554
+ assert stride >= 0
555
+
556
+
557
+ def validate_idx(rank: int, idx: int):
558
+ """
559
+ Validates that idx is a valid index for the given shape.
560
+ Assumes the index is already canonicalized.
561
+ """
562
+
563
+ assert isinstance(idx, Dim)
564
+ assert isinstance(rank, Dim)
565
+
566
+ assert idx >= 0 and idx < rank or idx == 0
567
+
568
+
569
+ def validate_dimension_indices(rank: int, indices: DimsSequenceType):
570
+ for idx in indices:
571
+ validate_idx(rank, idx)
572
+
573
+
574
+ def validate_exclusive_idx(rank: int, ex_idx: int):
575
+ """
576
+ Validates that ex_idx is a valid exclusive index
577
+ for the given shape.
578
+ """
579
+
580
+ assert isinstance(ex_idx, Dim)
581
+ assert isinstance(rank, Dim)
582
+ assert ex_idx > 0 and ex_idx <= rank
583
+
584
+
585
+ # "Wraps" a dim (up to one time) for the given rank, allowing dims to be
586
+ # specified using negative indices. If `wrap_scalar` is true then scalar
587
+ # tensors of rank 0 will allow dimensions in the range [-1, 0]. Otherwise,
588
+ # idx should be in the range [-rank, rank-1].
589
+ def canonicalize_dim(rank: int, idx: int, wrap_scalar: bool = True) -> int:
590
+ if rank < 0:
591
+ msg = f"Rank cannot be negative but got {rank}"
592
+ raise IndexError(msg)
593
+
594
+ if rank == 0:
595
+ if not wrap_scalar:
596
+ msg = f"Dimension specified as {idx} but tensor has no dimensions"
597
+ raise IndexError(msg)
598
+ rank = 1
599
+
600
+ if idx >= 0 and idx < rank:
601
+ return idx
602
+
603
+ if idx < 0:
604
+ _idx = idx + rank
605
+ else:
606
+ _idx = idx
607
+
608
+ if _idx < 0 or _idx >= rank:
609
+ # Same error message as in aten/src/ATen/WrapDimUtils.h:49
610
+ msg = f"Dimension out of range (expected to be in range of [{-rank}, {rank - 1}], but got {idx})"
611
+ raise IndexError(msg)
612
+
613
+ return _idx
614
+
615
+
616
+ # Takes a dimension or sequence of dimensions and "wraps" them,
617
+ # mapping negative offsets to positive ones
618
+ @overload
619
+ def canonicalize_dims(
620
+ rank: int, indices: Sequence[int], wrap_scalar: bool = True
621
+ ) -> Tuple[int, ...]:
622
+ pass
623
+
624
+
625
+ @overload
626
+ def canonicalize_dims(rank: int, indices: int, wrap_scalar: bool = True) -> int:
627
+ pass
628
+
629
+
630
+ def canonicalize_dims(rank, indices, wrap_scalar=True):
631
+ if isinstance(indices, Dim):
632
+ return canonicalize_dim(rank, indices, wrap_scalar)
633
+
634
+ return tuple(canonicalize_dim(rank, x, wrap_scalar) for x in indices)
635
+
636
+
637
+ def is_valid_permutation(rank: int, perm: DimsSequenceType) -> bool:
638
+ """
639
+ Validates that perm is a permutation of length rank.
640
+ """
641
+
642
+ if not isinstance(perm, Sequence):
643
+ return False
644
+
645
+ if not (tuple(sorted(perm)) == tuple(range(0, rank))):
646
+ return False
647
+
648
+ return True
649
+
650
+
651
+ def is_same_shape(a: Sequence, b: Sequence) -> bool:
652
+ """
653
+ Compares two shapes a and b, returning True if they are the same
654
+ (their ranks and corresponding lengths match) and False otherwise.
655
+ """
656
+
657
+ return tuple(a) == tuple(b)
658
+
659
+
660
+ def is_cpu_scalar_tensor(a: Any) -> bool:
661
+ return isinstance(a, TensorLike) and a.ndim == 0 and a.device.type == "cpu"
662
+
663
+
664
+ def check_same_device(*args, allow_cpu_scalar_tensors):
665
+ """
666
+ Checks that all Tensors in args have the same device.
667
+
668
+ Raises a RuntimeError when:
669
+ - args contains an object whose type is not Tensor or Number
670
+ - two Tensor objects in args have different devices, unless one is a CPU scalar tensor and allow_cpu_scalar_tensors is True
671
+ """
672
+ # Short-circuits if all (one or fewer) arguments are trivially on the same device
673
+ if len(args) <= 1:
674
+ return
675
+
676
+ # Note: cannot initialize device to the first arg's device (it may not have one)
677
+ device = None
678
+ for arg in args:
679
+ if isinstance(arg, Number):
680
+ continue
681
+ elif isinstance(arg, TensorLike):
682
+ if allow_cpu_scalar_tensors and is_cpu_scalar_tensor(arg):
683
+ continue
684
+
685
+ if device is None:
686
+ device = arg.device
687
+
688
+ if device != arg.device:
689
+ msg = (
690
+ "Tensor on device "
691
+ + str(arg.device)
692
+ + " is not on the expected device "
693
+ + str(device)
694
+ + "!"
695
+ )
696
+ raise RuntimeError(msg)
697
+ else:
698
+ msg = (
699
+ "Unexpected type when checking for same device, " + str(type(arg)) + "!"
700
+ )
701
+ raise RuntimeError(msg)
702
+
703
+
704
+ def canonicalize_device(device: DeviceLikeType) -> torch.device:
705
+ if isinstance(device, torch.device):
706
+ return device
707
+
708
+ assert isinstance(device, str)
709
+ return torch.device(device)
710
+
711
+
712
+ # Asserts if any of the following are true:
713
+ # - a non-scalar or non-Tensor is given
714
+ # - the shape of any tensors is distinct
715
+ def check_same_shape(*args, allow_cpu_scalar_tensors: bool):
716
+ """
717
+ Checks that all Tensors in args have the same shape.
718
+
719
+ Raises a RuntimeError when:
720
+ - args contains an object whose type is not Tensor or Number
721
+ - two Tensor objects in args have different devices
722
+ """
723
+ shape = None
724
+
725
+ for arg in args:
726
+ if isinstance(arg, Number):
727
+ continue
728
+ elif isinstance(arg, TensorLike):
729
+ if allow_cpu_scalar_tensors and is_cpu_scalar_tensor(arg):
730
+ continue
731
+
732
+ if shape is None:
733
+ shape = arg.shape
734
+
735
+ if not is_same_shape(shape, arg.shape):
736
+ msg = f"Shape {arg.shape} is not the expected shape {shape}!"
737
+ raise RuntimeError(msg)
738
+ else:
739
+ msg = (
740
+ "Unexpected type when checking for same shape, " + str(type(arg)) + "!"
741
+ )
742
+ raise RuntimeError(msg)
743
+
744
+
745
+ # Acquires a common shape, if it exists, from one or more tensor arguments,
746
+ # filtering number arguments
747
+ def extract_shape(*args, allow_cpu_scalar_tensors: bool) -> Optional[ShapeType]:
748
+ shape = None
749
+ scalar_shape = None
750
+
751
+ for arg in args:
752
+ if isinstance(arg, Number):
753
+ continue
754
+ elif isinstance(arg, TensorLike):
755
+ if allow_cpu_scalar_tensors and is_cpu_scalar_tensor(arg):
756
+ scalar_shape = arg.shape
757
+ continue
758
+
759
+ if shape is None:
760
+ shape = arg.shape
761
+
762
+ if not is_same_shape(shape, arg.shape):
763
+ return None
764
+ else:
765
+ return None
766
+
767
+ return shape if shape is not None else scalar_shape
768
+
769
+
770
+ # Extracts dimensions that might be passed either as a list/tuple or as varargs.
771
+ # A typical case is Tensor.permute .
772
+ def extract_dims_from_varargs(
773
+ dims: Union[DimsSequenceType, Tuple[DimsSequenceType, ...]]
774
+ ) -> DimsSequenceType:
775
+ if dims and isinstance(dims[0], Sequence):
776
+ assert len(dims) == 1
777
+ dims = cast(Tuple[DimsSequenceType], dims)
778
+ return dims[0]
779
+ else:
780
+ return cast(DimsSequenceType, dims)
781
+
782
+
783
+ def extract_shape_from_varargs(
784
+ shape: Union[ShapeType, Tuple[ShapeType]],
785
+ validate=True,
786
+ ) -> Tuple[int, ...]:
787
+ """
788
+ Returns a shape from varargs.
789
+
790
+ In PyTorch, operations that accept shapes often accept them as varargs, like
791
+ foo(*shape). However a user can pass the shape as a sequence of integers,
792
+ like this:
793
+
794
+ foo(1, 2, 3)
795
+
796
+ or as a sequence of integers
797
+
798
+ foo((1, 2, 3))
799
+
800
+ In the first case shape will be a tuple of integers, and in the second case it's a tuple
801
+ containing a tuple of integers. This validates those inputs and canonicalizes them
802
+ to a tuple of integers.
803
+ """
804
+
805
+ # Handles tuple unwrapping
806
+ if len(shape) == 1 and isinstance(shape[0], Sequence):
807
+ shape = shape[0]
808
+
809
+ if validate:
810
+ validate_shape(shape) # type: ignore[arg-type]
811
+ return shape # type: ignore[return-value]
812
+
813
+
814
+ def infer_size_shapes(a: ShapeType, b: ShapeType) -> Tuple[int, ...]:
815
+ ndim = max(len(a), len(b))
816
+ expandedSizes = [0] * ndim
817
+
818
+ for i in range(ndim - 1, -1, -1):
819
+ offset = ndim - 1 - i
820
+ dimA = len(a) - 1 - offset
821
+ dimB = len(b) - 1 - offset
822
+ sizeA = a[dimA] if dimA >= 0 else 1
823
+ sizeB = b[dimB] if dimB >= 0 else 1
824
+
825
+ torch._check(
826
+ (sizeA == sizeB) or (sizeA == 1) or (sizeB == 1),
827
+ lambda: (
828
+ f"The size of tensor a ({sizeA}) must match the size of "
829
+ f"tensor b ({sizeB}) at non-singleton dimension {i}"
830
+ ),
831
+ )
832
+
833
+ # 1s map to the other size (even 0)
834
+ expandedSizes[i] = sizeB if sizeA == 1 else sizeA
835
+
836
+ return tuple(expandedSizes)
837
+
838
+
839
+ def infer_size(shape: ShapeType, numel: int) -> Tuple[int, ...]:
840
+ """
841
+ Infers the size of a dim with size -1, if it exists.
842
+ Also checks that new shape is compatible with the number of elements.
843
+ """
844
+ dim = None
845
+ newsize = 1
846
+ for i, d in enumerate(shape):
847
+ if d == -1:
848
+ torch._check(dim is None, lambda: "only one dimension can be inferred")
849
+ dim = i
850
+ elif d >= 0:
851
+ newsize *= d
852
+ else:
853
+ torch._check(False, lambda: f"invalid shape dimension {d}")
854
+ if dim is None:
855
+ torch._check(
856
+ numel == newsize,
857
+ lambda: f"shape '{list(shape)}' is invalid for input of size {numel}",
858
+ )
859
+ else:
860
+ from torch.fx.experimental.symbolic_shapes import definitely_true
861
+
862
+ torch._check(
863
+ newsize != 0,
864
+ lambda: (
865
+ f"cannot reshape tensor of 0 elements into shape {list(shape)} because the "
866
+ f"unspecified dimension size -1 can be any value and is ambiguous"
867
+ if definitely_true(numel == 0)
868
+ else f"shape '{list(shape)}' is invalid for input of size {numel}"
869
+ ),
870
+ )
871
+ torch._check(
872
+ numel % newsize == 0,
873
+ lambda: f"shape '{list(shape)}' is invalid for input of size {numel}",
874
+ )
875
+ # Convert to list to produce a compatible error message with core
876
+ # PyTorch, which prints sequences in square brackets.
877
+ shape = list(shape)
878
+ shape[dim] = numel // newsize
879
+ # NB: This is pretty important when you have unbacked SymInts.
880
+ # Suppose you have (i0, 12) resizing into (2, -1, 12). The old
881
+ # range for i0 is typically [2, inf], which means if you divide
882
+ # by two the new range should be [1, inf]. But this is bad news
883
+ # if you have an unbacked SymInt: we need to reapply the unsound
884
+ # assumption that the size is >= 2.
885
+ torch._check_is_size(shape[dim])
886
+ return tuple(shape)
887
+
888
+
889
+ _integer_dtypes = (torch.uint8, torch.int8, torch.int16, torch.int32, torch.int64)
890
+ _low_precision_dtypes = (torch.float16, torch.bfloat16, torch.complex32)
891
+ _complex_dtypes = (torch.complex32, torch.complex64, torch.complex128)
892
+
893
+
894
+ def is_boolean_dtype(dtype: torch.dtype) -> bool:
895
+ assert isinstance(dtype, torch.dtype)
896
+ return dtype is torch.bool
897
+
898
+
899
+ def is_integer_dtype(dtype: torch.dtype) -> bool:
900
+ assert isinstance(dtype, torch.dtype)
901
+ return dtype in _integer_dtypes
902
+
903
+
904
+ def is_low_precision_dtype(dtype: torch.dtype) -> bool:
905
+ assert isinstance(dtype, torch.dtype)
906
+ return dtype in _low_precision_dtypes
907
+
908
+
909
+ def is_float_dtype(dtype: torch.dtype) -> bool:
910
+ assert isinstance(dtype, torch.dtype)
911
+ return dtype.is_floating_point
912
+
913
+
914
+ def is_complex_dtype(dtype: torch.dtype) -> bool:
915
+ assert isinstance(dtype, torch.dtype)
916
+ return dtype in _complex_dtypes
917
+
918
+
919
+ def is_grad_dtype(dtype: torch.dtype) -> bool:
920
+ """
921
+ Checks if the dtype can require a gradient.
922
+ """
923
+ return dtype.is_floating_point or is_complex_dtype(dtype)
924
+
925
+
926
+ _complex_to_real_dtype_map = {
927
+ torch.complex128: torch.float64,
928
+ torch.complex64: torch.float32,
929
+ torch.complex32: torch.float16,
930
+ }
931
+
932
+ _real_to_complex_dtype_map = {
933
+ torch.float16: torch.complex32,
934
+ torch.bfloat16: torch.complex64,
935
+ torch.float32: torch.complex64,
936
+ torch.float64: torch.complex128,
937
+ }
938
+
939
+
940
+ def corresponding_real_dtype(dtype: torch.dtype) -> torch.dtype:
941
+ return _complex_to_real_dtype_map[dtype]
942
+
943
+
944
+ def corresponding_complex_dtype(dtype: torch.dtype) -> torch.dtype:
945
+ return _real_to_complex_dtype_map[dtype]
946
+
947
+
948
+ def dtype_to_type(dtype: torch.dtype) -> type:
949
+ """
950
+ Computes the corresponding Python type (AKA "type kind") for the
951
+ given dtype.
952
+ """
953
+ assert isinstance(dtype, torch.dtype)
954
+
955
+ if dtype is torch.bool:
956
+ return bool
957
+ if dtype in _integer_dtypes:
958
+ return int
959
+ if dtype.is_floating_point:
960
+ return float
961
+ if dtype in _complex_dtypes:
962
+ return complex
963
+
964
+ raise ValueError("Invalid dtype!")
965
+
966
+
967
+ def dtype_to_type_ctor(dtype: torch.dtype) -> Callable[[NumberType], NumberType]:
968
+ """
969
+ Computes the corresponding Python type constructor for the
970
+ given dtype.
971
+ """
972
+ assert isinstance(dtype, torch.dtype)
973
+
974
+ if dtype is torch.bool:
975
+ return lambda x: bool(x)
976
+ if dtype in _integer_dtypes:
977
+ return sym_int
978
+ if dtype.is_floating_point:
979
+ return sym_float
980
+ if dtype in _complex_dtypes:
981
+ # TODO: type error here is real, replace with sym_complex
982
+ return lambda x: complex(x) # type: ignore[arg-type]
983
+
984
+ raise ValueError("Invalid dtype!")
985
+
986
+
987
+ def type_to_dtype(typ: type) -> torch.dtype:
988
+ """
989
+ Computes the corresponding dtype for a Number type.
990
+ """
991
+
992
+ assert isinstance(typ, type)
993
+
994
+ if typ is bool:
995
+ return torch.bool
996
+ if typ in [int, torch.SymInt]:
997
+ return torch.long
998
+ if typ in [float, torch.SymFloat]:
999
+ return torch.get_default_dtype()
1000
+ # TODO: sym_complex_float?
1001
+ if typ is complex:
1002
+ return corresponding_complex_dtype(torch.get_default_dtype())
1003
+
1004
+ raise ValueError("Invalid type!")
1005
+
1006
+
1007
+ def get_dtype(x: Union[torch.Tensor, NumberType]):
1008
+ if isinstance(x, torch.Tensor):
1009
+ return x.dtype
1010
+ else:
1011
+ return type_to_dtype(type(x))
1012
+
1013
+
1014
+ _ordered_types = (bool, int, float, complex)
1015
+
1016
+
1017
+ def check_fp_or_complex(
1018
+ dtype: torch.dtype, fn_name: str, allow_low_precision_dtypes: bool = True
1019
+ ):
1020
+ """
1021
+ Checks whether the input is floating point or complex.
1022
+ If allow_low_precision_dtypes is True, it allows having float16, bfloat16, and complex32
1023
+ """
1024
+ torch._check(
1025
+ is_float_dtype(dtype) or is_complex_dtype(dtype),
1026
+ lambda: f"{fn_name}: Expected a floating point or complex tensor as input. Got {dtype}",
1027
+ )
1028
+ torch._check(
1029
+ allow_low_precision_dtypes or not is_low_precision_dtype(dtype),
1030
+ lambda: f"{fn_name}: Half precision dtypes not supported. Got {dtype}",
1031
+ )
1032
+
1033
+
1034
+ def check_is_matrix(A: TensorLikeType, f_name: str, arg_name: str = "A"):
1035
+ torch._check(
1036
+ len(A.shape) >= 2,
1037
+ lambda: f"{f_name}: The input tensor {arg_name} must have at least 2 dimensions.",
1038
+ )
1039
+
1040
+
1041
+ def get_higher_type(a: type, b: type) -> type:
1042
+ """
1043
+ Returns the higher of the two given Number types.
1044
+
1045
+ The types are ordered bool -> int -> float -> complex.
1046
+ """
1047
+ a, b = _maybe_get_pytype(a), _maybe_get_pytype(b)
1048
+ # Type checking
1049
+ if a not in _ordered_types or b not in _ordered_types:
1050
+ raise RuntimeError(f"Expected builtin numeric types, found {a}, {b}")
1051
+
1052
+ if a is b:
1053
+ return a
1054
+
1055
+ for typ in _ordered_types:
1056
+ if a is typ:
1057
+ return b
1058
+ if b is typ:
1059
+ return a
1060
+
1061
+ raise ValueError("Unknown Python scalar type!")
1062
+
1063
+
1064
+ # Returns the higher of two torch datatypes a and b or, if the two
1065
+ # are not ordered relative to each other, the next
1066
+ # higher datatype
1067
+ def get_higher_dtype(
1068
+ a: Optional[Union[torch.dtype, TensorLikeType, NumberType]],
1069
+ b: Optional[Union[torch.dtype, TensorLikeType, NumberType]],
1070
+ ) -> Optional[torch.dtype]:
1071
+ """
1072
+ Computes the "lowest" datatype that is weakly
1073
+ "higher" than both a and b.
1074
+ """
1075
+
1076
+ # Type checking
1077
+ assert a is None or isinstance(a, (torch.dtype, TensorLike, Number))
1078
+ assert b is None or isinstance(b, (torch.dtype, TensorLike, Number))
1079
+
1080
+ def _extract_dtype(
1081
+ x: Optional[Union[torch.dtype, TensorLikeType, NumberType]]
1082
+ ) -> Optional[torch.dtype]:
1083
+ if x is None:
1084
+ return None
1085
+ if isinstance(x, torch.dtype):
1086
+ return x
1087
+ if isinstance(x, TensorLike):
1088
+ return x.dtype
1089
+ if isinstance(x, Number):
1090
+ return type_to_dtype(type(x))
1091
+
1092
+ raise RuntimeError("Unexpected type given to _extract_dtype!")
1093
+
1094
+ a, b = _extract_dtype(a), _extract_dtype(b)
1095
+
1096
+ if a is b:
1097
+ return a
1098
+
1099
+ if a is None:
1100
+ return b
1101
+
1102
+ if b is None:
1103
+ return a
1104
+
1105
+ ordered_datatypes = (
1106
+ (torch.bool,),
1107
+ (torch.uint8, torch.int8),
1108
+ (torch.int16,),
1109
+ (torch.int32,),
1110
+ (torch.int64,),
1111
+ (torch.float16, torch.bfloat16),
1112
+ (torch.float32,),
1113
+ (torch.float64,),
1114
+ (torch.complex32,),
1115
+ (torch.complex64,),
1116
+ (torch.complex128,),
1117
+ )
1118
+
1119
+ for idx, dtypes in enumerate(ordered_datatypes):
1120
+ if a in dtypes and b in dtypes:
1121
+ return ordered_datatypes[idx + 1][0]
1122
+ if a in dtypes:
1123
+ return b
1124
+ if b in dtypes:
1125
+ return a
1126
+
1127
+ raise RuntimeError("Unexpected termination!")
1128
+
1129
+
1130
+ def check_pin_memory(pin_memory: bool):
1131
+ torch._check_not_implemented(
1132
+ not pin_memory, lambda: "PrimTorch does not support pinned memory"
1133
+ )
1134
+
1135
+
1136
+ def check_layout(layout: torch.layout):
1137
+ torch._check_not_implemented(
1138
+ layout == torch.strided, lambda: f"PrimTorch doesn't support layout={layout}"
1139
+ )
1140
+
1141
+
1142
+ # TODO: maybe unify with can_cast_to?
1143
+ def is_weakly_lesser_type(a: type, b: type) -> bool:
1144
+ """
1145
+ Compares two types, a and b, returning True if a is weakly "less" than b.
1146
+
1147
+ The comparison is determined by the following type ordering: bool, int, float, complex.
1148
+ """
1149
+
1150
+ a, b = _maybe_get_pytype(a), _maybe_get_pytype(b)
1151
+
1152
+ if a not in _ordered_types or b not in _ordered_types:
1153
+ raise RuntimeError(f"Expected builtin numeric types, found {a}, {b}")
1154
+
1155
+ for typ in _ordered_types:
1156
+ if a == typ:
1157
+ return True
1158
+ if b == typ:
1159
+ return False
1160
+
1161
+ raise RuntimeError("Unexpected termination!")
1162
+
1163
+
1164
+ def can_safe_cast_to(*, cast_to: torch.dtype, cast_from: torch.dtype) -> bool:
1165
+ for fn in (is_complex_dtype, is_float_dtype, is_integer_dtype, is_boolean_dtype):
1166
+ if fn(cast_to):
1167
+ return True
1168
+ if fn(cast_from):
1169
+ return False
1170
+
1171
+ raise ValueError(f"Received unknown dtypes {cast_to}, {cast_from}!")
1172
+
1173
+
1174
+ def check_same_dtype(*args):
1175
+ """
1176
+ Checks that all Tensors in args have the same device and that all Numbers have the
1177
+ same corresponding Python type.
1178
+
1179
+ Raises a RuntimeError when:
1180
+ - args contains an object whose type is not Tensor or Number
1181
+ - two Tensors objects in args have different dtypes
1182
+ - two Number objects in args have different types
1183
+ - there are Tensors and Numbers in args, and one of those Tensors corresponding
1184
+ Python types is different from the type of one of those Numbers
1185
+ """
1186
+ full_dtype = None
1187
+ scalar_type = None
1188
+
1189
+ for arg in args:
1190
+ if isinstance(arg, Number):
1191
+ # Scalar type checking is disabled (and may be removed in the future)
1192
+ continue
1193
+ # if scalar_type is None:
1194
+ # scalar_type = type(arg)
1195
+
1196
+ # if scalar_type is not type(arg):
1197
+ # msg = (
1198
+ # "Scalar of type "
1199
+ # + str(type(arg))
1200
+ # + " is not the expected type of "
1201
+ # + str(scalar_type)
1202
+ # + "!"
1203
+ # )
1204
+ # raise RuntimeError(msg)
1205
+ elif isinstance(arg, TensorLike):
1206
+ if full_dtype is None:
1207
+ full_dtype = arg.dtype
1208
+ if scalar_type is None:
1209
+ scalar_type = dtype_to_type(arg.dtype)
1210
+
1211
+ if full_dtype is not arg.dtype:
1212
+ msg = (
1213
+ "Tensor with dtype "
1214
+ + str(arg.dtype)
1215
+ + " is not the expected dtype of "
1216
+ + str(full_dtype)
1217
+ + "!"
1218
+ )
1219
+ raise RuntimeError(msg)
1220
+
1221
+ arg_type = dtype_to_type(arg.dtype)
1222
+ if arg_type is not scalar_type:
1223
+ msg = (
1224
+ "Tensor with corresponding Python type "
1225
+ + str(arg_type)
1226
+ + " is not the expected type of "
1227
+ + str(scalar_type)
1228
+ + "!"
1229
+ )
1230
+ raise RuntimeError(msg)
1231
+ else:
1232
+ msg = (
1233
+ "Unexpected type when checking for same dtype, " + str(type(arg)) + "!"
1234
+ )
1235
+ raise RuntimeError(msg)
1236
+
1237
+
1238
+ # Maps datatypes to their computation types for elementwise operations
1239
+ _computation_dtype_map = {
1240
+ torch.bfloat16: torch.float32,
1241
+ torch.float16: torch.float32,
1242
+ torch.complex32: torch.complex64,
1243
+ }
1244
+
1245
+
1246
+ def get_computation_dtype(dtype: torch.dtype) -> torch.dtype:
1247
+ return _computation_dtype_map.get(dtype, dtype)
1248
+
1249
+
1250
+ _cpu_acc_type_map = {
1251
+ torch.bfloat16: torch.float64,
1252
+ torch.float16: torch.float64,
1253
+ torch.float32: torch.float64,
1254
+ torch.complex32: torch.complex128,
1255
+ torch.complex64: torch.complex128,
1256
+ }
1257
+
1258
+
1259
+ def get_acc_type(dtype: torch.dtype, device: torch.device) -> torch.dtype:
1260
+ # Equivalent to at::toAccumulateType, prefer computation_dtype where possible
1261
+ if device.type == "cpu":
1262
+ return _cpu_acc_type_map.get(dtype, dtype)
1263
+ else:
1264
+ return get_computation_dtype(dtype)
1265
+
1266
+
1267
+ class ELEMENTWISE_TYPE_PROMOTION_KIND(Enum):
1268
+ DEFAULT = (0,)
1269
+ NO_OPMATH = (1,)
1270
+ INT_TO_FLOAT = (2,)
1271
+ ALWAYS_BOOL = (3,)
1272
+ COMPLEX_TO_FLOAT = (4,)
1273
+ BOOL_TO_LONG = (5,)
1274
+
1275
+
1276
+ class REDUCTION_OUTPUT_TYPE_KIND(Enum):
1277
+ SAME = (0,)
1278
+ COMPLEX_TO_FLOAT = (1,) # for complex types outputs corresponding real type
1279
+ KEEP_PROMOTED_TYPE = (2,) # keep output in opmath type, needed for mean
1280
+ ALWAYS_BOOL = (3,)
1281
+
1282
+
1283
+ # Describes the return type of the primitive:
1284
+ #
1285
+ # - NEW, a new tensor is created
1286
+ # - VIEW, a view of an input tensor is returned
1287
+ # - INPLACE, one or more input tensors is modified
1288
+ #
1289
+ # these descriptors are mutually exclusive and exhaustive.
1290
+ class RETURN_TYPE(Enum):
1291
+ NEW = (0,)
1292
+ VIEW = (1,)
1293
+ INPLACE = (2,)
1294
+
1295
+
1296
+ # TODO: when NumberType contains the sym types, can simplify this
1297
+ def number_type(x: Union[NumberType, torch.SymInt, torch.SymFloat]) -> Type:
1298
+ if isinstance(x, torch.SymInt):
1299
+ return int
1300
+ elif isinstance(x, torch.SymFloat):
1301
+ return float
1302
+ else:
1303
+ return type(x)
1304
+
1305
+
1306
+ def symbol_type(x: sympy.Symbol) -> Type:
1307
+ if x.is_integer: # type: ignore[attr-defined]
1308
+ return int
1309
+ else:
1310
+ # NB: Not strictly correct, but we don't support SymPy complex or bool.
1311
+ return float
1312
+
1313
+
1314
+ # TODO: document type promotion kinds
1315
+ def elementwise_dtypes(
1316
+ *_args,
1317
+ type_promotion_kind: ELEMENTWISE_TYPE_PROMOTION_KIND,
1318
+ ) -> Tuple[torch.dtype, torch.dtype]:
1319
+ """
1320
+ Computes the computation and result dtypes for elementwise type promotion
1321
+ on the given arguments and with the given elementwise type promotion kind.
1322
+
1323
+ Note that not all inputs to an elementwise operation necessarily participate in type promotion.
1324
+ For example, the "alpha" parameter of torch.add does not participate in type promotion,
1325
+ although it may be cast to the Python type corresponding to the computation dtype that
1326
+ the type promotion algorithm determines.
1327
+
1328
+ Default elementwise type promotion, which all other type promotion kinds tweak (see below),
1329
+ first decides which of four ordered types to use:
1330
+
1331
+ bool -> integer -> floating point -> complex
1332
+
1333
+ The selected type is the "lowest" type in the above list such that all number arguments
1334
+ have a weakly "lower" type and all tensor arguments have a weakly lower corresponding
1335
+ type for their dtype.
1336
+
1337
+ Once the type is determined, the particular result dtype is found. The dtypes are
1338
+ partially ordered as follows:
1339
+
1340
+ bool -> uint8, int8 -> int16 -> int32 -> int64 ->
1341
+ float16, bfloat16 -> float32 -> float64 -> complex32 -> complex64 -> complex128
1342
+
1343
+ The result dtype is selected by:
1344
+ - if no tensor's dtype has the same corresponding type as the one selected,
1345
+ then the result dtype is the (default) dtype corresponding to the selected type
1346
+ (for example, 1.5 + an integer tensor has a result dtype of the default floating point dtype)
1347
+ - if the result type is complex then the dtype is:
1348
+ - the default complex dtype if there are no floating point or complex tensors
1349
+ - if there are floating point or complex tensors with one or more dimensions, then
1350
+ the complex dtype corresponding to the highest corresponding complex dtype among those tensors
1351
+ (for example, double + cfloat -> cdouble)
1352
+ - if there are only floating point or complex tensors with zero dimensions, then
1353
+ the complex dtype corresponding to the highest corresponding complex dtype among those tensors
1354
+ - if the first two cases do not apply, the result dtype is the highest dtype among
1355
+ all tensors with one or more dimensions of the output type, and if there are no such
1356
+ tensors then it's the highest dtype among all tensors with zero dimensions of the output type
1357
+ (for example, long + half -> half, even if the half tensor has zero dimensions)
1358
+
1359
+ The "corresponding complex dtypes" are:
1360
+ float16 -> complex32
1361
+ bfloat16 -> complex64
1362
+ float32 -> complex64
1363
+ float64 -> complex128
1364
+ complex32 -> complex32
1365
+ complex64 -> complex64
1366
+ complex128 -> complex128
1367
+
1368
+ The DEFAULT type promotion kind computes per above, and then uses the result dtype to pick a computation
1369
+ dtype by mapping low precision floating point and complex dtypes as follows:
1370
+
1371
+ float16 -> float32
1372
+ bfloat16 -> float32
1373
+ complex32 -> complex64
1374
+
1375
+ This is referred to as "op math", and the NO_OPMATH type promotion kind disables this mapping, making the
1376
+ computation dtype the same as the result dtype when it's selected. NO_OPMATH is appropriate for kernels
1377
+ which perform no mathematical operations on their tensors (see below for examples).
1378
+
1379
+ The INT_TO_FLOAT type promotion kind maps boolean and integer result dtypes to the default floating point dtype,
1380
+ and computation dtypes to the appropriate op math dtype.
1381
+
1382
+ The COMPLEX_TO_FLOAT type promotion kind maps complex result dtypes to the corresponding float dtype, following this
1383
+ mapping:
1384
+
1385
+ complex32 -> float16
1386
+ complex64 -> float32
1387
+ complex128 -> float64
1388
+
1389
+ Note that COMPLEX_TO_FLOAT derives the computation dtype as the DEFAULT setting does.
1390
+
1391
+ The BOOL_TO_LONG type promotion kind maps boolean computation and result dtypes to long.
1392
+
1393
+ The ALWAYS_BOOL type promotion kind always sets the result dtype to bool.
1394
+
1395
+ Example operators for each type promotion option:
1396
+ DEFAULT : add
1397
+ NO_OPMATH : where, nextafter, cat
1398
+ INT_TO_FLOAT : sin
1399
+ COMPLEX_TO_FLOAT : abs
1400
+ BOOL_TO_LONG : pow
1401
+ ALWAYS_BOOL : eq
1402
+
1403
+ """
1404
+
1405
+ args = tuple(x for x in _args if x is not None)
1406
+
1407
+ highest_type: type = bool
1408
+
1409
+ # Import sympy locally, as importing it eagerly at a module level is too slow
1410
+ # See https://dev-discuss.pytorch.org/t/delving-into-what-happens-when-you-import-torch/1589
1411
+ import sympy
1412
+
1413
+ for x in args:
1414
+ if not isinstance(x, (Number, TensorLike, sympy.Symbol)):
1415
+ msg = f"Unexpected type {str(type(x))} when computing elementwise type promotion!"
1416
+ raise ValueError(msg)
1417
+
1418
+ if isinstance(x, Number):
1419
+ highest_type = get_higher_type(highest_type, number_type(x))
1420
+ elif isinstance(x, sympy.Symbol):
1421
+ highest_type = get_higher_type(highest_type, symbol_type(x))
1422
+ else:
1423
+ # x is a TensorLike
1424
+ highest_type = get_higher_type(highest_type, dtype_to_type(x.dtype))
1425
+
1426
+ result_dtype = None
1427
+
1428
+ def _find_highest_dtype_filtered(
1429
+ args, filter, *, float_as_complex=False
1430
+ ) -> Optional[torch.dtype]:
1431
+ zero_dim_tensor_dtype = None
1432
+ one_plus_dim_tensor_dtype = None
1433
+ for x in args:
1434
+ if isinstance(x, TensorLike) and filter(x.dtype):
1435
+ _dtype = x.dtype
1436
+ if float_as_complex and is_float_dtype(_dtype):
1437
+ _dtype = corresponding_complex_dtype(_dtype)
1438
+ if x.ndim == 0:
1439
+ zero_dim_tensor_dtype = get_higher_dtype(
1440
+ zero_dim_tensor_dtype, _dtype
1441
+ )
1442
+ else:
1443
+ # x.ndim > 0
1444
+ one_plus_dim_tensor_dtype = get_higher_dtype(
1445
+ one_plus_dim_tensor_dtype, _dtype
1446
+ )
1447
+
1448
+ # Prefers dtype of tensors with one or more dimensions
1449
+ if one_plus_dim_tensor_dtype is not None:
1450
+ return one_plus_dim_tensor_dtype
1451
+
1452
+ return zero_dim_tensor_dtype
1453
+
1454
+ if highest_type is float:
1455
+ result_dtype = _find_highest_dtype_filtered(args, is_float_dtype)
1456
+ result_dtype = (
1457
+ torch.get_default_dtype() if result_dtype is None else result_dtype
1458
+ )
1459
+ elif highest_type is complex:
1460
+ result_dtype = _find_highest_dtype_filtered(
1461
+ args,
1462
+ lambda x: is_float_dtype(x) or is_complex_dtype(x),
1463
+ float_as_complex=True,
1464
+ )
1465
+ if result_dtype is None:
1466
+ result_dtype = corresponding_complex_dtype(torch.get_default_dtype())
1467
+ elif highest_type is int:
1468
+ result_dtype = _find_highest_dtype_filtered(args, is_integer_dtype)
1469
+ result_dtype = torch.long if result_dtype is None else result_dtype
1470
+ else:
1471
+ # highest_type is bool
1472
+ result_dtype = torch.bool
1473
+
1474
+ if type_promotion_kind is ELEMENTWISE_TYPE_PROMOTION_KIND.DEFAULT:
1475
+ return get_computation_dtype(result_dtype), result_dtype
1476
+ elif type_promotion_kind is ELEMENTWISE_TYPE_PROMOTION_KIND.NO_OPMATH:
1477
+ return result_dtype, result_dtype
1478
+ elif type_promotion_kind is ELEMENTWISE_TYPE_PROMOTION_KIND.INT_TO_FLOAT:
1479
+ if is_integer_dtype(result_dtype) or is_boolean_dtype(result_dtype):
1480
+ result_dtype = torch.get_default_dtype()
1481
+ return get_computation_dtype(result_dtype), result_dtype
1482
+ elif type_promotion_kind is ELEMENTWISE_TYPE_PROMOTION_KIND.COMPLEX_TO_FLOAT:
1483
+ # NOTE: computation can still occur in a complex dtype
1484
+ computation_dtype = get_computation_dtype(result_dtype)
1485
+ if is_complex_dtype(result_dtype):
1486
+ result_dtype = corresponding_real_dtype(result_dtype)
1487
+ return computation_dtype, result_dtype
1488
+ elif type_promotion_kind is ELEMENTWISE_TYPE_PROMOTION_KIND.BOOL_TO_LONG:
1489
+ if is_boolean_dtype(result_dtype):
1490
+ return torch.long, torch.long
1491
+ return get_computation_dtype(result_dtype), result_dtype
1492
+ elif type_promotion_kind is ELEMENTWISE_TYPE_PROMOTION_KIND.ALWAYS_BOOL:
1493
+ return get_computation_dtype(result_dtype), torch.bool
1494
+ else:
1495
+ raise ValueError(f"Unknown type promotion kind {str(type_promotion_kind)}")
1496
+
1497
+
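A quick, hedged illustration of the promotion rules documented above (a minimal sketch: it assumes torch.get_default_dtype() is torch.float32 and calls elementwise_dtypes directly):

import torch
from torch._prims_common import ELEMENTWISE_TYPE_PROMOTION_KIND, elementwise_dtypes

int_t = torch.ones(2, 2, dtype=torch.int64)
half_t = torch.ones((), dtype=torch.float16)  # zero-dim half tensor

# long + half -> half result, computed in float32 ("op math")
elementwise_dtypes(int_t, half_t,
    type_promotion_kind=ELEMENTWISE_TYPE_PROMOTION_KIND.DEFAULT)       # (torch.float32, torch.float16)

# NO_OPMATH keeps the computation dtype equal to the result dtype
elementwise_dtypes(int_t, half_t,
    type_promotion_kind=ELEMENTWISE_TYPE_PROMOTION_KIND.NO_OPMATH)     # (torch.float16, torch.float16)

# INT_TO_FLOAT maps an integer result dtype to the default floating point dtype
elementwise_dtypes(int_t, int_t,
    type_promotion_kind=ELEMENTWISE_TYPE_PROMOTION_KIND.INT_TO_FLOAT)  # (torch.float32, torch.float32)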
1498
+ def reduction_dtypes(
1499
+ arg,
1500
+ output_dtype_kind: REDUCTION_OUTPUT_TYPE_KIND,
1501
+ dtype: Optional[torch.dtype] = None,
1502
+ ) -> Tuple[torch.dtype, Optional[torch.dtype]]:
1503
+ # even though some reductions, like amin or amax, don't strictly require type promotion,
1504
+ # all the math ops (including comparisons) are still defined only for a computation type,
1505
+ # so promotion will still happen. We are doing it explicitly here
1506
+ inp_dtype = dtype if dtype is not None else arg.dtype
1507
+ computation_dtype = get_computation_dtype(inp_dtype)
1508
+ if (
1509
+ output_dtype_kind == REDUCTION_OUTPUT_TYPE_KIND.SAME
1510
+ or output_dtype_kind == REDUCTION_OUTPUT_TYPE_KIND.COMPLEX_TO_FLOAT
1511
+ ):
1512
+ result_dtype = dtype if dtype else arg.dtype
1513
+ if (
1514
+ output_dtype_kind == REDUCTION_OUTPUT_TYPE_KIND.COMPLEX_TO_FLOAT
1515
+ and is_complex_dtype(result_dtype)
1516
+ ):
1517
+ result_dtype = corresponding_real_dtype(result_dtype)
1518
+ elif output_dtype_kind == REDUCTION_OUTPUT_TYPE_KIND.KEEP_PROMOTED_TYPE:
1519
+ result_dtype = None
1520
+ else: # ALWAYS_BOOL
1521
+ result_dtype = torch.bool
1522
+ return computation_dtype, result_dtype
1523
+
1524
+
1525
+ # This function's logic is borrowed from the following functions defined in C++:
1526
+ # batched_matrix_contiguous_strides and contiguous_strides
1527
+ def make_contiguous_strides_for(
1528
+ shape: ShapeType, row_major: bool = True
1529
+ ) -> Tuple[int, ...]:
1530
+ """
1531
+ Returns the strides of a contiguous tensor if row_major
1532
+ If row_major=False, it returns the strides of a contiguous batch of Fortran-contiguous matrices
1533
+ This is often used when calling external libraries like BLAS/LAPACK/cuSolver...
1534
+ """
1535
+ # contiguous_strides from c10/util/strides.h
1536
+ validate_shape(shape)
1537
+ if not shape:
1538
+ return ()
1539
+
1540
+ # TODO: Move this somewhere central?
1541
+ def _is_singleton(s):
1542
+ # check for SingletonSymNode
1543
+ if not isinstance(s, torch.SymInt):
1544
+ return False
1545
+ if s.node.singleton_int() is not None:
1546
+ return True
1547
+
1548
+ # check for SymInt wrapping a SingletonSymNode (fake-ifying causes this)
1549
+ return (
1550
+ s.node.is_symbolic()
1551
+ and s.node.hint is not None
1552
+ and isinstance(s.node.hint, torch.SymInt)
1553
+ and s.node.hint.node.singleton_int() is not None
1554
+ )
1555
+
1556
+ multiplier = 1
1557
+ strides = []
1558
+ for l in reversed(shape):
1559
+ strides.append(multiplier)
1560
+ multiplier *= l if _is_singleton(l) else sym_max(l, 1)
1561
+
1562
+ result = tuple(reversed(strides))
1563
+
1564
+ # batched_matrix_contiguous_strides from aten/src/ATen/native/LinearAlgebraUtils.h
1565
+ if row_major:
1566
+ return result
1567
+ else:
1568
+ if len(shape) < 2:
1569
+ return result
1570
+ return result[:-2] + (1, max(shape[-2], 1))
1571
+
1572
+
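A small usage sketch of the helper above; the expected tuples follow directly from the loop over the reversed shape and the row_major branch:

from torch._prims_common import make_contiguous_strides_for

make_contiguous_strides_for((2, 3, 4))                   # (12, 4, 1)  row-major (C-contiguous)
make_contiguous_strides_for((2, 3, 4), row_major=False)  # (12, 1, 3)  batch of Fortran-contiguous matrices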
1573
+ def make_channels_last_1d_strides_for(shape: ShapeType) -> Tuple[int, ...]:
1574
+ torch._check(
1575
+ len(shape) == 3,
1576
+ lambda: "Only tensors of rank 3 can use the channels_last_1d memory format",
1577
+ )
1578
+
1579
+ multiplier = 1
1580
+ strides = [0] * 3
1581
+ for idx in (1, -1, 0):
1582
+ # NOTE: intentional divergence from make_contiguous_strides_for
1583
+ # This is consistent with eager
1584
+ strides[idx] = multiplier
1585
+ multiplier *= shape[idx]
1586
+
1587
+ return tuple(strides)
1588
+
1589
+
1590
+ def make_channels_last_2d_strides_for(shape: ShapeType) -> Tuple[int, ...]:
1591
+ # TODO: maybe inform the user of channels_last_3d if rank of the tensor is 5?
1592
+ torch._check(
1593
+ len(shape) == 4,
1594
+ lambda: "Only tensors of rank 4 can use the channels_last memory format",
1595
+ )
1596
+
1597
+ multiplier = 1
1598
+ strides = [0] * 4
1599
+ for idx in (1, -1, -2, 0):
1600
+ # NOTE: intentional divergence from make_contiguous_strides_for
1601
+ # This is consistent with eager
1602
+ strides[idx] = multiplier
1603
+ multiplier *= shape[idx]
1604
+
1605
+ return tuple(strides)
1606
+
1607
+
1608
+ def make_channels_last_3d_strides_for(shape: ShapeType) -> Tuple[int, ...]:
1609
+ torch._check(
1610
+ len(shape) == 5,
1611
+ lambda: "Only tensors of rank 5 can use the channels_last_3d memory format",
1612
+ )
1613
+
1614
+ multiplier = 1
1615
+ strides = [0] * 5
1616
+ for idx in (1, -1, -2, -3, 0):
1617
+ # NOTE: intentional divergence from make_contiguous_strides_for
1618
+ # This is consistent with eager
1619
+ strides[idx] = multiplier
1620
+ multiplier *= shape[idx]
1621
+
1622
+ return tuple(strides)
1623
+
1624
+
1625
+ def make_channels_last_strides_for(shape: ShapeType) -> Tuple[int, ...]:
1626
+ ndim = len(shape) if isinstance(shape, Sequence) else 1
1627
+ if ndim == 3:
1628
+ return make_channels_last_1d_strides_for(shape)
1629
+ elif ndim == 4:
1630
+ return make_channels_last_2d_strides_for(shape)
1631
+ elif ndim == 5:
1632
+ return make_channels_last_3d_strides_for(shape)
1633
+ else:
1634
+ raise RuntimeError(
1635
+ f"no channels last format strides exist in {ndim} dimensions"
1636
+ )
1637
+
1638
+
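A hedged sketch of the channels-last helpers above; the eager comparison assumes a standard PyTorch build:

import torch
from torch._prims_common import make_channels_last_strides_for

make_channels_last_strides_for((2, 3, 4, 5))  # (60, 1, 15, 3) -- the channel dim gets stride 1
# Should agree with eager's channels_last layout:
torch.empty(2, 3, 4, 5).to(memory_format=torch.channels_last).stride()  # (60, 1, 15, 3)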
1639
+ def compute_reduction_output_shape(
1640
+ shape: ShapeType, dimensions: Sequence
1641
+ ) -> Tuple[int, ...]:
1642
+ for idx in dimensions:
1643
+ validate_idx(len(shape), idx)
1644
+
1645
+ new_shape = []
1646
+ for idx in range(len(shape)):
1647
+ if idx in dimensions:
1648
+ continue
1649
+
1650
+ new_shape.append(shape[idx])
1651
+
1652
+ return tuple(new_shape)
1653
+
1654
+
1655
+ def validate_no_repeating_dims(dims: Sequence):
1656
+ if len(dims) != len(set(dims)):
1657
+ raise RuntimeError("duplicate value in the list of dims")
1658
+
1659
+
1660
+ def reduction_dims(shape: ShapeType, dims: Optional[Sequence]) -> Tuple[int, ...]:
1661
+ if dims is None:
1662
+ return tuple(range(len(shape)))
1663
+ dims = tuple(canonicalize_dim(len(shape), idx) for idx in dims)
1664
+ validate_no_repeating_dims(dims)
1665
+ return dims
1666
+
1667
+
1668
+ def set_correction(
1669
+ unbiased: Optional[bool] = None,
1670
+ correction: Optional[NumberType] = None,
1671
+ ) -> float:
1672
+ if correction is not None and unbiased is not None:
1673
+ raise RuntimeError("cannot specify both correction and unbiased arguments")
1674
+ elif correction is None and unbiased is None:
1675
+ correction = 1.0
1676
+ elif correction is None and unbiased is not None:
1677
+ correction = 0.0 if unbiased is False else 1.0
1678
+ # NB: we don't actually support symint here, but it's harmless to accept
1679
+ if not isinstance(correction, (IntLike, FloatLike)):
1680
+ raise ValueError("correction argument should be integer or float")
1681
+ if correction < 0:
1682
+ raise ValueError("correction argument should be non-negative")
1683
+ return sym_float(correction)
1684
+
1685
+
1686
+ def compute_required_storage_length(
1687
+ shape: ShapeType, strides: StrideType, storage_offset: int
1688
+ ) -> int:
1689
+ """Computes the minimum storage size to hold the given tensor geometry.
1690
+
1691
+ Example
1692
+ =======
1693
+
1694
+ This is the size of a newly allocated tensor's storage, in units of elements
1695
+
1696
+ >>> t = torch.empty((10, 20))
1697
+ >>> compute_required_storage_length(t.shape, t.stride(), t.storage_offset())
1698
+ 200
1699
+
1700
+ >>> # xdoctest: +SKIP(failing)
1701
+ >>> t2 = torch.empty_strided((1, 2, 3), (5, 7, 11))
1702
+ >>> size = compute_required_storage_length(t2.shape, t2.stride(), t2.storage_offset())
1703
+ >>> size == t.storage().size()
1704
+ True
1705
+
1706
+ A valid tensor may have a larger storage size, but never smaller
1707
+
1708
+ >>> slice = torch.empty(100)[20:40]
1709
+ >>> slice.storage().size()
1710
+ 100
1711
+
1712
+ >>> compute_required_storage_length(slice.shape, slice.stride(), slice.storage_offset())
1713
+ 40
1714
+
1715
+ """
1716
+ # Short-circuits if the shape has no elements
1717
+ if reduce(operator.mul, shape, 1) == 0:
1718
+ return 0
1719
+
1720
+ max_offset = sum((x - 1) * y for x, y in zip(shape, strides))
1721
+ # +1 to account for the first element which offsets are taken from
1722
+ return 1 + storage_offset + max_offset
1723
+
1724
+
1725
+ def check_in_bounds_for_storage(
1726
+ a: torch.TypedStorage, shape: ShapeType, strides: StrideType, storage_offset: int
1727
+ ):
1728
+ """
1729
+ Determines if the given shape, strides, and offset are valid for the given storage.
1730
+ """
1731
+
1732
+ required_length = compute_required_storage_length(shape, strides, storage_offset)
1733
+ if a.size() < required_length:
1734
+ msg = (
1735
+ "Can't view a storage of size {} with an offset of {}, shape of {}, and strides of {}, "
1736
+ "which requires a storage of size {}".format(
1737
+ a.size(), storage_offset, str(shape), str(strides), required_length
1738
+ )
1739
+ )
1740
+ raise ValueError(msg)
1741
+
1742
+
1743
+ # NOTE: This function should ideally be removed, but some Meta internal models
1744
+ # packaged with `torch.package` are using it, so it will have to be removed
1745
+ # at some point in the future when those models no longer use this function.
1746
+ def check(
1747
+ b: bool, s: Callable[[], str], exc_type: Type[Exception] = RuntimeError
1748
+ ) -> None:
1749
+ """
1750
+ Helper function for raising an error_type (default: RuntimeError) if a boolean condition fails.
1751
+ Error message is a callable producing a string (to avoid wasting time
1752
+ string formatting in non-error case, and also to make it easier for torchdynamo
1753
+ to trace.)
1754
+
1755
+ .. note:: This function is planned for removal in the future. Please use
1756
+ `torch._check*` functions instead.
1757
+ """
1758
+ warnings.warn(
1759
+ DeprecationWarning(
1760
+ "'torch._prims_common.check' will be removed in the future. Please use "
1761
+ "'torch._check*' functions instead"
1762
+ )
1763
+ )
1764
+ torch._check_with(exc_type, b, s)
1765
+
1766
+
1767
+ # This combines is_channels_last_strides_2d and is_channels_last_strides_3d in
1768
+ # c10/core/MemoryFormat.h into one function
1769
+ def are_strides_like_channels_last(
1770
+ shape: Sequence[int], strides: Sequence[int]
1771
+ ) -> bool:
1772
+ ndim = len(shape)
1773
+
1774
+ if ndim == 4:
1775
+ # Check for channels_last_2d
1776
+ dim_order = [1, 3, 2, 0]
1777
+ elif ndim == 5:
1778
+ # Check for channels_last_3d
1779
+ dim_order = [1, 4, 3, 2, 0]
1780
+ else:
1781
+ return False
1782
+
1783
+ if strides[1] == 0:
1784
+ return False
1785
+
1786
+ min = 0
1787
+ for d in dim_order:
1788
+ if shape[d] == 0:
1789
+ return False
1790
+ if strides[d] < min:
1791
+ return False
1792
+ if d == 0 and min == strides[1]:
1793
+ return False
1794
+ min = strides[d]
1795
+ if strides[d] > 1:
1796
+ min *= shape[d]
1797
+ return True
1798
+
1799
+
1800
+ def suggest_memory_format(x: TensorLikeType) -> torch.memory_format:
1801
+ if x.layout != torch.strided:
1802
+ return torch.contiguous_format
1803
+
1804
+ if are_strides_like_channels_last(x.shape, x.stride()):
1805
+ return torch.channels_last if x.ndim == 4 else torch.channels_last_3d
1806
+
1807
+ return torch.contiguous_format
1808
+
1809
+
1810
+ def prod(xs: Sequence[NumberType]) -> NumberType:
1811
+ """Product of elements in input sequence. Returns 1 for empty sequence"""
1812
+ return reduce(operator.mul, xs, 1)
1813
+
1814
+
1815
+ def is_expandable_to(shape: ShapeType, desired: ShapeType) -> bool:
1816
+ """Checks if a shape can be expanded to another shape.
1817
+ This is equivalent to checking if the two shapes are broadcastable.
1818
+ """
1819
+ # This is a Python implementation of
1820
+ # aten/src/ATen/ExpandUtils.h:is_expandable_to
1821
+ if len(shape) > len(desired):
1822
+ return False
1823
+ for i in range(len(shape)):
1824
+ if shape[-i - 1] != desired[-i - 1] and shape[-i - 1] != 1:
1825
+ return False
1826
+ return True
1827
+
1828
+
1829
+ def mask_tensor(mask: TensorLikeType, t: TensorLikeType):
1830
+ """
1831
+ Similar to torch.where(mask, t, 0) but if t is boolean,
1832
+ result is also boolean and not promoted to int.
1833
+ """
1834
+ # torch.where(mask, t, False) is equivalent
1835
+ # but feels hacky and might break in the future
1836
+ if t.dtype is torch.bool:
1837
+ return mask.logical_and(t)
1838
+ else:
1839
+ return torch.where(mask, t, 0)
1840
+
1841
+
1842
+ def get_aten_op(fn: Callable, name: str):
1843
+ """
1844
+ Given the __module__ of reference and its name, it returns
1845
+ (our best guess of) the ATen name of the associated operation
1846
+
1847
+ Note: In ATen, the __name__ of a function within a module often
1848
+ starts by the module name. E.g. linalg_eigh, or special_zeta
1849
+ """
1850
+ module = fn.__module__
1851
+ prefix = "torch._refs"
1852
+ assert module.startswith(prefix)
1853
+ module = module[len(prefix) :]
1854
+ # We want to go from .special / .nn.functional
1855
+ # to special and special_ / nn_functional_
1856
+ if module:
1857
+ module = module[1:]
1858
+ module = module.replace(".", "_")
1859
+ module = module + "_"
1860
+ return getattr(torch._ops.ops.aten, f"{module}{name}")
1861
+
1862
+
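A brief sketch of the mapping performed by get_aten_op; it assumes a reference such as torch._refs.special.zeta exists with __module__ == "torch._refs.special":

from torch._prims_common import get_aten_op
import torch._refs.special as refs_special  # assumed to provide a `zeta` ref

get_aten_op(refs_special.zeta, "zeta")  # -> torch.ops.aten.special_zeta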
1863
+ def dtype_or_default(dtype: Optional[torch.dtype]) -> torch.dtype:
1864
+ return dtype if dtype is not None else torch.get_default_dtype()
1865
+
1866
+
1867
+ def device_or_default(device: Optional[DeviceLikeType]) -> DeviceLikeType:
1868
+ return device if device is not None else torch.device("cpu")
1869
+
1870
+
1871
+ def layout_or_default(layout: Optional[torch.layout]) -> torch.layout:
1872
+ return layout if layout is not None else torch.strided
1873
+
1874
+
1875
+ def clone_preserve_strides(x):
1876
+ needed_size = compute_required_storage_length(
1877
+ x.size(), x.stride(), x.storage_offset()
1878
+ )
1879
+ # Our eager implementations for *_scatter ops are all primitives w.r.t autograd,
1880
+ # so these as_strided() calls are not seen by autograd.
1881
+ # We need to mimic this behavior in our ref/prim implementations.
1882
+ # TODO: a better way to handle this would be with a new op, "_unsafe_as_strided"
1883
+ # We should revisit this when we add a compositional as_strided op,
1884
+ # and also as part of https://github.com/pytorch/pytorch/issues/90507
1885
+ try:
1886
+ old = torch._C._dispatch_tls_is_dispatch_key_excluded(
1887
+ torch._C.DispatchKey.ADInplaceOrView
1888
+ )
1889
+ torch._C._dispatch_tls_set_dispatch_key_excluded(
1890
+ torch._C.DispatchKey.ADInplaceOrView, True
1891
+ )
1892
+ buffer = torch.as_strided(x, (needed_size,), (1,), 0).clone()
1893
+ return torch.as_strided(buffer, x.size(), x.stride(), x.storage_offset())
1894
+ finally:
1895
+ torch._C._dispatch_tls_set_dispatch_key_excluded(
1896
+ torch._C.DispatchKey.ADInplaceOrView, old
1897
+ )
1898
+
1899
+
1900
+ def alert_not_deterministic(caller: str):
1901
+ if torch.are_deterministic_algorithms_enabled():
1902
+ if torch.is_deterministic_algorithms_warn_only_enabled():
1903
+ warnings.warn(
1904
+ f"{caller} does not have a deterministic implementation, but you set "
1905
+ f"'torch.use_deterministic_algorithms(True, warn_only=True)'. "
1906
+ f"You can file an issue at https://github.com/pytorch/pytorch/issues "
1907
+ f"to help us prioritize adding deterministic support for this operation."
1908
+ )
1909
+ else:
1910
+ torch._check(
1911
+ False,
1912
+ lambda: (
1913
+ f"{caller} does not have a deterministic implementation, but you set "
1914
+ f"'torch.use_deterministic_algorithms(True)'. You can turn off "
1915
+ f"determinism just for this operation, or you can use the "
1916
+ f"'warn_only=True' option, if that's acceptable for your application. "
1917
+ f"You can also file an issue at https://github.com/pytorch/pytorch/issues "
1918
+ f"to help us prioritize adding deterministic support for this operation."
1919
+ ),
1920
+ )
1921
+
1922
+
1923
+ class CUDARngStateHelper:
1924
+ @staticmethod
1925
+ def get_torch_state_as_tuple(fake_mode=nullcontext()):
1926
+ if not torch.cuda.is_available():
1927
+ raise RuntimeError("CUDA not available")
1928
+
1929
+ with fake_mode:
1930
+ seed = torch.tensor(torch.cuda.initial_seed())
1931
+ offset = torch.tensor(torch.cuda._get_rng_state_offset())
1932
+ return seed, offset
1933
+
1934
+ @staticmethod
1935
+ def set_torch_state_tensor(seed, offset):
1936
+ # Rng state is [64-bit seed, 64-bit offset]
1937
+ seed_portion = seed.reshape([1]).view(torch.uint8)
1938
+ offset_portion = offset.reshape([1]).view(torch.uint8)
1939
+ new_state = torch.cat([seed_portion, offset_portion])
1940
+ torch.cuda.set_rng_state(new_state)
1941
+
1942
+ @staticmethod
1943
+ def set_new_offset(relative_offset):
1944
+ torch.cuda._set_rng_state_offset(relative_offset.item())
env-llmeval/lib/python3.10/site-packages/torch/_prims_common/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (48.5 kB).
 
env-llmeval/lib/python3.10/site-packages/torch/_prims_common/__pycache__/wrappers.cpython-310.pyc ADDED
Binary file (12.2 kB).
 
env-llmeval/lib/python3.10/site-packages/torch/_prims_common/wrappers.py ADDED
@@ -0,0 +1,399 @@
1
+ import inspect
2
+ import warnings
3
+ from functools import wraps
4
+ from itertools import chain
5
+
6
+ from typing import Callable, NamedTuple, Optional, overload, Sequence, Tuple
7
+
8
+ import torch
9
+ import torch._prims_common as utils
10
+ from torch._prims_common import (
11
+ CustomOutParamAnnotation,
12
+ ELEMENTWISE_TYPE_PROMOTION_KIND,
13
+ Number,
14
+ NumberType,
15
+ ShapeType,
16
+ TensorLike,
17
+ TensorLikeType,
18
+ )
19
+ from torch.utils import _pytree as pytree
20
+ from torch.utils._pytree import tree_flatten, tree_unflatten
21
+
22
+
23
+ @overload
24
+ def _maybe_convert_to_dtype(a: TensorLikeType, dtype: torch.dtype) -> TensorLikeType:
25
+ pass
26
+
27
+
28
+ @overload
29
+ def _maybe_convert_to_dtype(a: NumberType, dtype: torch.dtype) -> NumberType:
30
+ pass
31
+
32
+
33
+ @overload
34
+ def _maybe_convert_to_dtype(a: Sequence, dtype: torch.dtype) -> Sequence:
35
+ pass
36
+
37
+
38
+ @overload
39
+ def _maybe_convert_to_dtype(a: None, dtype: torch.dtype) -> None:
40
+ pass
41
+
42
+
43
+ # TODO: implement ref.cast with an option to enforce safe casting
44
+ def _maybe_convert_to_dtype(a, dtype):
45
+ if isinstance(a, TensorLike):
46
+ if a.dtype != dtype:
47
+ return a.to(dtype)
48
+ return a
49
+ if isinstance(a, Number):
50
+ return utils.dtype_to_type_ctor(dtype)(a) # type: ignore[arg-type]
51
+ if isinstance(a, Sequence):
52
+ return tuple(_maybe_convert_to_dtype(x, dtype) for x in a)
53
+ # Passthrough None because some functions wrapped with type promotion
54
+ # wrapper might have optional args
55
+ if a is None:
56
+ return None
57
+
58
+ raise ValueError(f"Received type {type(a)} that is neither a tensor or a number!")
59
+
60
+
61
+ def _maybe_convert_to_type(a: NumberType, typ: type) -> NumberType:
62
+ if not isinstance(a, Number):
63
+ msg = f"Found unknown type {type(a)} when trying to convert scalars!"
64
+ raise ValueError(msg)
65
+ if not utils.is_weakly_lesser_type(type(a), typ):
66
+ msg = f"Scalar {a} of type {type(a)} cannot be safely cast to type {typ}!"
67
+ raise ValueError(msg)
68
+
69
+ return typ(a)
70
+
71
+
72
+ def _annotation_has_type(*, typ, annotation):
73
+ if hasattr(annotation, "__args__"):
74
+ for a in annotation.__args__:
75
+ if _annotation_has_type(typ=typ, annotation=a):
76
+ return True
77
+ return False
78
+
79
+ return typ is annotation
80
+
81
+
82
+ class elementwise_type_promotion_wrapper:
83
+ """
84
+ Adds elementwise type promotion to a Python reference implementation.
85
+
86
+ Takes two kwargs, type_promoting_args and type_promotion_kind.
87
+
88
+ type_promoting_args must be a string Sequence specifying the argument names of all
89
+ arguments that participate in type promotion (and should be type promoted). If the
90
+ arg specifies a Sequence-type then every element of the Sequence will participate in
91
+ type promotion.
92
+
93
+ type_promotion_kind must be one of the kinds specified by ELEMENTWISE_TYPE_PROMOTION_KIND.
94
+ See its documentation for details.
95
+
96
+ The return_dtype will be coerced to the wrapped function's dtype arg if it is available and
97
+ not None.
98
+
99
+ Other type promotion behavior, like validating the Python type of scalar arguments, must
100
+ be handled separately.
101
+ """
102
+
103
+ def __init__(
104
+ self,
105
+ *,
106
+ type_promotion_kind: ELEMENTWISE_TYPE_PROMOTION_KIND,
107
+ type_promoting_args: Optional[Sequence[str]] = None,
108
+ ):
109
+ self.type_promoting_arg_names = type_promoting_args
110
+ self.type_promotion_kind = type_promotion_kind
111
+
112
+ def __call__(self, fn: Callable) -> Callable:
113
+ sig = inspect.signature(fn)
114
+
115
+ @wraps(fn)
116
+ def _fn(*args, **kwargs):
117
+ bound = sig.bind(*args, **kwargs)
118
+ type_promoting_args = tuple(
119
+ bound.arguments[x]
120
+ for x in self.type_promoting_arg_names # type: ignore[union-attr]
121
+ if x in bound.arguments.keys()
122
+ )
123
+
124
+ flattened_type_promoting_args = pytree.arg_tree_leaves(*type_promoting_args)
125
+ compute_dtype, result_dtype = utils.elementwise_dtypes(
126
+ *flattened_type_promoting_args,
127
+ type_promotion_kind=self.type_promotion_kind,
128
+ )
129
+
130
+ promoted_args = {
131
+ x: _maybe_convert_to_dtype(bound.arguments[x], compute_dtype)
132
+ for x in self.type_promoting_arg_names # type: ignore[union-attr]
133
+ if x in bound.arguments.keys()
134
+ }
135
+ bound.arguments.update(promoted_args)
136
+
137
+ result = fn(**bound.arguments)
138
+
139
+ # Override the return_dtype if a dtype arg is present and not None
140
+ if "dtype" in bound.arguments:
141
+ maybe_dtype = bound.arguments["dtype"]
142
+ if maybe_dtype: # dtype cannot be None
143
+ result_dtype = maybe_dtype
144
+
145
+ if isinstance(result, TensorLike):
146
+ return _maybe_convert_to_dtype(result, result_dtype)
147
+ if isinstance(result, Sequence):
148
+ return tuple(_maybe_convert_to_dtype(x, result_dtype) for x in result)
149
+ raise AssertionError(f"Unhandled result type: {type(result)}")
150
+
151
+ _fn.__signature__ = sig # type: ignore[attr-defined]
152
+ return _fn
153
+
154
+
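A minimal, hypothetical sketch of decorating a reference with this wrapper (_my_add is illustrative, not a real ref):

import torch
from torch._prims_common import ELEMENTWISE_TYPE_PROMOTION_KIND
from torch._prims_common.wrappers import elementwise_type_promotion_wrapper

@elementwise_type_promotion_wrapper(
    type_promoting_args=("a", "b"),
    type_promotion_kind=ELEMENTWISE_TYPE_PROMOTION_KIND.DEFAULT,
)
def _my_add(a, b):
    # a and b arrive already cast to the computation dtype;
    # the wrapper casts the returned tensor to the result dtype
    return torch.add(a, b)

_my_add(torch.ones(2, dtype=torch.float16), torch.ones(2, dtype=torch.int64)).dtype
# torch.float16 (the addition itself runs in float32 per "op math")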
155
+ # Returns True if resize is necessary
156
+ def _resize_output_check(out: TensorLikeType, shape: ShapeType):
157
+ # If the shapes are correct there's nothing to do
158
+ if utils.same_shape(out.shape, shape):
159
+ return False
160
+ if out.numel() != 0:
161
+ msg = (
162
+ f"An output with one or more elements was resized since it had shape {str(out.shape)} "
163
+ "which does not match the required output shape {str(shape)}. "
164
+ "This behavior is deprecated, and in a future PyTorch release outputs will not "
165
+ "be resized unless they have zero elements. "
166
+ "You can explicitly reuse an out tensor t by resizing it, inplace, to zero elements with t.resize_(0)."
167
+ )
168
+ warnings.warn(msg)
169
+ return True
170
+
171
+
172
+ # TODO: handle tuples of tensors
173
+ def _maybe_resize_out(out: TensorLikeType, shape: ShapeType):
174
+ if _resize_output_check(out, shape):
175
+ return out.resize_(shape)
176
+ else:
177
+ return out
178
+
179
+
180
+ def _safe_copy_out(
181
+ *, copy_from: TensorLikeType, copy_to: TensorLikeType, exact_dtype: bool = False
182
+ ):
183
+ # Checks same device
184
+ if copy_from.device != copy_to.device:
185
+ msg = "Attempting to copy from device {} to device {}, but cross-device copies are not allowed!".format(
186
+ copy_from.device, copy_to.device
187
+ )
188
+ raise RuntimeError(msg)
189
+
190
+ # Checks safe cast
191
+ if exact_dtype:
192
+ torch._check(
193
+ copy_from.dtype == copy_to.dtype,
194
+ lambda: f"Expected out tensor to have dtype {copy_from.dtype} "
195
+ f"but got {copy_to.dtype} instead",
196
+ )
197
+ else:
198
+ torch._check(
199
+ utils.can_safe_cast_to(cast_from=copy_from.dtype, cast_to=copy_to.dtype),
200
+ lambda: f"Attempting to cast from {copy_from.dtype} to out tensor with dtype {copy_to.dtype}, "
201
+ "but this can't be cast because it is not safe!",
202
+ )
203
+
204
+ return copy_to.copy_(copy_from)
205
+
206
+
207
+ def out_wrapper(*out_names: str, exact_dtype: bool = False):
208
+ # The wrapped function needs to convert the output parameters to ensure
209
+ # compatibility between the Python API (which always uses "out" as the
210
+ # parameter name and may be a tuple) and the Aten API (which may have
211
+ # multiple output parameters and use different parameter names such as
212
+ # "grad_input", "indices" or "values".)
213
+
214
+ default_out_names = ("out",)
215
+ if len(out_names) == 0:
216
+ # Use the default out name
217
+ out_names = default_out_names
218
+
219
+ is_tensor = len(out_names) == 1
220
+
221
+ def _out_wrapper(fn: Callable) -> Callable:
222
+ """
223
+ Adds the out parameter to a Python reference.
224
+ """
225
+ out_type = (
226
+ TensorLikeType
227
+ if is_tensor
228
+ else Tuple[tuple(TensorLikeType for _ in range(len(out_names)))]
229
+ )
230
+ return_type = (
231
+ TensorLikeType
232
+ if is_tensor
233
+ else NamedTuple(
234
+ f"return_types_{fn.__name__}", [(o, TensorLikeType) for o in out_names]
235
+ )
236
+ )
237
+
238
+ sig = inspect.signature(fn)
239
+ factory_kwargs = ("device", "dtype")
240
+ is_factory_fn = all(p in sig.parameters for p in factory_kwargs)
241
+
242
+ @wraps(fn)
243
+ def _fn(*args, out=None, **kwargs):
244
+ if is_factory_fn and out is not None:
245
+ for k in factory_kwargs:
246
+ out_attr = getattr(out, k)
247
+ if k not in kwargs:
248
+ kwargs[k] = out_attr
249
+
250
+ result = fn(*args, **kwargs)
251
+ assert (
252
+ isinstance(result, TensorLike)
253
+ and is_tensor
254
+ or isinstance(result, Tuple) # type: ignore[arg-type]
255
+ and len(result) == len(out_names)
256
+ )
257
+ if out is not None:
258
+ # Naively you might expect this assert to be true, but
259
+ # it's not:
260
+ #
261
+ # assert type(out) == type(result)
262
+ #
263
+ # The reason is that functions under this wrapper can
264
+ # get registered to the Meta dispatch key, and that
265
+ # means they can be executed in a context where tensor
266
+ # subclasses are disabled (with no_dispatch), which is a
267
+ # handy way for an is-a tensor subclass (e.g.,
268
+ # FakeTensor) to have the normal meta backend create a
269
+ # meta tensor, to be wrapped once it gets returned.
270
+ # In this situation, you will get a FakeTensor as
271
+ # the output tensor, but not the result--which will
272
+ # be a normal meta tensor, but this is perfectly
273
+ # harmless.
274
+ if is_tensor:
275
+ assert isinstance(out, TensorLike)
276
+ # These two operations are done in-place
277
+ _maybe_resize_out(out, result.shape)
278
+ _safe_copy_out(copy_from=result, copy_to=out, exact_dtype=exact_dtype) # type: ignore[arg-type]
279
+ else:
280
+ assert isinstance(out, Tuple) # type: ignore[arg-type]
281
+ torch._check_type(
282
+ len(out) == len(result),
283
+ lambda: f"expected tuple of {len(result)} elements but got {len(out)}",
284
+ )
285
+ for r, o in zip(result, out):
286
+ # These two operations are done in-place
287
+ _maybe_resize_out(o, r.shape)
288
+ _safe_copy_out(copy_from=r, copy_to=o, exact_dtype=exact_dtype) # type: ignore[arg-type]
289
+ else:
290
+ out = result
291
+ # mypy does not see through the definition of out_type given that it's in a different scope
292
+ return out if is_tensor else return_type(*out) # type: ignore[operator]
293
+
294
+ out_param = inspect.Parameter(
295
+ "out",
296
+ kind=inspect.Parameter.KEYWORD_ONLY,
297
+ default=None,
298
+ annotation=out_type,
299
+ )
300
+ # Mark that the function now returns a tuple
301
+ assert isinstance(sig.return_annotation, str) or sig.return_annotation in (
302
+ sig.empty,
303
+ out_type,
304
+ )
305
+ params = chain(sig.parameters.values(), (out_param,))
306
+ _fn.__signature__ = inspect.Signature( # type: ignore[attr-defined]
307
+ parameters=params, return_annotation=return_type # type: ignore[arg-type]
308
+ )
309
+
310
+ _fn.__annotations__ = fn.__annotations__
311
+ _fn.__annotations__["out"] = out_type
312
+ _fn.__annotations__["return"] = return_type
313
+
314
+ # In the special case of having a single tensor out parameter with a
315
+ # name other than out, add a special annotation to name the parameter
316
+ if is_tensor and out_names != default_out_names:
317
+ _fn.__annotations__[CustomOutParamAnnotation] = out_names[0]
318
+
319
+ # Add an indicator attribute that can be used in special cases
320
+ # where having a function wrapped by `out_wrapper` is not desirable e.g.
321
+ # jit
322
+ _fn._torch_decompositions_out_wrapper = f"This function is wrapped by {out_wrapper.__module__}.out_wrapper" # type: ignore[attr-defined]
323
+
324
+ return _fn
325
+
326
+ return _out_wrapper
327
+
328
+
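A hedged usage sketch of out_wrapper on a hypothetical reference (_my_sin is illustrative only):

import torch
from torch._prims_common import TensorLikeType
from torch._prims_common.wrappers import out_wrapper

@out_wrapper()
def _my_sin(a: TensorLikeType) -> TensorLikeType:
    return torch.sin(a)

x = torch.tensor([0.0, 1.0, 2.0])
out = torch.empty(3)
_my_sin(x, out=out)              # the result is copied into `out` in place
torch.equal(out, torch.sin(x))   # True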
329
+ def _maybe_remove_out_wrapper(fn: Callable):
330
+ return inspect.unwrap(
331
+ fn,
332
+ stop=lambda f: not hasattr(f, "_torch_decompositions_out_wrapper"),
333
+ )
334
+
335
+
336
+ def backwards_not_supported(prim):
337
+ def redispatch_prim(args, kwargs):
338
+ with torch._C._AutoDispatchBelowAutograd():
339
+ old = torch._C._dispatch_tls_is_dispatch_key_excluded(
340
+ torch._C.DispatchKey.ADInplaceOrView
341
+ )
342
+ return prim(*args, **kwargs)
343
+
344
+ class BackwardsNotSupported(torch.autograd.Function):
345
+ @staticmethod
346
+ def forward(ctx, args_spec, *flat_args):
347
+ args, kwargs = tree_unflatten(flat_args, args_spec) # type: ignore[arg-type]
348
+ return redispatch_prim(args, kwargs)
349
+
350
+ @staticmethod
351
+ def backward(ctx, *args):
352
+ raise RuntimeError("backwards not supported on prim")
353
+
354
+ @wraps(prim)
355
+ def _autograd_impl(*args, **kwargs):
356
+ flat_args, args_spec = tree_flatten((args, kwargs))
357
+ if torch.is_grad_enabled() and any(
358
+ a.requires_grad for a in flat_args if isinstance(a, torch.Tensor)
359
+ ):
360
+ # TODO: There is a subtle bug here: prims like copy_to
361
+ # return their input argument after mutating it; and custom
362
+ # autograd function will incorrectly turn the result into
363
+ # a view which will fail test_python_ref_executor tests.
364
+ # At the moment, we sidestep this by observing that the
365
+ # unit tests don't ever try to run the executor with
366
+ # autograd, so we don't exercise the buggy case, but if
367
+ # you ever want to feed autograd through this, be aware
368
+ # of it! We need a way of properly implementing autograd
369
+ # for mutating operations in Python to do this.
370
+ return BackwardsNotSupported.apply(args_spec, *flat_args)
371
+ else:
372
+ return redispatch_prim(args, kwargs)
373
+
374
+ return _autograd_impl
375
+
376
+
377
+ # TODO: when tracing this will add torch tensors and not TensorMeta objects
378
+ # to the trace -- we should fix this by adding a tracing context and NumberMeta classes
379
+ # TODO: this wrapper is currently untested
380
+ def elementwise_unary_scalar_wrapper(fn: Callable) -> Callable:
381
+ """
382
+ Allows unary operators that accept tensors to work with Python numbers.
383
+ """
384
+ sig = inspect.signature(fn)
385
+
386
+ @wraps(fn)
387
+ def _fn(*args, **kwargs):
388
+ if len(args) > 0 and isinstance(args[0], Number):
389
+ dtype = utils.type_to_dtype(type(args[0]))
390
+ args_ = list(args)
391
+ args_[0] = torch.tensor(args[0], dtype=dtype)
392
+ result = fn(*args_, **kwargs)
393
+ assert isinstance(result, torch.Tensor)
394
+ return result.item()
395
+
396
+ return fn(*args, **kwargs)
397
+
398
+ _fn.__signature__ = sig # type: ignore[attr-defined]
399
+ return _fn
env-llmeval/lib/python3.10/site-packages/torch/cuda/__init__.py ADDED
@@ -0,0 +1,1421 @@
1
+ r"""
2
+ This package adds support for CUDA tensor types.
3
+
4
+ It implements the same functions as CPU tensors, but they utilize
5
+ GPUs for computation.
6
+
7
+ It is lazily initialized, so you can always import it, and use
8
+ :func:`is_available()` to determine if your system supports CUDA.
9
+
10
+ :ref:`cuda-semantics` has more details about working with CUDA.
11
+ """
12
+
13
+
14
+ import contextlib
15
+ import importlib
16
+ import os
17
+ import sys
18
+ import threading
19
+ import traceback
20
+ import warnings
21
+ from functools import lru_cache
22
+ from typing import Any, cast, List, Optional, Tuple, Union
23
+
24
+ import torch
25
+ import torch._C
26
+ from torch.types import Device
27
+ from .. import device as _device
28
+ from .._utils import classproperty
29
+ from ._utils import _dummy_type, _get_device_index
30
+ from .graphs import (
31
+ CUDAGraph,
32
+ graph,
33
+ graph_pool_handle,
34
+ is_current_stream_capturing,
35
+ make_graphed_callables,
36
+ )
37
+ from .streams import Event, ExternalStream, Stream
38
+
39
+ try:
40
+ from torch._C import _cudart # type: ignore[attr-defined]
41
+ except ImportError:
42
+ _cudart = None
43
+
44
+ _initialized = False
45
+ _tls = threading.local()
46
+ _initialization_lock = threading.Lock()
47
+ _queued_calls = [] # don't invoke these until initialization occurs
48
+ _is_in_bad_fork = getattr(torch._C, "_cuda_isInBadFork", lambda: False)
49
+ _device_t = Union[_device, str, int, None]
50
+
51
+ _HAS_PYNVML = False
52
+ _PYNVML_ERR = None
53
+ try:
54
+ import pynvml # type: ignore[import]
55
+
56
+ _HAS_PYNVML = True
57
+ except ImportError as err:
58
+ _PYNVML_ERR = err # sometimes a lib is installed but the import fails for some other reason, so we log the error for later
59
+
60
+
61
+ class _LazySeedTracker:
62
+ # Since seeding is memory-less, only track the latest seed.
63
+ # Note: `manual_seed_all` followed by `manual_seed` overwrites
64
+ # the seed on current device. We track the order of **latest**
65
+ # calls between these two API.
66
+ def __init__(self):
67
+ self.manual_seed_all_cb = None
68
+ self.manual_seed_cb = None
69
+ self.call_order = []
70
+
71
+ def queue_seed_all(self, cb, traceback):
72
+ self.manual_seed_all_cb = (cb, traceback)
73
+ # update seed_all to be latest
74
+ self.call_order = [self.manual_seed_cb, self.manual_seed_all_cb]
75
+
76
+ def queue_seed(self, cb, traceback):
77
+ self.manual_seed_cb = (cb, traceback)
78
+ # update seed to be latest
79
+ self.call_order = [self.manual_seed_all_cb, self.manual_seed_cb]
80
+
81
+ def get_calls(self) -> List:
82
+ return self.call_order
83
+
84
+
85
+ _lazy_seed_tracker = _LazySeedTracker()
86
+
87
+ # Define dummy _CudaDeviceProperties type if PyTorch was compiled without CUDA
88
+ if hasattr(torch._C, "_CudaDeviceProperties"):
89
+ _CudaDeviceProperties = torch._C._CudaDeviceProperties
90
+ else:
91
+ _CudaDeviceProperties = _dummy_type("_CudaDeviceProperties") # type: ignore[assignment, misc]
92
+
93
+ if hasattr(torch._C, "_cuda_exchangeDevice"):
94
+ _exchange_device = torch._C._cuda_exchangeDevice
95
+ else:
96
+
97
+ def _exchange_device(device: int) -> int:
98
+ if device < 0:
99
+ return -1
100
+ raise RuntimeError("PyTorch was compiled without CUDA support")
101
+
102
+
103
+ if hasattr(torch._C, "_cuda_maybeExchangeDevice"):
104
+ _maybe_exchange_device = torch._C._cuda_maybeExchangeDevice
105
+ else:
106
+
107
+ def _maybe_exchange_device(device: int) -> int:
108
+ if device < 0:
109
+ return -1
110
+ raise RuntimeError("PyTorch was compiled without CUDA support")
111
+
112
+
113
+ # Global variables dynamically populated by native code
114
+ has_magma: bool = False
115
+ has_half: bool = False
116
+ default_generators: Tuple[torch._C.Generator] = () # type: ignore[assignment]
117
+
118
+
119
+ def _is_compiled() -> bool:
120
+ r"""Return true if compile with CUDA support."""
121
+ return hasattr(torch._C, "_cuda_getDeviceCount")
122
+
123
+
124
+ def _nvml_based_avail() -> bool:
125
+ return os.getenv("PYTORCH_NVML_BASED_CUDA_CHECK") == "1"
126
+
127
+
128
+ def is_available() -> bool:
129
+ r"""Return a bool indicating if CUDA is currently available."""
130
+ if not _is_compiled():
131
+ return False
132
+ if _nvml_based_avail():
133
+ # The user has set an env variable to request this availability check that attempts to avoid fork poisoning by
134
+ # using NVML at the cost of a weaker CUDA availability assessment. Note that if NVML discovery/initialization
135
+ # fails, this assessment falls back to the default CUDA Runtime API assessment (`cudaGetDeviceCount`)
136
+ return device_count() > 0
137
+ else:
138
+ # The default availability inspection never throws and returns 0 if the driver is missing or can't
139
+ # be initialized. This uses the CUDA Runtime API `cudaGetDeviceCount` which in turn initializes the CUDA Driver
140
+ # API via `cuInit`
141
+ return torch._C._cuda_getDeviceCount() > 0
142
+
143
+
144
+ def is_bf16_supported():
145
+ r"""Return a bool indicating if the current CUDA/ROCm device supports dtype bfloat16."""
146
+ # Check for ROCm, if true return true, no ROCM_VERSION check required,
147
+ # since it is supported on AMD GPU archs.
148
+ if torch.version.hip:
149
+ return True
150
+
151
+ cu_vers = torch.version.cuda
152
+ if cu_vers is not None:
153
+ cuda_maj_decide = int(cu_vers.split(".")[0]) >= 11
154
+ else:
155
+ cuda_maj_decide = False
156
+ return (
157
+ torch.cuda.get_device_properties(torch.cuda.current_device()).major >= 8
158
+ and cuda_maj_decide
159
+ )
160
+
161
+
162
+ def _sleep(cycles):
163
+ torch._C._cuda_sleep(cycles)
164
+
165
+
166
+ def _check_capability():
167
+ incorrect_binary_warn = """
168
+ Found GPU%d %s which requires CUDA_VERSION >= %d to
169
+ work properly, but your PyTorch was compiled
170
+ with CUDA_VERSION %d. Please install the correct PyTorch binary
171
+ using instructions from https://pytorch.org
172
+ """
173
+
174
+ old_gpu_warn = """
175
+ Found GPU%d %s which is of cuda capability %d.%d.
176
+ PyTorch no longer supports this GPU because it is too old.
177
+ The minimum cuda capability supported by this library is %d.%d.
178
+ """
179
+
180
+ if torch.version.cuda is not None: # on ROCm we don't want this check
181
+ CUDA_VERSION = torch._C._cuda_getCompiledVersion()
182
+ for d in range(device_count()):
183
+ capability = get_device_capability(d)
184
+ major = capability[0]
185
+ minor = capability[1]
186
+ name = get_device_name(d)
187
+ current_arch = major * 10 + minor
188
+ min_arch = min(
189
+ (int(arch.split("_")[1]) for arch in torch.cuda.get_arch_list()),
190
+ default=35,
191
+ )
192
+ if current_arch < min_arch:
193
+ warnings.warn(
194
+ old_gpu_warn
195
+ % (d, name, major, minor, min_arch // 10, min_arch % 10)
196
+ )
197
+
198
+
199
+ def _check_cubins():
200
+ incompatible_device_warn = """
201
+ {} with CUDA capability sm_{} is not compatible with the current PyTorch installation.
202
+ The current PyTorch install supports CUDA capabilities {}.
203
+ If you want to use the {} GPU with PyTorch, please check the instructions at https://pytorch.org/get-started/locally/
204
+ """
205
+ if torch.version.cuda is None: # on ROCm we don't want this check
206
+ return
207
+ arch_list = get_arch_list()
208
+ if len(arch_list) == 0:
209
+ return
210
+ supported_sm = [int(arch.split("_")[1]) for arch in arch_list if "sm_" in arch]
211
+ for idx in range(device_count()):
212
+ cap_major, cap_minor = get_device_capability(idx)
213
+ # NVIDIA GPU compute architectures are backward compatible within major version
214
+ supported = any(sm // 10 == cap_major for sm in supported_sm)
215
+ if not supported:
216
+ device_name = get_device_name(idx)
217
+ capability = cap_major * 10 + cap_minor
218
+ warnings.warn(
219
+ incompatible_device_warn.format(
220
+ device_name, capability, " ".join(arch_list), device_name
221
+ )
222
+ )
223
+
224
+
225
+ def is_initialized():
226
+ r"""Return whether PyTorch's CUDA state has been initialized."""
227
+ return _initialized and not _is_in_bad_fork()
228
+
229
+
230
+ def _lazy_call(callable, **kwargs):
231
+ if is_initialized():
232
+ callable()
233
+ else:
234
+ # TODO(torch_deploy): this accesses linecache, which attempts to read the
235
+ # file system to get traceback info. Patch linecache or do something
236
+ # else here if this ends up being important.
237
+ global _lazy_seed_tracker
238
+ if kwargs.get("seed_all", False):
239
+ _lazy_seed_tracker.queue_seed_all(callable, traceback.format_stack())
240
+ elif kwargs.get("seed", False):
241
+ _lazy_seed_tracker.queue_seed(callable, traceback.format_stack())
242
+ else:
243
+ # Don't store the actual traceback to avoid memory cycle
244
+ _queued_calls.append((callable, traceback.format_stack()))
245
+
246
+
247
+ _lazy_call(_check_capability)
248
+ _lazy_call(_check_cubins)
249
+
250
+
251
+ class DeferredCudaCallError(Exception):
252
+ pass
253
+
254
+
255
+ OutOfMemoryError = torch._C._OutOfMemoryError
256
+
257
+
258
+ def init():
259
+ r"""Initialize PyTorch's CUDA state.
260
+
261
+ You may need to call this explicitly if you are interacting with
262
+ PyTorch via its C API, as Python bindings for CUDA functionality
263
+ will not be available until this initialization takes place.
264
+ Ordinary users should not need this, as all of PyTorch's CUDA methods
265
+ automatically initialize CUDA state on-demand.
266
+
267
+ Does nothing if the CUDA state is already initialized.
268
+ """
269
+ _lazy_init()
270
+
271
+
272
+ def _lazy_init():
273
+ global _initialized, _queued_calls
274
+ if is_initialized() or hasattr(_tls, "is_initializing"):
275
+ return
276
+ with _initialization_lock:
277
+ # We be double-checked locking, boys! This is OK because
278
+ # the above test was GIL protected anyway. The inner test
279
+ # is for when a thread blocked on some other thread which was
280
+ # doing the initialization; when they get the lock, they will
281
+ # find there is nothing left to do.
282
+ if is_initialized():
283
+ return
284
+ # It is important to prevent other threads from entering _lazy_init
285
+ # immediately, while we are still guaranteed to have the GIL, because some
286
+ # of the C calls we make below will release the GIL
287
+ if _is_in_bad_fork():
288
+ raise RuntimeError(
289
+ "Cannot re-initialize CUDA in forked subprocess. To use CUDA with "
290
+ "multiprocessing, you must use the 'spawn' start method"
291
+ )
292
+ if not hasattr(torch._C, "_cuda_getDeviceCount"):
293
+ raise AssertionError("Torch not compiled with CUDA enabled")
294
+ if _cudart is None:
295
+ raise AssertionError(
296
+ "libcudart functions unavailable. It looks like you have a broken build?"
297
+ )
298
+ # This function throws if there's a driver initialization error, no GPUs
299
+ # are found or any other error occurs
300
+ if "CUDA_MODULE_LOADING" not in os.environ:
301
+ os.environ["CUDA_MODULE_LOADING"] = "LAZY"
302
+ torch._C._cuda_init()
303
+ # Some of the queued calls may reentrantly call _lazy_init();
304
+ # we need to just return without initializing in that case.
305
+ # However, we must not let any *other* threads in!
306
+ _tls.is_initializing = True
307
+
308
+ for calls in _lazy_seed_tracker.get_calls():
309
+ if calls:
310
+ _queued_calls.append(calls)
311
+
312
+ try:
313
+ for queued_call, orig_traceback in _queued_calls:
314
+ try:
315
+ queued_call()
316
+ except Exception as e:
317
+ msg = (
318
+ f"CUDA call failed lazily at initialization with error: {str(e)}\n\n"
319
+ f"CUDA call was originally invoked at:\n\n{''.join(orig_traceback)}"
320
+ )
321
+ raise DeferredCudaCallError(msg) from e
322
+ finally:
323
+ delattr(_tls, "is_initializing")
324
+ _initialized = True
325
+
326
+
327
+ def cudart():
328
+ _lazy_init()
329
+ return _cudart
330
+
331
+
332
+ class cudaStatus:
333
+ SUCCESS: int = 0
334
+ ERROR_NOT_READY: int = 34
335
+
336
+
337
+ class CudaError(RuntimeError):
338
+ def __init__(self, code: int) -> None:
339
+ msg = _cudart.cudaGetErrorString(_cudart.cudaError(code))
340
+ super().__init__(f"{msg} ({code})")
341
+
342
+
343
+ def check_error(res: int) -> None:
344
+ if res != _cudart.cudaError.success:
345
+ raise CudaError(res)
346
+
347
+
348
+ class _DeviceGuard:
349
+ def __init__(self, index: int):
350
+ self.idx = index
351
+ self.prev_idx = -1
352
+
353
+ def __enter__(self):
354
+ self.prev_idx = torch.cuda._exchange_device(self.idx)
355
+
356
+ def __exit__(self, type: Any, value: Any, traceback: Any):
357
+ self.idx = torch.cuda._maybe_exchange_device(self.prev_idx)
358
+ return False
359
+
360
+
361
+ class device:
362
+ r"""Context-manager that changes the selected device.
363
+
364
+ Args:
365
+ device (torch.device or int): device index to select. It's a no-op if
366
+ this argument is a negative integer or ``None``.
367
+ """
368
+
369
+ def __init__(self, device: Any):
370
+ self.idx = _get_device_index(device, optional=True)
371
+ self.prev_idx = -1
372
+
373
+ def __enter__(self):
374
+ self.prev_idx = torch.cuda._exchange_device(self.idx)
375
+
376
+ def __exit__(self, type: Any, value: Any, traceback: Any):
377
+ self.idx = torch.cuda._maybe_exchange_device(self.prev_idx)
378
+ return False
379
+
380
+
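A short usage sketch of the context manager above (assumes a machine with at least two CUDA devices):

import torch

x = torch.empty(2, device="cuda")      # allocated on the current device (cuda:0 by default)
with torch.cuda.device(1):
    y = torch.empty(2, device="cuda")  # allocated on cuda:1 while the context is active
z = torch.empty(2, device="cuda")      # previous device is restored on exit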
381
+ class device_of(device):
382
+ r"""Context-manager that changes the current device to that of given object.
383
+
384
+ You can use both tensors and storages as arguments. If a given object is
385
+ not allocated on a GPU, this is a no-op.
386
+
387
+ Args:
388
+ obj (Tensor or Storage): object allocated on the selected device.
389
+ """
390
+
391
+ def __init__(self, obj):
392
+ idx = obj.get_device() if obj.is_cuda else -1
393
+ super().__init__(idx)
394
+
395
+
396
+ def set_device(device: _device_t) -> None:
397
+ r"""Set the current device.
398
+
399
+ Usage of this function is discouraged in favor of :any:`device`. In most
400
+ cases it's better to use the ``CUDA_VISIBLE_DEVICES`` environment variable.
401
+
402
+ Args:
403
+ device (torch.device or int): selected device. This function is a no-op
404
+ if this argument is negative.
405
+ """
406
+ device = _get_device_index(device)
407
+ if device >= 0:
408
+ torch._C._cuda_setDevice(device)
409
+
410
+
411
+ def get_device_name(device: Optional[_device_t] = None) -> str:
412
+ r"""Get the name of a device.
413
+
414
+ Args:
415
+ device (torch.device or int, optional): device for which to return the
416
+ name. This function is a no-op if this argument is a negative
417
+ integer. It uses the current device, given by :func:`~torch.cuda.current_device`,
418
+ if :attr:`device` is ``None`` (default).
419
+
420
+ Returns:
421
+ str: the name of the device
422
+ """
423
+ return get_device_properties(device).name
424
+
425
+
426
+ def get_device_capability(device: Optional[_device_t] = None) -> Tuple[int, int]:
427
+ r"""Get the cuda capability of a device.
428
+
429
+ Args:
430
+ device (torch.device or int, optional): device for which to return the
431
+ device capability. This function is a no-op if this argument is
432
+ a negative integer. It uses the current device, given by
433
+ :func:`~torch.cuda.current_device`, if :attr:`device` is ``None``
434
+ (default).
435
+
436
+ Returns:
437
+ tuple(int, int): the major and minor cuda capability of the device
438
+ """
439
+ prop = get_device_properties(device)
440
+ return prop.major, prop.minor
441
+
442
+
443
+ def get_device_properties(device: _device_t) -> _CudaDeviceProperties:
444
+ r"""Get the properties of a device.
445
+
446
+ Args:
447
+ device (torch.device or int or str): device for which to return the
448
+ properties of the device.
449
+
450
+ Returns:
451
+ _CudaDeviceProperties: the properties of the device
452
+ """
453
+ _lazy_init() # will define _get_device_properties
454
+ device = _get_device_index(device, optional=True)
455
+ if device < 0 or device >= device_count():
456
+ raise AssertionError("Invalid device id")
457
+ return _get_device_properties(device) # type: ignore[name-defined]
458
+
459
+
460
+ def can_device_access_peer(device: _device_t, peer_device: _device_t) -> bool:
461
+ r"""Check if peer access between two devices is possible."""
462
+ _lazy_init()
463
+ device = _get_device_index(device, optional=True)
464
+ peer_device = _get_device_index(peer_device)
465
+ if device < 0 or device >= device_count():
466
+ raise AssertionError("Invalid device id")
467
+ if peer_device < 0 or peer_device >= device_count():
468
+ raise AssertionError("Invalid peer device id")
469
+ return torch._C._cuda_canDeviceAccessPeer(device, peer_device)
470
+
471
+
472
+ class StreamContext:
473
+ r"""Context-manager that selects a given stream.
474
+
475
+ All CUDA kernels queued within its context will be enqueued on a selected
476
+ stream.
477
+
478
+ Args:
479
+ Stream (Stream): selected stream. This manager is a no-op if it's
480
+ ``None``.
481
+ .. note:: Streams are per-device.
482
+ """
483
+ cur_stream: Optional["torch.cuda.Stream"]
484
+
485
+ def __init__(self, stream: Optional["torch.cuda.Stream"]):
486
+ self.stream = stream
487
+ self.idx = _get_device_index(None, True)
488
+ if not torch.jit.is_scripting():
489
+ if self.idx is None:
490
+ self.idx = -1
491
+
492
+ self.src_prev_stream = (
493
+ None if not torch.jit.is_scripting() else torch.cuda.default_stream(None)
494
+ )
495
+ self.dst_prev_stream = (
496
+ None if not torch.jit.is_scripting() else torch.cuda.default_stream(None)
497
+ )
498
+
499
+ def __enter__(self):
500
+ # Local cur_stream variable for type refinement
501
+ cur_stream = self.stream
502
+ # Return if stream is None or CUDA device not available
503
+ if cur_stream is None or self.idx == -1:
504
+ return
505
+ self.src_prev_stream = torch.cuda.current_stream(None)
506
+
507
+ # If the stream is not on the current device, then
508
+ # set the current stream on the device
509
+ if self.src_prev_stream.device != cur_stream.device:
510
+ with device(cur_stream.device):
511
+ self.dst_prev_stream = torch.cuda.current_stream(cur_stream.device)
512
+ torch.cuda.set_stream(cur_stream)
513
+
514
+ def __exit__(self, type: Any, value: Any, traceback: Any):
515
+ # Local cur_stream variable for type refinement
516
+ cur_stream = self.stream
517
+ # If stream is None or no CUDA device available, return
518
+ if cur_stream is None or self.idx == -1:
519
+ return
520
+
521
+ # Reset the stream on the original device
522
+ # and destination device
523
+ if self.src_prev_stream.device != cur_stream.device: # type: ignore[union-attr]
524
+ torch.cuda.set_stream(self.dst_prev_stream) # type: ignore[arg-type]
525
+ torch.cuda.set_stream(self.src_prev_stream) # type: ignore[arg-type]
526
+
527
+
528
+ def stream(stream: Optional["torch.cuda.Stream"]) -> StreamContext:
529
+ r"""Wrap around the Context-manager StreamContext that selects a given stream.
530
+
531
+ Arguments:
532
+ stream (Stream): selected stream. This manager is a no-op if it's
533
+ ``None``.
534
+ .. note:: In eager mode ``stream`` is a ``Stream`` class object, while in JIT it is
535
+ an object of the custom class ``torch.classes.cuda.Stream``.
536
+ """
537
+ return StreamContext(stream)
538
+
539
+
540
+ def _set_stream_by_id(stream_id, device_index, device_type):
541
+ r"""set stream specified by the stream id, device index and
542
+ device type
543
+
544
+ Args: stream_id (int): stream id in stream pool
545
+ device_index (int): device index in topo
546
+ device_type (int): enum device type
547
+ """
548
+ torch._C._cuda_setStream(
549
+ stream_id=stream_id,
550
+ device_index=device_index,
551
+ device_type=device_type,
552
+ )
553
+
554
+
555
+ def set_stream(stream: Stream):
556
+ r"""Set the current stream.This is a wrapper API to set the stream.
557
+ Usage of this function is discouraged in favor of the ``stream``
558
+ context manager.
559
+
560
+ Args:
561
+ stream (Stream): selected stream. This function is a no-op
562
+ if this argument is ``None``.
563
+ """
564
+ if stream is None:
565
+ return
566
+ _set_stream_by_id(
567
+ stream_id=stream.stream_id,
568
+ device_index=stream.device_index,
569
+ device_type=stream.device_type,
570
+ )
571
+
572
+
573
+ def _parse_visible_devices() -> Union[List[int], List[str]]:
574
+ r"""Parse CUDA_VISIBLE_DEVICES environment variable."""
575
+ var = os.getenv("CUDA_VISIBLE_DEVICES")
576
+ if var is None:
577
+ return list(range(64))
578
+
579
+ def _strtoul(s: str) -> int:
580
+ """Return -1 or positive integer sequence string starts with."""
581
+ if not s:
582
+ return -1
583
+ for idx, c in enumerate(s):
584
+ if not (c.isdigit() or (idx == 0 and c in "+-")):
585
+ break
586
+ if idx + 1 == len(s):
587
+ idx += 1
588
+ return int(s[:idx]) if idx > 0 else -1
589
+
590
+ def parse_list_with_prefix(lst: str, prefix: str) -> List[str]:
591
+ rcs: List[str] = []
592
+ for elem in lst.split(","):
593
+ # Repeated id results in empty set
594
+ if elem in rcs:
595
+ return cast(List[str], [])
596
+ # Anything that does not start with the prefix stops the parsing
597
+ if not elem.startswith(prefix):
598
+ break
599
+ rcs.append(elem)
600
+ return rcs
601
+
602
+ if var.startswith("GPU-"):
603
+ return parse_list_with_prefix(var, "GPU-")
604
+ if var.startswith("MIG-"):
605
+ return parse_list_with_prefix(var, "MIG-")
606
+ # CUDA_VISIBLE_DEVICES uses something like strtoul
607
+ # which makes `1gpu2,2ampere` equivalent to `1,2`
608
+ rc: List[int] = []
609
+ for elem in var.split(","):
610
+ x = _strtoul(elem.strip())
611
+ # Repeated ordinal results in empty set
612
+ if x in rc:
613
+ return cast(List[int], [])
614
+ # Negative value aborts the sequence
615
+ if x < 0:
616
+ break
617
+ rc.append(x)
618
+ return rc
619
+
620
+
621
+ def _raw_device_count_nvml() -> int:
622
+ r"""Return number of devices as reported by NVML or negative value if NVML discovery/initialization failed."""
623
+ from ctypes import byref, c_int, CDLL
624
+
625
+ nvml_h = CDLL("libnvidia-ml.so.1")
626
+ rc = nvml_h.nvmlInit()
627
+ if rc != 0:
628
+ warnings.warn("Can't initialize NVML")
629
+ return -1
630
+ dev_count = c_int(-1)
631
+ rc = nvml_h.nvmlDeviceGetCount_v2(byref(dev_count))
632
+ if rc != 0:
633
+ warnings.warn("Can't get nvml device count")
634
+ return -1
635
+ del nvml_h
636
+ return dev_count.value
637
+
638
+
639
+ def _raw_device_uuid_nvml() -> Optional[List[str]]:
640
+ r"""Return list of device UUID as reported by NVML or None if NVM discovery/initialization failed."""
641
+ from ctypes import byref, c_int, c_void_p, CDLL, create_string_buffer
642
+
643
+ nvml_h = CDLL("libnvidia-ml.so.1")
644
+ rc = nvml_h.nvmlInit()
645
+ if rc != 0:
646
+ warnings.warn("Can't initialize NVML")
647
+ return None
648
+ dev_count = c_int(-1)
649
+ rc = nvml_h.nvmlDeviceGetCount_v2(byref(dev_count))
650
+ if rc != 0:
651
+ warnings.warn("Can't get nvml device count")
652
+ return None
653
+ uuids: List[str] = []
654
+ for idx in range(dev_count.value):
655
+ dev_id = c_void_p()
656
+ rc = nvml_h.nvmlDeviceGetHandleByIndex_v2(idx, byref(dev_id))
657
+ if rc != 0:
658
+ warnings.warn("Can't get device handle")
659
+ return None
660
+ buf_len = 96
661
+ buf = create_string_buffer(buf_len)
662
+ rc = nvml_h.nvmlDeviceGetUUID(dev_id, buf, buf_len)
663
+ if rc != 0:
664
+ warnings.warn("Can't get device UUID")
665
+ return None
666
+ uuids.append(buf.raw.decode("ascii").strip("\0"))
667
+ del nvml_h
668
+ return uuids
669
+
670
+
671
+ def _transform_uuid_to_ordinals(candidates: List[str], uuids: List[str]) -> List[int]:
672
+ r"""Given the set of partial uuids and list of known uuids builds a set of ordinals excluding ambiguous partials IDs."""
673
+
674
+ def uuid_to_ordinal(candidate: str, uuids: List[str]) -> int:
675
+ best_match = -1
676
+ for idx, uuid in enumerate(uuids):
677
+ if not uuid.startswith(candidate):
678
+ continue
679
+ # Ambiguous candidate
680
+ if best_match != -1:
681
+ return -1
682
+ best_match = idx
683
+ return best_match
684
+
685
+ rc: List[int] = []
686
+ for candidate in candidates:
687
+ idx = uuid_to_ordinal(candidate, uuids)
688
+ # First invalid ordinal stops parsing
689
+ if idx < 0:
690
+ break
691
+ # Duplicates result in empty set
692
+ if idx in rc:
693
+ return cast(List[int], [])
694
+ rc.append(idx)
695
+ return rc
696
+
697
+
698
+ def _device_count_nvml() -> int:
699
+ r"""Return number of devices as reported by NVML taking CUDA_VISIBLE_DEVICES into account.
700
+
701
+ Negative value is returned if NVML discovery or initialization has failed.
702
+ """
703
+ visible_devices = _parse_visible_devices()
704
+ if not visible_devices:
705
+ return 0
706
+ try:
707
+ if type(visible_devices[0]) is str:
708
+ # Skip MIG parsing
709
+ if visible_devices[0].startswith("MIG-"):
710
+ return -1
711
+ uuids = _raw_device_uuid_nvml()
712
+ if uuids is None:
713
+ return -1
714
+ visible_devices = _transform_uuid_to_ordinals(
715
+ cast(List[str], visible_devices), uuids
716
+ )
717
+ else:
718
+ raw_cnt = _raw_device_count_nvml()
719
+ if raw_cnt <= 0:
720
+ return raw_cnt
721
+ # Trim the list up to a maximum available device
722
+ for idx, val in enumerate(visible_devices):
723
+ if cast(int, val) >= raw_cnt:
724
+ return idx
725
+ except OSError:
726
+ return -1
727
+ except AttributeError:
728
+ return -1
729
+ return len(visible_devices)
730
+
731
+
732
+ def _get_nvml_device_index(device: Optional[Union[int, Device]]) -> int:
733
+ r"""Return the NVML index of the device, taking CUDA_VISIBLE_DEVICES into account."""
734
+ idx = _get_device_index(device, optional=True)
735
+ visible_devices = _parse_visible_devices()
736
+ if type(visible_devices[0]) is str:
737
+ uuids = _raw_device_uuid_nvml()
738
+ if uuids is None:
739
+ raise RuntimeError("Can't get device UUIDs")
740
+ visible_devices = _transform_uuid_to_ordinals(
741
+ cast(List[str], visible_devices), uuids
742
+ )
743
+ idx_map = dict(enumerate(cast(List[int], visible_devices)))
744
+ if idx not in idx_map:
745
+ raise RuntimeError(
746
+ f"device {idx} is not visible (CUDA_VISIBLE_DEVICES={visible_devices})"
747
+ )
748
+ return idx_map[idx]
749
+
750
+
751
+ @lru_cache(maxsize=1)
752
+ def device_count() -> int:
753
+ r"""Return the number of GPUs available."""
754
+ if not _is_compiled():
755
+ return 0
756
+ # bypass _device_count_nvml() if rocm (not supported)
757
+ nvml_count = -1 if torch.version.hip else _device_count_nvml()
758
+ return torch._C._cuda_getDeviceCount() if nvml_count < 0 else nvml_count
759
+
760
+
761
+ def get_arch_list() -> List[str]:
762
+ r"""Return list CUDA architectures this library was compiled for."""
763
+ if not is_available():
764
+ return []
765
+ arch_flags = torch._C._cuda_getArchFlags()
766
+ if arch_flags is None:
767
+ return []
768
+ return arch_flags.split()
769
+
770
+
771
+ def get_gencode_flags() -> str:
772
+ r"""Return NVCC gencode flags this library was compiled with."""
773
+ arch_list = get_arch_list()
774
+ if len(arch_list) == 0:
775
+ return ""
776
+ arch_list_ = [arch.split("_") for arch in arch_list]
777
+ return " ".join(
778
+ [
779
+ f"-gencode compute=compute_{arch},code={kind}_{arch}"
780
+ for (kind, arch) in arch_list_
781
+ ]
782
+ )
783
+
784
+
785
+ def current_device() -> int:
786
+ r"""Return the index of a currently selected device."""
787
+ _lazy_init()
788
+ return torch._C._cuda_getDevice()
789
+
790
+
791
+ def synchronize(device: _device_t = None) -> None:
792
+ r"""Wait for all kernels in all streams on a CUDA device to complete.
793
+
794
+ Args:
795
+ device (torch.device or int, optional): device for which to synchronize.
796
+ It uses the current device, given by :func:`~torch.cuda.current_device`,
797
+ if :attr:`device` is ``None`` (default).
798
+ """
799
+ _lazy_init()
800
+ with torch.cuda.device(device):
801
+ return torch._C._cuda_synchronize()
802
+
803
+
804
+ def ipc_collect():
805
+ r"""Force collects GPU memory after it has been released by CUDA IPC.
806
+
807
+ .. note::
808
+ Checks if any sent CUDA tensors could be cleaned from the memory. Force
809
+ closes the shared memory file used for reference counting if there are no
810
+ active counters. Useful when the producer process has stopped actively sending
811
+ tensors and you want to release unused memory.
812
+ """
813
+ _lazy_init()
814
+ return torch._C._cuda_ipc_collect()
815
+
816
+
817
+ def current_stream(device: Optional[_device_t] = None) -> Stream:
818
+ r"""Return the currently selected :class:`Stream` for a given device.
819
+
820
+ Args:
821
+ device (torch.device or int, optional): selected device. Returns
822
+ the currently selected :class:`Stream` for the current device, given
823
+ by :func:`~torch.cuda.current_device`, if :attr:`device` is ``None``
824
+ (default).
825
+ """
826
+ _lazy_init()
827
+ streamdata = torch._C._cuda_getCurrentStream(
828
+ _get_device_index(device, optional=True)
829
+ )
830
+ return Stream(
831
+ stream_id=streamdata[0], device_index=streamdata[1], device_type=streamdata[2]
832
+ )
833
+
834
+
835
+ def default_stream(device: Optional[_device_t] = None) -> Stream:
836
+ r"""Return the default :class:`Stream` for a given device.
837
+
838
+ Args:
839
+ device (torch.device or int, optional): selected device. Returns
840
+ the default :class:`Stream` for the current device, given by
841
+ :func:`~torch.cuda.current_device`, if :attr:`device` is ``None``
842
+ (default).
843
+ """
844
+ _lazy_init()
845
+ streamdata = torch._C._cuda_getDefaultStream(
846
+ _get_device_index(device, optional=True)
847
+ )
848
+ return Stream(
849
+ stream_id=streamdata[0], device_index=streamdata[1], device_type=streamdata[2]
850
+ )
851
+
852
+
853
+ def current_blas_handle():
854
+ r"""Return cublasHandle_t pointer to current cuBLAS handle"""
855
+ _lazy_init()
856
+ return torch._C._cuda_getCurrentBlasHandle()
857
+
858
+
859
+ def set_sync_debug_mode(debug_mode: Union[int, str]) -> None:
860
+ r"""Set the debug mode for cuda synchronizing operations.
861
+
862
+ Args:
863
+ debug_mode(str or int): if "default" or 0, don't error or warn on synchronizing operations;
864
+ if "warn" or 1, warn on synchronizing operations; if "error" or 2, error out on synchronizing operations.
865
+
866
+ Warning:
867
+ This is an experimental feature, and not all synchronizing operations will trigger a warning or an error. In
868
+ particular, operations in torch.distributed and torch.sparse namespaces are not covered yet.
869
+ """
870
+ _lazy_init()
871
+ if isinstance(debug_mode, str):
872
+ if debug_mode == "default":
873
+ debug_mode = 0
874
+ elif debug_mode == "warn":
875
+ debug_mode = 1
876
+ elif debug_mode == "error":
877
+ debug_mode = 2
878
+ else:
879
+ raise RuntimeError(
880
+ "invalid value of debug_mode, expected one of `default`, `warn`, `error`"
881
+ )
882
+
883
+ torch._C._cuda_set_sync_debug_mode(debug_mode)
884
+
885
+
886
+ def get_sync_debug_mode() -> int:
887
+ r"""Return current value of debug mode for cuda synchronizing operations."""
888
+ _lazy_init()
889
+ return torch._C._cuda_get_sync_debug_mode()
890
+
891
+
892
+ def _get_pynvml_handler(device: Optional[Union[Device, int]] = None):
893
+ if not _HAS_PYNVML:
894
+ raise ModuleNotFoundError(
895
+ "pynvml does not seem to be installed or it can't be imported."
896
+ ) from _PYNVML_ERR
897
+ from pynvml import NVMLError_DriverNotLoaded
898
+
899
+ try:
900
+ pynvml.nvmlInit()
901
+ except NVMLError_DriverNotLoaded as e:
902
+ raise RuntimeError("cuda driver can't be loaded, is cuda enabled?") from e
903
+
904
+ device = _get_nvml_device_index(device)
905
+ handle = pynvml.nvmlDeviceGetHandleByIndex(device)
906
+ return handle
907
+
908
+
909
+ def memory_usage(device: Optional[Union[Device, int]] = None) -> int:
910
+ r"""Return the percent of time over the past sample period during which global (device)
911
+ memory was being read or written as given by `nvidia-smi`.
912
+
913
+ Args:
914
+ device (torch.device or int, optional): selected device. Returns
915
+ statistic for the current device, given by :func:`~torch.cuda.current_device`,
916
+ if :attr:`device` is ``None`` (default).
917
+
918
+ Warning: Each sample period may be between 1 second and 1/6 second,
919
+ depending on the product being queried.
920
+ """
921
+ handle = _get_pynvml_handler()
922
+
923
+ device = _get_nvml_device_index(device)
924
+ handle = pynvml.nvmlDeviceGetHandleByIndex(device)
925
+ return pynvml.nvmlDeviceGetUtilizationRates(handle).memory
926
+
927
+
928
+ def utilization(device: Optional[Union[Device, int]] = None) -> int:
929
+ r"""Return the percent of time over the past sample period during which one or
930
+ more kernels was executing on the GPU as given by `nvidia-smi`.
931
+
932
+ Args:
933
+ device (torch.device or int, optional): selected device. Returns
934
+ statistic for the current device, given by :func:`~torch.cuda.current_device`,
935
+ if :attr:`device` is ``None`` (default).
936
+
937
+ Warning: Each sample period may be between 1 second and 1/6 second,
938
+ depending on the product being queried.
939
+ """
940
+ handle = _get_pynvml_handler(device)
941
+ device = _get_nvml_device_index(device)
942
+ handle = pynvml.nvmlDeviceGetHandleByIndex(device)
943
+ return pynvml.nvmlDeviceGetUtilizationRates(handle).gpu
944
+
945
+
946
+ def temperature(device: Optional[Union[Device, int]] = None) -> int:
947
+ r"""Return the average temperature of the GPU sensor in Degrees C (Centigrades).
948
+
949
+ The average temperature is computed based on past sample period as given by `nvidia-smi`.
950
+
951
+ Args:
952
+ device (torch.device or int, optional): selected device. Returns
953
+ statistic for the current device, given by :func:`~torch.cuda.current_device`,
954
+ if :attr:`device` is ``None`` (default).
955
+
956
+ Warning: Each sample period may be between 1 second and 1/6 second,
957
+ depending on the product being queried.
958
+ """
959
+ handle = _get_pynvml_handler(device)
960
+ # 0 refers to the temperature sensor for the GPU die.
961
+ return pynvml.nvmlDeviceGetTemperature(handle, 0)
962
+
963
+
964
+ def power_draw(device: Optional[Union[Device, int]] = None) -> int:
965
+ r"""Return the average power draw of the GPU sensor in mW (MilliWatts)
966
+ over the past sample period as given by `nvidia-smi` for Fermi or newer fully supported devices.
967
+
968
+ Args:
969
+ device (torch.device or int, optional): selected device. Returns
970
+ statistic for the current device, given by :func:`~torch.cuda.current_device`,
971
+ if :attr:`device` is ``None`` (default).
972
+
973
+ Warning: Each sample period may be between 1 second and 1/6 second,
974
+ depending on the product being queried.
975
+ """
976
+ handle = _get_pynvml_handler(device)
977
+ return pynvml.nvmlDeviceGetPowerUsage(handle)
978
+
979
+
980
+ def clock_rate(device: Optional[Union[Device, int]] = None) -> int:
981
+ r"""Return the clock speed of the GPU SM in Hz Hertz over the past sample period as given by `nvidia-smi`.
982
+
983
+ Args:
984
+ device (torch.device or int, optional): selected device. Returns
985
+ statistic for the current device, given by :func:`~torch.cuda.current_device`,
986
+ if :attr:`device` is ``None`` (default).
987
+
988
+ Warning: Each sample period may be between 1 second and 1/6 second,
989
+ depending on the product being queried.
990
+ """
991
+ handle = _get_pynvml_handler(device)
992
+ return pynvml.nvmlDeviceGetClockInfo(handle, 1)
993
+
994
+
995
+ def _get_device(device: Union[int, str, torch.device]) -> torch.device:
996
+ r"""Return the torch.device type object from the passed in device.
997
+
998
+ Args:
999
+ device (torch.device or int): selected device.
1000
+ """
1001
+ if isinstance(device, str):
1002
+ device = torch.device(device)
1003
+ elif isinstance(device, int):
1004
+ device = torch.device("cuda", device)
1005
+ return device
1006
+
1007
+
1008
+ def _get_generator(device: torch.device) -> torch._C.Generator:
1009
+ r"""Return the CUDA Generator object for the given device.
1010
+
1011
+ Args:
1012
+ device (torch.device): selected device.
1013
+ """
1014
+ idx = device.index
1015
+ if idx is None:
1016
+ idx = current_device()
1017
+ return torch.cuda.default_generators[idx]
1018
+
1019
+
1020
+ def _set_rng_state_offset(
1021
+ offset: int, device: Union[int, str, torch.device] = "cuda"
1022
+ ) -> None:
1023
+ r"""Set the random number generator state offset of the specified GPU.
1024
+
1025
+ Args:
1026
+ offset (int): The desired offset
1027
+ device (torch.device or int, optional): The device to set the RNG state.
1028
+ Default: ``'cuda'`` (i.e., ``torch.device('cuda')``, the current CUDA device).
1029
+ """
1030
+ final_device = _get_device(device)
1031
+
1032
+ def cb():
1033
+ default_generator = _get_generator(final_device)
1034
+ default_generator.set_offset(offset)
1035
+
1036
+ _lazy_call(cb)
1037
+
1038
+
1039
+ def _get_rng_state_offset(device: Union[int, str, torch.device] = "cuda") -> int:
1040
+ r"""Return the random number generator state offset of the specified GPU.
1041
+
1042
+ Args:
1043
+ device (torch.device or int, optional): The device to return the RNG state offset of.
1044
+ Default: ``'cuda'`` (i.e., ``torch.device('cuda')``, the current CUDA device).
1045
+
1046
+ .. warning::
1047
+ This function eagerly initializes CUDA.
1048
+ """
1049
+ _lazy_init()
1050
+ final_device = _get_device(device)
1051
+ default_generator = _get_generator(final_device)
1052
+ return default_generator.get_offset()
1053
+
1054
+
1055
+ from .memory import * # noqa: F403
1056
+
1057
+
1058
+ from .random import * # noqa: F403
1059
+
1060
+ ################################################################################
1061
+ # Define Storage and Tensor classes
1062
+ ################################################################################
1063
+
1064
+
1065
+ @staticmethod # type: ignore[misc]
1066
+ def _lazy_new(cls, *args, **kwargs):
1067
+ _lazy_init()
1068
+ # We may need to call lazy init again if we are a forked child
1069
+ # del _CudaBase.__new__
1070
+ return super(_CudaBase, cls).__new__(cls, *args, **kwargs)
1071
+
1072
+
1073
+ class _CudaBase:
1074
+ is_cuda = True
1075
+ is_sparse = False
1076
+
1077
+ def type(self, *args, **kwargs):
1078
+ # We could use a Protocol here to tell mypy that self has `get_device` method
1079
+ # but it is only available in the typing module on Python >= 3.8
1080
+ # or on typing_extensions module on Python >= 3.6
1081
+ with device(self.get_device()): # type: ignore[attr-defined]
1082
+ return super().type(*args, **kwargs) # type: ignore[misc]
1083
+
1084
+ __new__ = _lazy_new
1085
+
1086
+
1087
+ from torch.storage import _LegacyStorage, _warn_typed_storage_removal
1088
+
1089
+
1090
+ class _CudaLegacyStorage(_LegacyStorage):
1091
+ @classmethod
1092
+ def from_buffer(cls, *args, **kwargs):
1093
+ _warn_typed_storage_removal()
1094
+ raise RuntimeError("from_buffer: Not available for CUDA storage")
1095
+
1096
+ @classmethod
1097
+ def _new_with_weak_ptr(cls, *args, **kwargs):
1098
+ raise RuntimeError("_new_with_weak_ptr: Not available for CUDA storage")
1099
+
1100
+ @classmethod
1101
+ def _new_shared_filename(cls, manager, obj, size, *, device=None, dtype=None):
1102
+ raise RuntimeError("_new_shared_filename: Not available for CUDA storage")
1103
+
1104
+
1105
+ class ByteStorage(_CudaLegacyStorage):
1106
+ @classproperty
1107
+ def dtype(self):
1108
+ _warn_typed_storage_removal()
1109
+ return self._dtype
1110
+
1111
+ @classproperty
1112
+ def _dtype(self):
1113
+ return torch.uint8
1114
+
1115
+
1116
+ class DoubleStorage(_CudaLegacyStorage):
1117
+ @classproperty
1118
+ def dtype(self):
1119
+ _warn_typed_storage_removal()
1120
+ return self._dtype
1121
+
1122
+ @classproperty
1123
+ def _dtype(self):
1124
+ return torch.double
1125
+
1126
+
1127
+ class FloatStorage(_CudaLegacyStorage):
1128
+ @classproperty
1129
+ def dtype(self):
1130
+ _warn_typed_storage_removal()
1131
+ return self._dtype
1132
+
1133
+ @classproperty
1134
+ def _dtype(self):
1135
+ return torch.float
1136
+
1137
+
1138
+ class HalfStorage(_CudaLegacyStorage):
1139
+ @classproperty
1140
+ def dtype(self):
1141
+ _warn_typed_storage_removal()
1142
+ return self._dtype
1143
+
1144
+ @classproperty
1145
+ def _dtype(self):
1146
+ return torch.half
1147
+
1148
+
1149
+ class LongStorage(_CudaLegacyStorage):
1150
+ @classproperty
1151
+ def dtype(self):
1152
+ _warn_typed_storage_removal()
1153
+ return self._dtype
1154
+
1155
+ @classproperty
1156
+ def _dtype(self):
1157
+ return torch.long
1158
+
1159
+
1160
+ class IntStorage(_CudaLegacyStorage):
1161
+ @classproperty
1162
+ def dtype(self):
1163
+ _warn_typed_storage_removal()
1164
+ return self._dtype
1165
+
1166
+ @classproperty
1167
+ def _dtype(self):
1168
+ return torch.int
1169
+
1170
+
1171
+ class ShortStorage(_CudaLegacyStorage):
1172
+ @classproperty
1173
+ def dtype(self):
1174
+ _warn_typed_storage_removal()
1175
+ return self._dtype
1176
+
1177
+ @classproperty
1178
+ def _dtype(self):
1179
+ return torch.short
1180
+
1181
+
1182
+ class CharStorage(_CudaLegacyStorage):
1183
+ @classproperty
1184
+ def dtype(self):
1185
+ _warn_typed_storage_removal()
1186
+ return self._dtype
1187
+
1188
+ @classproperty
1189
+ def _dtype(self):
1190
+ return torch.int8
1191
+
1192
+
1193
+ class BoolStorage(_CudaLegacyStorage):
1194
+ @classproperty
1195
+ def dtype(self):
1196
+ _warn_typed_storage_removal()
1197
+ return self._dtype
1198
+
1199
+ @classproperty
1200
+ def _dtype(self):
1201
+ return torch.bool
1202
+
1203
+
1204
+ class BFloat16Storage(_CudaLegacyStorage):
1205
+ @classproperty
1206
+ def dtype(self):
1207
+ _warn_typed_storage_removal()
1208
+ return self._dtype
1209
+
1210
+ @classproperty
1211
+ def _dtype(self):
1212
+ return torch.bfloat16
1213
+
1214
+
1215
+ class ComplexDoubleStorage(_CudaLegacyStorage):
1216
+ @classproperty
1217
+ def dtype(self):
1218
+ _warn_typed_storage_removal()
1219
+ return self._dtype
1220
+
1221
+ @classproperty
1222
+ def _dtype(self):
1223
+ return torch.cdouble
1224
+
1225
+
1226
+ class ComplexFloatStorage(_CudaLegacyStorage):
1227
+ @classproperty
1228
+ def dtype(self):
1229
+ _warn_typed_storage_removal()
1230
+ return self._dtype
1231
+
1232
+ @classproperty
1233
+ def _dtype(self):
1234
+ return torch.cfloat
1235
+
1236
+
1237
+ del _LegacyStorage
1238
+ del _CudaLegacyStorage
1239
+
1240
+ torch._storage_classes.add(DoubleStorage)
1241
+ torch._storage_classes.add(FloatStorage)
1242
+ torch._storage_classes.add(LongStorage)
1243
+ torch._storage_classes.add(IntStorage)
1244
+ torch._storage_classes.add(ShortStorage)
1245
+ torch._storage_classes.add(CharStorage)
1246
+ torch._storage_classes.add(ByteStorage)
1247
+ torch._storage_classes.add(HalfStorage)
1248
+ torch._storage_classes.add(BoolStorage)
1249
+ torch._storage_classes.add(BFloat16Storage)
1250
+ torch._storage_classes.add(ComplexDoubleStorage)
1251
+ torch._storage_classes.add(ComplexFloatStorage)
1252
+
1253
+
1254
+ class _WrappedTritonKernel:
1255
+ """Just a simple wrapper to store some metadata for testing purposes."""
1256
+
1257
+ def __init__(self, kernel):
1258
+ self.kernel = kernel
1259
+ self.kernel_invoked = False
1260
+
1261
+ def __call__(self, *args, **kwargs):
1262
+ res = self.kernel(*args, **kwargs)
1263
+ self.kernel_invoked = True
1264
+ return res
1265
+
1266
+
1267
+ def _register_triton_kernels():
1268
+ if torch._running_with_deploy():
1269
+ return
1270
+
1271
+ @_WrappedTritonKernel
1272
+ def kernel_impl(*args, **kwargs):
1273
+ from torch.sparse._triton_ops import bsr_dense_mm
1274
+
1275
+ return bsr_dense_mm(*args, skip_checks=True, **kwargs)
1276
+
1277
+ @_WrappedTritonKernel
1278
+ def addmm_kernel_impl(*args, **kwargs):
1279
+ from torch.sparse._triton_ops import bsr_dense_addmm
1280
+
1281
+ return bsr_dense_addmm(*args, skip_checks=True, **kwargs)
1282
+
1283
+ has_triton = importlib.util.find_spec("triton") is not None
1284
+ if has_triton:
1285
+ torch._TritonLibrary.registerOp(
1286
+ "_triton_bsr_dense_mm_out",
1287
+ "_triton_bsr_dense_mm_out(Tensor bsr, Tensor dense, *, Tensor(a!) out) -> Tensor(a!)",
1288
+ kernel_impl,
1289
+ "SparseCsrCUDA",
1290
+ )
1291
+
1292
+ torch._TritonLibrary.registerOp(
1293
+ "_triton_bsr_dense_addmm_out",
1294
+ (
1295
+ "_triton_bsr_dense_addmm_out(Tensor input, Tensor bsr, Tensor dense,"
1296
+ " *, Scalar beta, Scalar alpha, Tensor(a!) out) -> Tensor(a!)"
1297
+ ),
1298
+ addmm_kernel_impl,
1299
+ "SparseCsrCUDA",
1300
+ )
1301
+
1302
+
1303
+ _lazy_call(_register_triton_kernels)
1304
+
1305
+
1306
+ from . import amp, jiterator, nvtx, profiler, sparse
1307
+
1308
+ __all__ = [
1309
+ # Typed storage and tensors
1310
+ "BFloat16Storage",
1311
+ "BFloat16Tensor",
1312
+ "BoolStorage",
1313
+ "BoolTensor",
1314
+ "ByteStorage",
1315
+ "ByteTensor",
1316
+ "CharStorage",
1317
+ "CharTensor",
1318
+ "ComplexDoubleStorage",
1319
+ "ComplexFloatStorage",
1320
+ "DoubleStorage",
1321
+ "DoubleTensor",
1322
+ "FloatStorage",
1323
+ "FloatTensor",
1324
+ "HalfStorage",
1325
+ "HalfTensor",
1326
+ "IntStorage",
1327
+ "IntTensor",
1328
+ "LongStorage",
1329
+ "LongTensor",
1330
+ "ShortStorage",
1331
+ "ShortTensor",
1332
+ "CUDAGraph",
1333
+ "CudaError",
1334
+ "DeferredCudaCallError",
1335
+ "Event",
1336
+ "ExternalStream",
1337
+ "OutOfMemoryError",
1338
+ "Stream",
1339
+ "StreamContext",
1340
+ "amp",
1341
+ "caching_allocator_alloc",
1342
+ "caching_allocator_delete",
1343
+ "can_device_access_peer",
1344
+ "check_error",
1345
+ "cudaStatus",
1346
+ "cudart",
1347
+ "current_blas_handle",
1348
+ "current_device",
1349
+ "current_stream",
1350
+ "default_generators",
1351
+ "default_stream",
1352
+ "device",
1353
+ "device_count",
1354
+ "device_of",
1355
+ "empty_cache",
1356
+ "get_allocator_backend",
1357
+ "CUDAPluggableAllocator",
1358
+ "change_current_allocator",
1359
+ "get_arch_list",
1360
+ "get_device_capability",
1361
+ "get_device_name",
1362
+ "get_device_properties",
1363
+ "get_gencode_flags",
1364
+ "get_rng_state",
1365
+ "get_rng_state_all",
1366
+ "get_sync_debug_mode",
1367
+ "graph",
1368
+ "graph_pool_handle",
1369
+ "graphs",
1370
+ "has_half",
1371
+ "has_magma",
1372
+ "init",
1373
+ "initial_seed",
1374
+ "ipc_collect",
1375
+ "is_available",
1376
+ "is_bf16_supported",
1377
+ "is_current_stream_capturing",
1378
+ "is_initialized",
1379
+ "jiterator",
1380
+ "list_gpu_processes",
1381
+ "make_graphed_callables",
1382
+ "manual_seed",
1383
+ "manual_seed_all",
1384
+ "max_memory_allocated",
1385
+ "max_memory_cached",
1386
+ "max_memory_reserved",
1387
+ "mem_get_info",
1388
+ "memory",
1389
+ "memory_allocated",
1390
+ "memory_cached",
1391
+ "memory_reserved",
1392
+ "memory_snapshot",
1393
+ "memory_stats",
1394
+ "memory_stats_as_nested_dict",
1395
+ "memory_summary",
1396
+ "memory_usage",
1397
+ "temperature",
1398
+ "power_draw",
1399
+ "clock_rate",
1400
+ "nccl",
1401
+ "nvtx",
1402
+ "profiler",
1403
+ "random",
1404
+ "reset_accumulated_memory_stats",
1405
+ "reset_max_memory_allocated",
1406
+ "reset_max_memory_cached",
1407
+ "reset_peak_memory_stats",
1408
+ "seed",
1409
+ "seed_all",
1410
+ "set_device",
1411
+ "set_per_process_memory_fraction",
1412
+ "set_rng_state",
1413
+ "set_rng_state_all",
1414
+ "set_stream",
1415
+ "set_sync_debug_mode",
1416
+ "sparse",
1417
+ "stream",
1418
+ "streams",
1419
+ "synchronize",
1420
+ "utilization",
1421
+ ]
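As a quick orientation to the device-query and stream APIs defined above, here is a minimal usage sketch (assuming a CUDA-enabled PyTorch build with at least one visible GPU; the tensor shapes are arbitrary):

    import torch

    if torch.cuda.is_available():
        # Basic device queries
        print("visible devices:", torch.cuda.device_count())
        major, minor = torch.cuda.get_device_capability(0)
        print(f"compute capability of device 0: {major}.{minor}")

        # Enqueue work on a side stream; kernels launched inside the
        # context go to `s` instead of the default stream.
        s = torch.cuda.Stream()
        with torch.cuda.stream(s):
            x = torch.randn(1024, 1024, device="cuda")
            y = x @ x
        torch.cuda.current_stream().wait_stream(s)
        torch.cuda.synchronize()

        # Optionally warn whenever an op implicitly synchronizes the device
        torch.cuda.set_sync_debug_mode("warn")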
env-llmeval/lib/python3.10/site-packages/torch/cuda/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (41.3 kB). View file
 
env-llmeval/lib/python3.10/site-packages/torch/cuda/__pycache__/_memory_viz.cpython-310.pyc ADDED
Binary file (20.8 kB). View file
 
env-llmeval/lib/python3.10/site-packages/torch/cuda/__pycache__/_sanitizer.cpython-310.pyc ADDED
Binary file (22.3 kB). View file
 
env-llmeval/lib/python3.10/site-packages/torch/cuda/__pycache__/_utils.cpython-310.pyc ADDED
Binary file (2.17 kB). View file
 
env-llmeval/lib/python3.10/site-packages/torch/cuda/__pycache__/error.cpython-310.pyc ADDED
Binary file (172 Bytes). View file
 
env-llmeval/lib/python3.10/site-packages/torch/cuda/__pycache__/graphs.cpython-310.pyc ADDED
Binary file (18.6 kB). View file
 
env-llmeval/lib/python3.10/site-packages/torch/cuda/__pycache__/jiterator.cpython-310.pyc ADDED
Binary file (6.42 kB). View file
 
env-llmeval/lib/python3.10/site-packages/torch/cuda/__pycache__/memory.cpython-310.pyc ADDED
Binary file (34 kB). View file
 
env-llmeval/lib/python3.10/site-packages/torch/cuda/__pycache__/nccl.cpython-310.pyc ADDED
Binary file (3.68 kB). View file
 
env-llmeval/lib/python3.10/site-packages/torch/cuda/__pycache__/nvtx.cpython-310.pyc ADDED
Binary file (3.02 kB). View file
 
env-llmeval/lib/python3.10/site-packages/torch/cuda/__pycache__/profiler.cpython-310.pyc ADDED
Binary file (1.91 kB). View file
 
env-llmeval/lib/python3.10/site-packages/torch/cuda/__pycache__/random.cpython-310.pyc ADDED
Binary file (5.54 kB). View file
 
env-llmeval/lib/python3.10/site-packages/torch/cuda/__pycache__/sparse.cpython-310.pyc ADDED
Binary file (173 Bytes). View file
 
env-llmeval/lib/python3.10/site-packages/torch/cuda/__pycache__/streams.cpython-310.pyc ADDED
Binary file (9.61 kB). View file
 
env-llmeval/lib/python3.10/site-packages/torch/cuda/_memory_viz.py ADDED
@@ -0,0 +1,626 @@
1
+ import pickle
2
+ import sys
3
+ import os
4
+ import io
5
+ import subprocess
6
+ import json
7
+ from functools import lru_cache
8
+ from typing import Any
9
+ from itertools import groupby
10
+ import base64
11
+ import warnings
12
+
13
+ cache = lru_cache(None)
14
+
15
+ __all__ = ["format_flamegraph", "segments", "memory", "compare"]
16
+
17
+ def _frame_fmt(f, full_filename=False):
18
+ i = f['line']
19
+ fname = f['filename']
20
+ if not full_filename:
21
+ fname = fname.split('/')[-1]
22
+ func = f['name']
23
+ return f'{fname}:{i}:{func}'
24
+
25
+ @cache
26
+ def _frame_filter(name, filename):
27
+ omit_functions = [
28
+ "unwind::unwind",
29
+ "CapturedTraceback::gather",
30
+ "gather_with_cpp",
31
+ "_start",
32
+ "__libc_start_main",
33
+ "PyEval_",
34
+ "PyObject_",
35
+ "PyFunction_",
36
+ ]
37
+ omit_filenames = [
38
+ "core/boxing",
39
+ "/Register",
40
+ "/Redispatch",
41
+ "pythonrun.c",
42
+ "Modules/main.c",
43
+ "Objects/call.c",
44
+ "Objects/methodobject.c",
45
+ "pycore_ceval.h",
46
+ "ceval.c",
47
+ "cpython/abstract.h",
48
+ ]
49
+ for of in omit_functions:
50
+ if of in name:
51
+ return False
52
+ for of in omit_filenames:
53
+ if of in filename:
54
+ return False
55
+ return True
56
+
57
+ def _frames_fmt(frames, full_filename=False, reverse=False):
58
+ if reverse:
59
+ frames = reversed(frames)
60
+ return [_frame_fmt(f, full_filename) for f in frames if _frame_filter(f['name'], f['filename'])]
61
+
62
+ def _block_extra_legacy(b):
63
+ if 'history' in b:
64
+ frames = b['history'][0].get('frames', [])
65
+ real_size = b['history'][0]['real_size']
66
+ else:
67
+ real_size = b.get('requested_size', b['size'])
68
+ frames = []
69
+ return frames, real_size
70
+
71
+ def _block_extra(b):
72
+ if 'frames' not in b:
73
+ # old snapshot format made it more complicated to get frames/allocated size
74
+ return _block_extra_legacy(b)
75
+ return b['frames'], b['requested_size']
76
+
77
+ def format_flamegraph(flamegraph_lines, flamegraph_script=None):
78
+ if flamegraph_script is None:
79
+ flamegraph_script = f'/tmp/{os.getuid()}_flamegraph.pl'
80
+ if not os.path.exists(flamegraph_script):
81
+ import urllib.request
82
+ print(f"Downloading flamegraph.pl to: {flamegraph_script}")
83
+ urllib.request.urlretrieve(
84
+ 'https://raw.githubusercontent.com/brendangregg/FlameGraph/master/flamegraph.pl', flamegraph_script)
85
+ subprocess.check_call(['chmod', '+x', flamegraph_script])
86
+ args = [flamegraph_script, '--countname', 'bytes']
87
+ p = subprocess.Popen(args, stdin=subprocess.PIPE, stdout=subprocess.PIPE, encoding='utf-8')
88
+ assert p.stdin is not None
89
+ assert p.stdout is not None
90
+ p.stdin.write(flamegraph_lines)
91
+ p.stdin.close()
92
+ result = p.stdout.read()
93
+ p.stdout.close()
94
+ p.wait()
95
+ assert p.wait() == 0
96
+ return result
97
+
98
+ def _write_blocks(f, prefix, blocks):
99
+ def frames_fragment(frames):
100
+ if not frames:
101
+ return "<non-python>"
102
+ return ';'.join(_frames_fmt(frames, reverse=True))
103
+ for b in blocks:
104
+ if 'history' not in b:
105
+ frames, accounted_for_size = _block_extra(b)
106
+ f.write(f'{prefix};{b["state"]};{frames_fragment(frames)} {accounted_for_size}\n')
107
+ else:
108
+ accounted_for_size = 0
109
+ for h in b['history']:
110
+ sz = h['real_size']
111
+ accounted_for_size += sz
112
+ if 'frames' in h:
113
+ frames = h['frames']
114
+ f.write(f'{prefix};{b["state"]};{frames_fragment(frames)} {sz}\n')
115
+ else:
116
+ f.write(f'{prefix};{b["state"]};<no-context> {sz}\n')
117
+ gaps = b['size'] - accounted_for_size
118
+ if gaps:
119
+ f.write(f'{prefix};{b["state"]};<gaps> {gaps}\n')
120
+
121
+ def segments(snapshot, format_flamegraph=format_flamegraph):
122
+ f = io.StringIO()
123
+ for seg in snapshot['segments']:
124
+ prefix = f'stream_{seg["stream"]};seg_{seg["address"]}'
125
+ _write_blocks(f, prefix, seg['blocks'])
126
+ return format_flamegraph(f.getvalue())
127
+
128
+ def memory(snapshot, format_flamegraph=format_flamegraph):
129
+ f = io.StringIO()
130
+ for seg in snapshot['segments']:
131
+ prefix = f'stream_{seg["stream"]}'
132
+ _write_blocks(f, prefix, seg['blocks'])
133
+ return format_flamegraph(f.getvalue())
134
+
135
+ def compare(before, after, format_flamegraph=format_flamegraph):
136
+ def _seg_key(seg):
137
+ return (seg['address'], seg['total_size'])
138
+
139
+ def _seg_info(seg):
140
+ return f'stream_{seg["stream"]};seg_{seg["address"]}'
141
+
142
+ f = io.StringIO()
143
+
144
+ before_segs = {_seg_key(seg) for seg in before}
145
+ after_segs = {_seg_key(seg) for seg in after}
146
+
147
+ print(f'only_before = {[a for a,_ in (before_segs - after_segs)]}')
148
+ print(f'only_after = {[a for a,_ in (after_segs - before_segs)]}')
149
+
150
+ for seg in before:
151
+ if _seg_key(seg) not in after_segs:
152
+ _write_blocks(f, f'only_before;{_seg_info(seg)}', seg['blocks'])
153
+
154
+ for seg in after:
155
+ if _seg_key(seg) not in before_segs:
156
+ _write_blocks(f, f'only_after;{_seg_info(seg)}', seg['blocks'])
157
+
158
+ return format_flamegraph(f.getvalue())
159
+
160
+ def _format_size(num):
161
+ # https://stackoverflow.com/questions/1094841/get-human-readable-version-of-file-size
162
+ for unit in ["", "Ki", "Mi", "Gi", "Ti", "Pi", "Ei", "Zi"]:
163
+ if abs(num) < 1024.0:
164
+ return f"{num:3.1f}{unit}B"
165
+ num /= 1024.0
166
+ return f"{num:.1f}YiB"
167
+
168
+ class Bytes:
169
+ def __init__(self, value):
170
+ self.value = value
171
+
172
+ def __add__(self, rhs):
173
+ return Bytes(self.value + rhs)
174
+
175
+ def __repr__(self):
176
+ return _format_size(self.value)
177
+
178
+ def calc_active(seg):
179
+ return sum(b['size'] for b in seg['blocks'] if b['state'] == 'active_allocated')
180
+
181
+ def _report_free(free_external, free_internal):
182
+ total = free_external + free_internal
183
+ suffix = ''
184
+ if total != 0:
185
+ pct = (free_internal / total) * 100
186
+ suffix = f' ({pct:.1f}% internal)'
187
+ return f'{Bytes(total)}{suffix}'
188
+
189
+ PAGE_SIZE = 1024 * 1024 * 20
190
+ legend = f"""\
191
+
192
+ Legend:
193
+ [a ] - a segment in the allocator
194
+ ^-- a page {Bytes(PAGE_SIZE)} of memory in the segment
195
+ a-z: pages filled with a single block's content
196
+ ' ': page is completely free
197
+ *: page is completely full with multiple blocks
198
+ 0-9: page is partially full with tensors of multiple blocks (9 == 90% full)
199
+ (X% internal) - of the free memory, X% is free because we rounded the size of the allocation.
200
+ """
201
+
202
+ def segsum(data):
203
+ r"""Visually reports how the allocator has filled its segments.
204
+
205
+ This printout can help debug fragmentation issues since free fragments
206
+ will appear as gaps in this printout. The amount of free space is reported
207
+ for each segment.
208
+ We distinguish between internal free memory, which occurs because the
209
+ allocator rounds the allocation size, and external free memory, which
210
+ consists of the gaps between allocations in a segment.
211
+ Args:
212
+ data: snapshot dictionary created from _snapshot()
213
+ """
214
+ segments = []
215
+ out = io.StringIO()
216
+ out.write(f"Summary of segments >= {Bytes(PAGE_SIZE)} in size\n")
217
+ total_reserved = 0
218
+ total_allocated = 0
219
+ free_external = 0
220
+ free_internal = 0
221
+ for seg in sorted(data['segments'], key=lambda x: (x['total_size'], calc_active(x))):
222
+ total_reserved += seg['total_size']
223
+
224
+ seg_free_external = 0
225
+ seg_free_internal = 0
226
+ seg_allocated = 0
227
+ all_ranges = []
228
+ boffset = 0
229
+ for b in seg['blocks']:
230
+ active = b['state'] == 'active_allocated'
231
+ if active:
232
+ _, allocated_size = _block_extra(b)
233
+ all_ranges.append((boffset, allocated_size, True))
234
+ seg_allocated += allocated_size
235
+ seg_free_internal += b['size'] - allocated_size
236
+ else:
237
+ seg_free_external += b['size']
238
+
239
+ boffset += b['size']
240
+
241
+ total_allocated += seg_allocated
242
+ free_external += seg_free_external
243
+ free_internal += seg_free_internal
244
+
245
+ nseg = (seg['total_size'] - 1) // PAGE_SIZE + 1
246
+ occupied = [' ' for _ in range(nseg)]
247
+ frac = [0.0 for _ in range(nseg)]
248
+ active_size = 0
249
+ for i, (start_, size, active) in enumerate(all_ranges):
250
+ active_size += size
251
+ finish_ = (start_ + size)
252
+ start = start_ // PAGE_SIZE
253
+ finish = (finish_ - 1) // PAGE_SIZE + 1
254
+ m = chr(ord('a' if active else 'A') + (i % 26))
255
+ for j in range(start, finish):
256
+ s = max(start_, j * PAGE_SIZE)
257
+ e = min(finish_, (j + 1) * PAGE_SIZE)
258
+ frac[j] += (e - s) / PAGE_SIZE
259
+ if occupied[j] != ' ':
260
+ occupied[j] = '0123456789*'[int(frac[j] * 10)]
261
+ else:
262
+ occupied[j] = m
263
+ stream = '' if seg['stream'] == 0 else f', stream_{seg["stream"]}'
264
+ body = ''.join(occupied)
265
+ assert seg_free_external + seg_free_internal + seg_allocated == seg['total_size']
266
+ stream = f' stream_{seg["stream"]}' if seg['stream'] != 0 else ''
267
+ if seg['total_size'] >= PAGE_SIZE:
268
+ out.write(f'[{body}] {Bytes(seg["total_size"])} allocated, '
269
+ f'{_report_free(seg_free_external, seg_free_internal)} free{stream}\n')
270
+ out.write(f'segments: {len(data["segments"])}\n')
271
+ out.write(f'total_reserved: {Bytes(total_reserved)}\n')
272
+ out.write(f'total_allocated: {Bytes(total_allocated)}\n')
273
+ internal_external = f' ({Bytes(free_internal)} internal + {Bytes(free_external)} external)' if free_internal else ''
274
+ out.write(f'total_free: {_report_free(free_external, free_internal)}\n')
275
+ out.write(legend)
276
+ assert free_internal + free_external + total_allocated == total_reserved
277
+ return out.getvalue()
278
+
279
+ def trace(data):
280
+ out = io.StringIO()
281
+
282
+ def format(entries):
283
+ segment_intervals : list = []
284
+ segment_addr_to_name = {}
285
+ allocation_addr_to_name = {}
286
+
287
+ free_names : list = []
288
+ next_name = 0
289
+
290
+ def _name():
291
+ nonlocal next_name
292
+ if free_names:
293
+ return free_names.pop()
294
+ r, m = next_name // 26, next_name % 26
295
+ next_name += 1
296
+ return f'{chr(ord("a") + m)}{"" if r == 0 else r}'
297
+
298
+ def find_segment(addr):
299
+ for name, saddr, size in segment_intervals:
300
+ if addr >= saddr and addr < saddr + size:
301
+ return name, saddr
302
+ for i, seg in enumerate(data['segments']):
303
+ saddr = seg['address']
304
+ size = seg['allocated_size']
305
+ if addr >= saddr and addr < saddr + size:
306
+ return f'seg_{i}', saddr
307
+ return None, None
308
+ count = 0
309
+ out.write(f'{len(entries)} entries\n')
310
+
311
+
312
+ total_reserved = 0
313
+ for seg in data['segments']:
314
+ total_reserved += seg['total_size']
315
+
316
+ for count, e in enumerate(entries):
317
+ if e['action'] == 'alloc':
318
+ addr, size = e['addr'], e['size']
319
+ n = _name()
320
+ seg_name, seg_addr = find_segment(addr)
321
+ if seg_name is None:
322
+ seg_name = "MEM"
323
+ offset = addr
324
+ else:
325
+ offset = addr - seg_addr
326
+ out.write(f'{n} = {seg_name}[{offset}:{Bytes(size)}]\n')
327
+ allocation_addr_to_name[addr] = (n, size, count)
328
+ count += size
329
+ elif e['action'] == 'free_requested':
330
+ addr, size = e['addr'], e['size']
331
+ name, _, _ = allocation_addr_to_name.get(addr, (addr, None, None))
332
+ out.write(f'del {name} # {Bytes(size)}\n')
333
+ elif e['action'] == 'free_completed':
334
+ addr, size = e['addr'], e['size']
335
+ count -= size
336
+ name, _, _ = allocation_addr_to_name.get(addr, (addr, None, None))
337
+ out.write(f'# free completed for {name} {Bytes(size)}\n')
338
+ if addr in allocation_addr_to_name:
339
+ free_names.append(name)
340
+ del allocation_addr_to_name[addr]
341
+ elif e['action'] == 'segment_alloc':
342
+ addr, size = e['addr'], e['size']
343
+ name = _name()
344
+ out.write(f'{name} = cudaMalloc({addr}, {Bytes(size)})\n')
345
+ segment_intervals.append((name, addr, size))
346
+ segment_addr_to_name[addr] = name
347
+ elif e['action'] == 'segment_free':
348
+ addr, size = e['addr'], e['size']
349
+ name = segment_addr_to_name.get(addr, addr)
350
+ out.write(f'cudaFree({name}) # {Bytes(size)}\n')
351
+ if addr in segment_addr_to_name:
352
+ free_names.append(name)
353
+ del segment_addr_to_name[addr]
354
+ elif e['action'] == 'oom':
355
+ size = e['size']
356
+ free = e['device_free']
357
+ out.write(f'raise OutOfMemoryError() # {Bytes(size)} requested, {Bytes(free)} free in CUDA\n')
358
+ else:
359
+ out.write(f'{e}\n')
360
+ out.write(f"TOTAL MEM: {Bytes(count)}")
361
+ for i, d in enumerate(data['device_traces']):
362
+ if d:
363
+ out.write(f'Device {i} ----------------\n')
364
+ format(d)
365
+ return out.getvalue()
366
+
367
+
368
+ _memory_viz_template = r"""
369
+ <!DOCTYPE html>
370
+ <html>
371
+ <head>
372
+ </head>
373
+ <body>
374
+ <script type="module">
375
+ import {add_local_files} from "https://cdn.jsdelivr.net/gh/pytorch/pytorch@main/torch/utils/viz/MemoryViz.js"
376
+ const local_files = $SNAPSHOT
377
+ add_local_files(local_files, $VIZ_KIND)
378
+ </script>
379
+ </body>
380
+ """
381
+
382
+ def _format_viz(data, viz_kind, device):
383
+ if device is not None:
384
+ warnings.warn('device argument is deprecated, plots now contain all devices')
385
+ buffer = pickle.dumps(data)
386
+ buffer += b'\x00' * (3 - len(buffer) % 3)
387
+ # Encode the buffer with base64
388
+ encoded_buffer = base64.b64encode(buffer).decode('utf-8')
389
+
390
+ json_format = json.dumps([{"name": 'snapshot.pickle', "base64": encoded_buffer}])
391
+ return _memory_viz_template.replace('$VIZ_KIND', repr(viz_kind)) \
392
+ .replace('$SNAPSHOT', json_format)
393
+
394
+ def trace_plot(data, device=None, plot_segments=False):
395
+ """Generate a visualization over time of the memory usage recorded by the trace as an html file.
396
+
397
+ Args:
398
+ data: Memory snapshot as generated from torch.cuda.memory._snapshot()
399
+ device (torch.device, optional): Generate the trace for this device, needed if multiple devices have allocations.
400
+ plot_segments (bool, optional): Plots memory returned from cudaMalloc, rather than individual allocations.
401
+ Defaults to False.
402
+
403
+ Returns:
404
+ str: HTML of visualization
405
+ """
406
+ return _format_viz(data, 'Active Memory Timeline' if not plot_segments else 'Active Cached Memory Timeline', device)
407
+
408
+
409
+ def _profile_to_snapshot(profile):
410
+ import torch
411
+ from torch.profiler._memory_profiler import Action, TensorKey
412
+ from torch._C._profiler import _EventType
413
+ memory_profile = profile._memory_profile()
414
+
415
+ allocation_stacks = {}
416
+ for event in memory_profile._op_tree.sorted_nodes:
417
+ if event.tag == _EventType.Allocation:
418
+ parent = event.parent
419
+ python_parents = []
420
+ while parent:
421
+ if parent.tag in (_EventType.PyCall, _EventType.PyCCall):
422
+ python_parents.append(parent)
423
+ parent = parent.parent
424
+ key = TensorKey.from_allocation(event.extra_fields)
425
+
426
+ # Corner case: If allocation doesn't have an ID (can't prove it was used as a Tensor)
427
+ # key will be None. I should add some way to identify these, I just haven't yet.
428
+ if key and event.extra_fields.alloc_size > 0:
429
+ allocation_stacks[key] = python_parents
430
+
431
+
432
+ device_count = torch.cuda.device_count()
433
+ snapshot = {
434
+ 'device_traces': [[] for _ in range(device_count + 1)],
435
+ 'segments': [{'device': device,
436
+ 'address': None,
437
+ 'total_size': 0,
438
+ 'stream': 0,
439
+ 'blocks': []} for device in range(device_count + 1)]
440
+ }
441
+
442
+ def to_device(device):
443
+ if device.type == 'cuda':
444
+ return device.index
445
+ else:
446
+ return device_count
447
+
448
+ def allocate(size, tensor_key, version, during_trace=True):
449
+ device = to_device(tensor_key.device)
450
+ addr = tensor_key.storage.ptr
451
+
452
+ seg = snapshot['segments'][device] # type: ignore[index]
453
+ if seg['address'] is None or seg['address'] > addr:
454
+ seg['address'] = addr
455
+ seg['total_size'] = max(seg['total_size'], addr + size) # record max addr for now, we will make it the size later
456
+ category = memory_profile._categories.get(tensor_key, version)
457
+ category = category.name.lower() if category is not None else "unknown"
458
+ stack = allocation_stacks.get(tensor_key, ())
459
+ stack = [{'filename': 'none', 'line': 0, 'name': p.name} for p in stack]
460
+ r = {'action': 'alloc', 'addr': addr, 'size': size, 'stream': 0, 'frames': stack, 'category': category}
461
+ if during_trace:
462
+ snapshot['device_traces'][device].append(r) # type: ignore[index]
463
+ return r
464
+
465
+ def free(alloc, device):
466
+ for e in ('free_requested', 'free_completed'):
467
+ snapshot['device_traces'][device].append({'action': e, # type: ignore[index]
468
+ 'addr': alloc['addr'],
469
+ 'size': alloc['size'],
470
+ 'stream': 0,
471
+ 'frames': alloc['frames']})
472
+
473
+ kv_to_elem = {}
474
+
475
+
476
+
477
+ # create the device trace
478
+ for time, action, (tensor_key, version), size in memory_profile.timeline:
479
+ if not isinstance(tensor_key, TensorKey):
480
+ continue
481
+ if action == Action.CREATE:
482
+ kv_to_elem[(tensor_key, version)] = allocate(size, tensor_key, version)
483
+ elif action == Action.DESTROY:
484
+ free(kv_to_elem.pop((tensor_key, version)), to_device(tensor_key.device))
485
+ elif action == Action.INCREMENT_VERSION:
486
+ free(kv_to_elem.pop((tensor_key, version)), to_device(tensor_key.device))
487
+ kv_to_elem[(tensor_key, version + 1)] = allocate(size, tensor_key, version + 1)
488
+ elif action == Action.PREEXISTING:
489
+ kv_to_elem[(tensor_key, version)] = allocate(size, tensor_key, version, during_trace=False)
490
+
491
+
492
+ # create the final snapshot state
493
+ blocks_at_end = [(to_device(tensor_key.device), event['addr'], event['size'], event['frames'])
494
+ for (tensor_key, version), event in kv_to_elem.items()]
495
+ for device, blocks in groupby(sorted(blocks_at_end), key=lambda x: x[0]):
496
+ seg = snapshot['segments'][device] # type: ignore[index]
497
+ last_addr = seg['address']
498
+ for _, addr, size, frames in blocks:
499
+ if last_addr < addr:
500
+ seg['blocks'].append({'size': addr - last_addr, 'state': 'inactive'})
501
+ seg['blocks'].append({'size': size, 'state': 'active_allocated', 'requested_size': size, 'frames': frames})
502
+ last_addr = addr + size
503
+ if last_addr < seg['total_size']:
504
+ seg['blocks'].append({'size': seg['total_size'] - last_addr, 'state': 'inactive'})
505
+
506
+ snapshot['segments'] = [seg for seg in snapshot['segments'] if seg['blocks']] # type: ignore[attr-defined]
507
+ for seg in snapshot['segments']: # type: ignore[attr-defined, name-defined, no-redef]
508
+ seg['total_size'] -= seg['address']
509
+ if not seg['blocks']:
510
+ seg['blocks'].append({'size': seg['total_size'], 'state': 'inactive'})
511
+
512
+ return snapshot
513
+
514
+ def profile_plot(profile, device=None):
515
+ """Generate a visualization over time of the memory usage recorded by kineto memory profiling as an html file.
516
+
517
+ Args:
518
+ profile: profile as generated by `torch.profiler.profile(profile_memory=True)`
519
+ device (torch.device, optional): Generate the trace for this device, needed if multiple devices have allocations.
520
+
521
+ Returns:
522
+ str: HTML of visualization
523
+ """
524
+ snapshot = _profile_to_snapshot(profile)
525
+ return _format_viz(snapshot, 'Active Memory Timeline', device)
526
+
527
+
528
+ def segment_plot(data: Any, device=None):
529
+ return _format_viz(data, 'Allocator State History', device)
530
+
531
+ if __name__ == "__main__":
532
+ import os.path
533
+ thedir = os.path.realpath(os.path.dirname(__file__))
534
+ if thedir in sys.path:
535
+ # otherwise we find cuda/random.py as random...
536
+ sys.path.remove(thedir)
537
+ import argparse
538
+
539
+ fn_name = 'torch.cuda.memory._snapshot()'
540
+ pickled = f'pickled memory statistics from {fn_name}'
541
+ parser = argparse.ArgumentParser(description=f'Visualize memory dumps produced by {fn_name}')
542
+
543
+ subparsers = parser.add_subparsers(dest='action')
544
+
545
+ def _output(p):
546
+ p.add_argument('-o', '--output', default='output.svg', help='flamegraph svg (default: output.svg)')
547
+
548
+ description = 'Prints overall allocation statistics and a visualization of how the allocators segments are currently filled.'
549
+ stats_a = subparsers.add_parser('stats', description=description)
550
+ stats_a.add_argument('input', help=pickled)
551
+
552
+ description = 'Prints buffer of the most recent allocation events embedded in the snapshot in a Pythonic style.'
553
+ trace_a = subparsers.add_parser('trace', description=description)
554
+ trace_a.add_argument('input', help=pickled)
555
+
556
+ description = 'Generate a flamegraph that visualizes what memory is stored in each allocator segment (aka block)'
557
+ segments_a = subparsers.add_parser('segments', description=description)
558
+ segments_a.add_argument('input', help=pickled)
559
+ _output(segments_a)
560
+
561
+ description = "Generate a flamegraph the program locations contributing to CUDA memory usage."
562
+ memory_a = subparsers.add_parser('memory', description=description)
563
+ memory_a.add_argument('input', help=pickled)
564
+ _output(memory_a)
565
+
566
+ description = 'Generate a flamegraph that shows segments (aka blocks) that have been added ' \
567
+ 'or removed between two different memory snapshots.'
568
+ compare_a = subparsers.add_parser('compare', description=description)
569
+ compare_a.add_argument('before', help=pickled)
570
+ compare_a.add_argument('after', help=pickled)
571
+ _output(compare_a)
572
+
573
+ plots = (
574
+ ("trace_plot", "Generate a visualization over time of the memory usage recorded by the trace as an html file."),
575
+ ("segment_plot", "Visualize how allocations are packed into allocator segments at each point in a trace as an html file.")
576
+ )
577
+ for cmd, description in plots:
578
+ trace_plot_a = subparsers.add_parser(cmd, description=description)
579
+ trace_plot_a.add_argument('input', help=pickled)
580
+ help = 'visualize trace from this device (default: chooses the only device with trace info or errors)'
581
+ trace_plot_a.add_argument('-d', '--device', type=int, default=None, help=help)
582
+ help = 'path to save the visualization (default: output.html)'
583
+ trace_plot_a.add_argument('-o', '--output', default='output.html', help=help)
584
+ if cmd == "trace_plot":
585
+ help = 'visualize change to segments rather than individual allocations'
586
+ trace_plot_a.add_argument('-s', '--segments', action='store_true', help=help)
587
+
588
+
589
+ args = parser.parse_args()
590
+
591
+ def _read(name):
592
+ if name == '-':
593
+ f = sys.stdin.buffer
594
+ else:
595
+ f = open(name, 'rb')
596
+ data = pickle.load(f)
597
+ if isinstance(data, list): # segments only...
598
+ data = {'segments': data, 'traces': []}
599
+ return data
600
+
601
+ def _write(name, data):
602
+ with open(name, 'w') as f:
603
+ f.write(data)
604
+
605
+ if args.action == 'segments':
606
+ data = _read(args.input)
607
+ _write(args.output, segments(data))
608
+ elif args.action == 'memory':
609
+ data = _read(args.input)
610
+ _write(args.output, memory(data))
611
+ elif args.action == 'stats':
612
+ data = _read(args.input)
613
+ print(segsum(data))
614
+ elif args.action == 'trace':
615
+ data = _read(args.input)
616
+ print(trace(data))
617
+ elif args.action == 'compare':
618
+ before = _read(args.before)
619
+ after = _read(args.after)
620
+ _write(args.output, compare(before, after))
621
+ elif args.action == 'trace_plot':
622
+ data = _read(args.input)
623
+ _write(args.output, trace_plot(data, device=args.device, plot_segments=args.segments))
624
+ elif args.action == 'segment_plot':
625
+ data = _read(args.input)
626
+ _write(args.output, segment_plot(data, device=args.device))
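To tie the helpers in _memory_viz.py together, here is a small sketch of programmatic use (assuming a CUDA build; `_record_memory_history` and `_snapshot` are private APIs of `torch.cuda.memory`, as referenced in the CLI help above):

    import torch
    from torch.cuda import memory
    from torch.cuda._memory_viz import segsum, trace_plot

    # Record allocator history, run some CUDA work, then take a snapshot.
    memory._record_memory_history()
    x = torch.randn(4096, 4096, device="cuda")
    y = x @ x
    snapshot = memory._snapshot()

    # Text summary of how allocator segments are filled (the `stats` view)
    print(segsum(snapshot))

    # HTML timeline of allocations over time (the `trace_plot` view)
    with open("memory_timeline.html", "w") as f:
        f.write(trace_plot(snapshot))

The same snapshot, pickled to disk, can also be fed to the argparse-based CLI defined in the `__main__` block above (the `stats`, `trace`, `segments`, `memory`, `compare`, and plot subcommands).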
env-llmeval/lib/python3.10/site-packages/torch/cuda/_sanitizer.py ADDED
@@ -0,0 +1,622 @@
1
+ r"""
2
+ This module introduces CUDA Sanitizer, a tool for detecting synchronization errors between kernels run on different streams.
3
+
4
+ It stores information on accesses to tensors to determine if they are synchronized
5
+ or not. When enabled in a Python program and a possible data race is detected, a
6
+ detailed warning will be printed and the program will exit.
7
+
8
+ It can be enabled either by importing this module and calling
9
+ :func:`enable_cuda_sanitizer()` or by exporting the ``TORCH_CUDA_SANITIZER``
10
+ environment variable.
11
+ """
12
+
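+ # Illustrative usage sketch (not part of the upstream file): enabling the
+ # sanitizer and provoking a report, roughly following the docstring above;
+ # tensor shapes and stream handling are only an example:
+ #
+ #     import torch
+ #     import torch.cuda._sanitizer as csan
+ #
+ #     csan.enable_cuda_sanitizer()
+ #
+ #     a = torch.rand(4, 2, device="cuda")          # written on the default stream
+ #     with torch.cuda.stream(torch.cuda.Stream()):
+ #         torch.sum(a)                             # read on a side stream, unsynchronized
+ #     # A detailed CSAN report is printed and CUDASanitizerErrors is raised.
+ #
+ # Alternatively, export TORCH_CUDA_SANITIZER before launching the program.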
13
+ import enum
14
+ import functools
15
+ import inspect
16
+ import io
17
+ import logging
18
+ import sys
19
+ import textwrap
20
+ import traceback
21
+ from dataclasses import dataclass, field
22
+ from typing import Any, Dict, Iterator, List, Optional, Set, Tuple, TypeVar
23
+
24
+ import torch
25
+ import torch.utils._cuda_trace as cuda_trace
26
+ from torch.utils import _pytree as pytree
27
+ from torch.utils._python_dispatch import TorchDispatchMode
28
+
29
+
30
+ DEFAULT_STREAM_ID = 0
31
+
32
+ TK = TypeVar("TK")
33
+ TVa = TypeVar("TVa")
34
+ TVb = TypeVar("TVb")
35
+
36
+ DataPtr = int
37
+ StreamId = int
38
+ EventId = int
39
+ SeqNum = int
40
+
41
+ logger = logging.getLogger(__name__)
42
+
43
+
44
+ class AccessType(enum.Enum):
45
+ READ = enum.auto()
46
+ WRITE = enum.auto()
47
+
48
+ def __str__(self):
49
+ return "reading from" if self is AccessType.READ else "writing to"
50
+
51
+
52
+ @dataclass
53
+ class Access:
54
+ r"""Stores information about a single access to a tensor by a kernel.
55
+
56
+ Args:
57
+ type: either AccessType.READ or AccessType.WRITE.
58
+ seq_num: the sequential number of the kernel performing the access.
59
+ stream: the stream id of the stream executing the kernel.
60
+ operator: the schema of the launched kernel, which lists the
61
+ arguments and return type.
62
+ aliases: the arguments in the schema this access corresponds to.
63
+ is_output: whether the tensor was an output of the kernel.
64
+ stack_trace: the stack summary object captured during access.
65
+ """
66
+
67
+ type: AccessType
68
+ seq_num: SeqNum
69
+ stream: StreamId
70
+ operator: str
71
+ aliases: List[str]
72
+ is_output: bool
73
+ stack_trace: traceback.StackSummary
74
+
75
+
76
+ class SynchronizationError(Exception):
77
+ """Base class for errors detected by CUDA Sanitizer."""
78
+
79
+ pass
80
+
81
+
82
+ class UnsynchronizedAccessError(SynchronizationError):
83
+ """Stores information about two unsynchronized accesses to one data pointer."""
84
+
85
+ def __init__(
86
+ self,
87
+ data_ptr: DataPtr,
88
+ allocation_stack_trace: Optional[traceback.StackSummary],
89
+ current_access: Access,
90
+ previous_access: Access,
91
+ ):
92
+ self.data_ptr = data_ptr
93
+ self.allocation_stack_trace = allocation_stack_trace
94
+ self.current_access = current_access
95
+ self.previous_access = previous_access
96
+
97
+ def __str__(self):
98
+ def format_access(access: Access):
99
+ message.write(f"{access.operator}\n{access.type}")
100
+ if access.aliases:
101
+ message.write(" argument(s) " + ", ".join(access.aliases))
102
+ if access.is_output:
103
+ message.write(", and to")
104
+ if access.is_output:
105
+ message.write(" the output")
106
+ message.write(
107
+ f"\nWith stack trace:\n{''.join(access.stack_trace.format())}\n"
108
+ )
109
+
110
+ with io.StringIO() as message:
111
+ message.write(
112
+ textwrap.dedent(
113
+ f"""\
114
+ ============================
115
+ CSAN detected a possible data race on tensor with data pointer {self.data_ptr}
116
+ Access by stream {self.current_access.stream} during kernel:
117
+ """
118
+ )
119
+ )
120
+ format_access(self.current_access)
121
+
122
+ message.write(
123
+ f"Previous access by stream {self.previous_access.stream} during kernel:\n"
124
+ )
125
+ format_access(self.previous_access)
126
+
127
+ if self.allocation_stack_trace:
128
+ message.write(
129
+ "Tensor was allocated with stack trace:\n"
130
+ f"{''.join(self.allocation_stack_trace.format())}"
131
+ )
132
+ else:
133
+ message.write("Trace for tensor allocation not found.")
134
+ return message.getvalue()
135
+
136
+
137
+ class CUDASanitizerErrors(Exception):
138
+ """Wrapper class for errors reported by CUDA Sanitizer."""
139
+
140
+ def __init__(self, errors: List[SynchronizationError]):
141
+ self.errors = errors
142
+
143
+ def __str__(self):
144
+ return f"detected {len(self.errors)} errors"
145
+
146
+
147
+ @dataclass
148
+ class TensorInfo:
149
+ r"""Stores information about a single tensor and recent accesses to it.
150
+
151
+ Args:
152
+ allocation_stack_trace: the stack summary object captured during tensor
153
+ allocation. Can be ``None`` if the allocation wasn't caught by CSAN.
154
+ reads: list of read accesses to the tensor that were performed since
155
+ the last write.
156
+ write: the last write access to the tensor.
157
+ """
158
+
159
+ allocation_stack_trace: Optional[traceback.StackSummary]
160
+ reads: List[Access] = field(default_factory=list)
161
+ write: Optional[Access] = None
162
+
163
+
164
+ class _TensorsAccessed:
165
+ def __init__(self):
166
+ self.accesses: Dict[DataPtr, TensorInfo] = {}
167
+
168
+ def ensure_tensor_exists(self, data_ptr: DataPtr) -> None:
169
+ if data_ptr not in self.accesses:
170
+ logger.info(
171
+ "Found tensor with pointer: %s, but no matching tensor "
172
+ "allocation in the trace. Backfilling the trace now. "
173
+ "Perhaps the sanitizer was enabled after some torch operations?",
174
+ data_ptr,
175
+ )
176
+ self.create_tensor(data_ptr, None)
177
+
178
+ def ensure_tensor_does_not_exist(self, data_ptr: DataPtr) -> None:
179
+ if data_ptr in self.accesses:
180
+ logger.info(
181
+ "Found duplicate tensor allocation in the trace for tensor with "
182
+ "pointer: %s. Assuming the trace for tensor deallocation "
183
+ "wasn't caught and backfilling it now. "
184
+ "Perhaps the sanitizer was enabled after some torch operations?",
185
+ data_ptr,
186
+ )
187
+ self.delete_tensor(data_ptr)
188
+
189
+ def create_tensor(
190
+ self, data_ptr: DataPtr, stack_trace: Optional[traceback.StackSummary]
191
+ ) -> None:
192
+ self.accesses[data_ptr] = TensorInfo(stack_trace)
193
+
194
+ def delete_tensor(self, data_ptr: DataPtr) -> None:
195
+ del self.accesses[data_ptr]
196
+
197
+ def were_there_reads_since_last_write(self, data_ptr: DataPtr) -> bool:
198
+ return True if self.accesses[data_ptr].reads else False
199
+
200
+ def get_allocation_stack_trace(
201
+ self, data_ptr: DataPtr
202
+ ) -> Optional[traceback.StackSummary]:
203
+ return self.accesses[data_ptr].allocation_stack_trace
204
+
205
+ def get_write(self, data_ptr: DataPtr) -> Optional[Access]:
206
+ return self.accesses[data_ptr].write
207
+
208
+ def get_reads(self, data_ptr: DataPtr) -> List[Access]:
209
+ return self.accesses[data_ptr].reads
210
+
211
+ def add_read(self, data_ptr: DataPtr, access: Access) -> None:
212
+ self.accesses[data_ptr].reads.append(access)
213
+
214
+ def set_write(self, data_ptr: DataPtr, access: Access) -> None:
215
+ self.accesses[data_ptr].write = access
216
+ self.accesses[data_ptr].reads = []
217
+
218
+
219
+ class StreamSynchronizations:
220
+ def __init__(self):
221
+ self.current_sync_states: Dict[StreamId, Dict[StreamId, SeqNum]] = {}
222
+ self.recorded_sync_states: Dict[EventId, Dict[StreamId, SeqNum]] = {}
223
+ self.host_sync_state: Dict[StreamId, SeqNum] = {}
224
+ self.create_stream(DEFAULT_STREAM_ID)
225
+
226
+ def _ensure_stream_exists(self, stream: StreamId) -> None:
227
+ if stream not in self.current_sync_states:
228
+ logger.info(
229
+ "Found Stream with id: %s, but no matching stream "
230
+ "creation in the trace. Backfilling the trace now. "
231
+ "Perhaps the sanitizer was enabled after some torch operations?",
232
+ stream,
233
+ )
234
+ self.create_stream(stream)
235
+
236
+ def _ensure_event_exists(self, event: EventId) -> None:
237
+ if event not in self.recorded_sync_states:
238
+ logger.info(
239
+ "Found Event with id: %s, but no matching event "
240
+ "creation in the trace. Backfilling the trace now. "
241
+ "Perhaps the sanitizer was enabled after some torch operations?",
242
+ event,
243
+ )
244
+ self.create_event(event)
245
+
246
+ def _ensure_event_does_not_exist(self, event: EventId) -> None:
247
+ if event in self.recorded_sync_states:
248
+ logger.info(
249
+ "Found duplicate event creation in the trace for event with "
250
+ "id: %s. Assuming the trace for event deletion wasn't caught "
251
+ "and backfilling it now. "
252
+ "Perhaps the sanitizer was enabled after some torch operations?",
253
+ event,
254
+ )
255
+ self.delete_event(event)
256
+
257
+ def create_stream(self, stream: StreamId) -> None:
258
+ if stream in self.current_sync_states:
259
+ logger.info(
260
+ "Found duplicate Stream creation in the trace for Stream with "
261
+ "id: %s. PyTorch Streams are only created once, so this "
262
+ "trace entry is ignored.",
263
+ stream,
264
+ )
265
+ else:
266
+ self.host_sync_state[stream] = 0
267
+ self.current_sync_states[stream] = self.host_sync_state.copy()
268
+
269
+ def create_event(self, event: EventId) -> None:
270
+ self._ensure_event_does_not_exist(event)
271
+ self.recorded_sync_states[event] = {}
272
+
273
+ def delete_event(self, event: EventId) -> None:
274
+ self._ensure_event_exists(event)
275
+ del self.recorded_sync_states[event]
276
+
277
+ def update_seq_num(self, stream: StreamId, seq_num: SeqNum) -> None:
278
+ self._ensure_stream_exists(stream)
279
+ self.current_sync_states[stream][stream] = seq_num
280
+
281
+ def record_state(self, event: EventId, stream: StreamId) -> None:
282
+ self._ensure_event_exists(event)
283
+ self._ensure_stream_exists(stream)
284
+ self.recorded_sync_states[event] = self.current_sync_states[stream].copy()
285
+
286
+ def _state_wait_for_other(
287
+ self, state: Dict[StreamId, SeqNum], other: Dict[StreamId, SeqNum]
288
+ ) -> None:
289
+ for stream, seq_num in other.items():
290
+ state[stream] = max(state.get(stream, -1), seq_num)
291
+
292
+ def stream_wait_for_event(self, stream: StreamId, event: EventId) -> None:
293
+ self._ensure_stream_exists(stream)
294
+ self._ensure_event_exists(event)
295
+ self._state_wait_for_other(
296
+ self.current_sync_states[stream], self.recorded_sync_states[event]
297
+ )
298
+
299
+ def all_streams_wait_for_event(self, event: EventId) -> None:
300
+ self._ensure_event_exists(event)
301
+ for stream in self.current_sync_states.keys():
302
+ self.stream_wait_for_event(stream, event)
303
+
304
+ self._state_wait_for_other(
305
+ self.host_sync_state, self.recorded_sync_states[event]
306
+ )
307
+
308
+ def all_streams_wait_for_stream(self, stream: StreamId) -> None:
309
+ self._ensure_stream_exists(stream)
310
+ for state in self.current_sync_states.values():
311
+ self._state_wait_for_other(state, self.current_sync_states[stream])
312
+
313
+ self._state_wait_for_other(
314
+ self.host_sync_state, self.current_sync_states[stream]
315
+ )
316
+
317
+ def sync_all_streams(self) -> None:
318
+ for stream, state in self.current_sync_states.items():
319
+ self.host_sync_state[stream] = state[stream]
320
+
321
+ for state in self.current_sync_states.values():
322
+ self._state_wait_for_other(state, self.host_sync_state)
323
+
324
+ def is_ordered_after(
325
+ self, current_stream: StreamId, seq_num: SeqNum, other_stream: StreamId
326
+ ) -> bool:
327
+ self._ensure_stream_exists(current_stream)
328
+ self._ensure_stream_exists(other_stream)
329
+ return seq_num <= self.current_sync_states[current_stream].get(other_stream, -1)
330
+
331
+
332
+ class EventHandler:
333
+ """Analyzes CSAN trace for synchronization errors.
334
+
335
+ Stores information on each stream's synchronizations with other streams as well
336
+ as tensor accesses to determine whether a given kernel launch might cause a
337
+ data race.
338
+ """
339
+
340
+ def __init__(self):
341
+ self.tensors_accessed = _TensorsAccessed()
342
+ self.syncs = StreamSynchronizations()
343
+ self.seq_num: SeqNum = 0
344
+
345
+ def _handle_kernel_launch(
346
+ self,
347
+ stream: StreamId,
348
+ read_only: Set[DataPtr],
349
+ read_write: Set[DataPtr],
350
+ outputs: Set[DataPtr],
351
+ operator: str,
352
+ tensor_aliases: Dict[int, List[str]],
353
+ ) -> List[SynchronizationError]:
354
+ def check_conflict(
355
+ data_ptr: DataPtr, current_access: Access, previous_access: Optional[Access]
356
+ ) -> None:
357
+ if previous_access is None:
358
+ return
359
+ if not self.syncs.is_ordered_after(
360
+ current_access.stream, previous_access.seq_num, previous_access.stream
361
+ ):
362
+ error_list.append(
363
+ UnsynchronizedAccessError(
364
+ data_ptr,
365
+ self.tensors_accessed.get_allocation_stack_trace(data_ptr),
366
+ current_access,
367
+ previous_access,
368
+ )
369
+ )
370
+
371
+ error_list: List[SynchronizationError] = []
372
+ self.seq_num += 1
373
+ self.syncs.update_seq_num(stream, self.seq_num)
374
+ stack_trace = traceback.StackSummary.extract(
375
+ traceback.walk_stack(inspect.currentframe()), lookup_lines=False
376
+ )
377
+ # The stack trace generated in this way is in the inverse order, so it must be
378
+ # reversed.
379
+ stack_trace.reverse()
380
+
381
+ for data_ptr in read_only:
382
+ self.tensors_accessed.ensure_tensor_exists(data_ptr)
383
+ current_access = Access(
384
+ AccessType.READ,
385
+ self.seq_num,
386
+ stream,
387
+ operator,
388
+ tensor_aliases[data_ptr],
389
+ data_ptr in outputs,
390
+ stack_trace,
391
+ )
392
+ check_conflict(
393
+ data_ptr, current_access, self.tensors_accessed.get_write(data_ptr)
394
+ )
395
+ self.tensors_accessed.add_read(data_ptr, current_access)
396
+
397
+ for data_ptr in read_write:
398
+ self.tensors_accessed.ensure_tensor_exists(data_ptr)
399
+ current_access = Access(
400
+ AccessType.WRITE,
401
+ self.seq_num,
402
+ stream,
403
+ operator,
404
+ tensor_aliases[data_ptr],
405
+ data_ptr in outputs,
406
+ stack_trace,
407
+ )
408
+ if self.tensors_accessed.were_there_reads_since_last_write(data_ptr):
409
+ for previous_access in self.tensors_accessed.get_reads(data_ptr):
410
+ check_conflict(data_ptr, current_access, previous_access)
411
+ else:
412
+ check_conflict(
413
+ data_ptr, current_access, self.tensors_accessed.get_write(data_ptr)
414
+ )
415
+ self.tensors_accessed.set_write(data_ptr, current_access)
416
+
417
+ return error_list
418
+
419
+ def _handle_event_creation(self, event: EventId) -> None:
420
+ self.syncs.create_event(event)
421
+
422
+ def _handle_event_deletion(self, event: EventId) -> None:
423
+ self.syncs.delete_event(event)
424
+
425
+ def _handle_event_record(self, event: EventId, stream: StreamId) -> None:
426
+ self.syncs.record_state(event, stream)
427
+
428
+ def _handle_event_wait(self, event: EventId, stream: StreamId) -> None:
429
+ self.syncs.stream_wait_for_event(stream, event)
430
+
431
+ def _handle_memory_allocation(self, data_ptr: DataPtr) -> None:
432
+ self.tensors_accessed.ensure_tensor_does_not_exist(data_ptr)
433
+ stack_trace = traceback.StackSummary.extract(
434
+ traceback.walk_stack(inspect.currentframe()), lookup_lines=False
435
+ )
436
+ # The stack trace generated in this way is in the inverse order, so it must be
437
+ # reversed.
438
+ stack_trace.reverse()
439
+ self.tensors_accessed.create_tensor(
440
+ data_ptr,
441
+ stack_trace,
442
+ )
443
+
444
+ def _handle_memory_deallocation(self, data_ptr: DataPtr) -> None:
445
+ self.tensors_accessed.ensure_tensor_exists(data_ptr)
446
+ self.tensors_accessed.delete_tensor(data_ptr)
447
+
448
+ def _handle_stream_creation(self, stream: StreamId) -> None:
449
+ self.syncs.create_stream(stream)
450
+
451
+ def _handle_device_synchronization(self) -> None:
452
+ self.syncs.sync_all_streams()
453
+
454
+ def _handle_stream_synchronization(self, stream: StreamId) -> None:
455
+ self.syncs.all_streams_wait_for_stream(stream)
456
+
457
+ def _handle_event_synchronization(self, event: EventId) -> None:
458
+ self.syncs.all_streams_wait_for_event(event)
459
+
460
+
461
+ def zip_by_key(a: Dict[TK, TVa], b: Dict[TK, TVb]) -> Iterator[Tuple[TK, TVa, TVb]]:
462
+ for arg, value in a.items():
463
+ if arg in b:
464
+ yield arg, value, b[arg]
465
+
466
+
467
+ def zip_arguments(
468
+ schema: torch.FunctionSchema, args: Tuple[Any, ...], kwargs: Dict[str, Any]
469
+ ) -> Iterator[Tuple[torch.Argument, Any]]:
470
+ schema_args = schema.arguments[: len(args)]
471
+ schema_kwargs = {arg.name: arg for arg in schema.arguments[len(args) :]}
472
+
473
+ yield from zip(schema_args, args)
474
+
475
+ for _, argument, value in zip_by_key(schema_kwargs, kwargs):
476
+ yield (argument, value)
477
+
478
+
479
+ class ArgumentHandler:
480
+ def __init__(self):
481
+ self.dataptrs_read: Set[DataPtr] = set()
482
+ self.dataptrs_written: Set[DataPtr] = set()
483
+ self.tensor_aliases: Dict[DataPtr, List[str]] = dict()
484
+ self.outputs: Set[DataPtr] = set()
485
+
486
+ def _handle_argument(
487
+ self,
488
+ value: Any,
489
+ is_write: bool,
490
+ name: Optional[str] = None,
491
+ is_output: bool = False,
492
+ ) -> None:
493
+ if isinstance(value, torch.Tensor) and value.is_cuda:
494
+ data_ptr = value.data_ptr()
495
+ if is_write:
496
+ self.dataptrs_written.add(data_ptr)
497
+ else:
498
+ self.dataptrs_read.add(data_ptr)
499
+
500
+ self.tensor_aliases.setdefault(data_ptr, [])
501
+ if name is not None:
502
+ self.tensor_aliases[data_ptr].append(name)
503
+ if is_output:
504
+ self.outputs.add(data_ptr)
505
+
506
+ def parse_inputs(
507
+ self,
508
+ schema: torch.FunctionSchema,
509
+ args: Tuple[Any, ...],
510
+ kwargs: Dict[str, Any],
511
+ ) -> None:
512
+ for argument, value in zip_arguments(schema, args, kwargs):
513
+ is_write = argument.alias_info is not None and argument.alias_info.is_write
514
+ pytree.tree_map_(
515
+ functools.partial(
516
+ self._handle_argument, is_write=is_write, name=argument.name
517
+ ),
518
+ value,
519
+ )
520
+
521
+ def parse_outputs(self, outputs: Any) -> None:
522
+ pytree.tree_map_(
523
+ functools.partial(self._handle_argument, is_write=True, is_output=True),
524
+ outputs,
525
+ )
526
+
527
+
528
+ class CUDASanitizerDispatchMode(TorchDispatchMode):
529
+ def __init__(self):
530
+ self.event_handler = EventHandler()
531
+ torch._C._activate_cuda_trace()
532
+ cuda_trace.register_callback_for_cuda_event_creation(
533
+ self.event_handler._handle_event_creation
534
+ )
535
+ cuda_trace.register_callback_for_cuda_event_deletion(
536
+ self.event_handler._handle_event_deletion
537
+ )
538
+ cuda_trace.register_callback_for_cuda_event_record(
539
+ self.event_handler._handle_event_record
540
+ )
541
+ cuda_trace.register_callback_for_cuda_event_wait(
542
+ self.event_handler._handle_event_wait
543
+ )
544
+ cuda_trace.register_callback_for_cuda_memory_allocation(
545
+ self.event_handler._handle_memory_allocation
546
+ )
547
+ cuda_trace.register_callback_for_cuda_memory_deallocation(
548
+ self.event_handler._handle_memory_deallocation
549
+ )
550
+ cuda_trace.register_callback_for_cuda_stream_creation(
551
+ self.event_handler._handle_stream_creation
552
+ )
553
+ cuda_trace.register_callback_for_cuda_device_synchronization(
554
+ self.event_handler._handle_device_synchronization
555
+ )
556
+ cuda_trace.register_callback_for_cuda_stream_synchronization(
557
+ self.event_handler._handle_stream_synchronization
558
+ )
559
+ cuda_trace.register_callback_for_cuda_event_synchronization(
560
+ self.event_handler._handle_event_synchronization
561
+ )
562
+
563
+ def __torch_dispatch__(self, func, types, args=(), kwargs=None):
564
+ if kwargs is None:
565
+ kwargs = {}
566
+
567
+ argument_handler = ArgumentHandler()
568
+ argument_handler.parse_inputs(func._schema, args, kwargs)
569
+
570
+ outputs = func(*args, **kwargs)
571
+
572
+ argument_handler.parse_outputs(outputs)
573
+ errors = self.event_handler._handle_kernel_launch(
574
+ torch.cuda.current_stream().cuda_stream,
575
+ argument_handler.dataptrs_read - argument_handler.dataptrs_written,
576
+ argument_handler.dataptrs_written,
577
+ argument_handler.outputs,
578
+ func._schema,
579
+ argument_handler.tensor_aliases,
580
+ )
581
+ if errors:
582
+ for error in errors:
583
+ print(error, file=sys.stderr)
584
+ raise CUDASanitizerErrors(errors)
585
+
586
+ return outputs
587
+
588
+
589
+ class CUDASanitizer:
590
+ """Manages the lifetime of a CUDASanitizer dispatch mode object.
591
+
592
+ The CUDASanitizer class wraps the entering/exiting functions of the dispatch mode
593
+ context manager in the enable function/destructor, respectively. This is to
594
+ explicitly set the lifetime of the dispatch mode object to that of the application.
595
+ This approach was deemed more elegant than using the atexit module.
596
+ """
597
+
598
+ def __init__(self):
599
+ self.dispatch = CUDASanitizerDispatchMode()
600
+ self.enabled = False
601
+
602
+ def enable(self):
603
+ self.dispatch.__enter__()
604
+ self.enabled = True
605
+
606
+ def __del__(self):
607
+ if self.enabled:
608
+ self.dispatch.__exit__(None, None, None)
609
+
610
+
611
+ def enable_cuda_sanitizer():
612
+ """Enable CUDA Sanitizer.
613
+
614
+ The sanitizer will begin to analyze low-level CUDA calls invoked by torch functions
615
+ for synchronization errors. All data races found will be printed to the standard
616
+ error output along with stack traces of suspected causes. For best results, the
617
+ sanitizer should be enabled at the very beginning of the program.
618
+ """
619
+ cuda_sanitizer.enable()
620
+
621
+
622
+ cuda_sanitizer = CUDASanitizer()
env-llmeval/lib/python3.10/site-packages/torch/cuda/_utils.py ADDED
@@ -0,0 +1,54 @@
1
+ from typing import Any
2
+
3
+ import torch
4
+
5
+ # The _get_device_index has been moved to torch.utils._get_device_index
6
+ from torch._utils import _get_device_index as _torch_get_device_index
7
+
8
+
9
+ def _get_device_index(
10
+ device: Any, optional: bool = False, allow_cpu: bool = False
11
+ ) -> int:
12
+ r"""Get the device index from :attr:`device`, which can be a torch.device object, a Python integer, or ``None``.
13
+
14
+ If :attr:`device` is a torch.device object, returns the device index if it
15
+ is a CUDA device. Note that for a CUDA device without a specified index,
16
+ i.e., ``torch.device('cuda')``, this will return the current default CUDA
17
+ device if :attr:`optional` is ``True``. If :attr:`allow_cpu` is ``True``,
18
+ CPU devices will be accepted and ``-1`` will be returned in this case.
19
+
20
+ If :attr:`device` is a Python integer, it is returned as is.
21
+
22
+ If :attr:`device` is ``None``, this will return the current default CUDA
23
+ device if :attr:`optional` is ``True``.
24
+ """
25
+ if isinstance(device, int):
26
+ return device
27
+ if isinstance(device, str):
28
+ device = torch.device(device)
29
+ if isinstance(device, torch.device):
30
+ if allow_cpu:
31
+ if device.type not in ["cuda", "cpu"]:
32
+ raise ValueError(f"Expected a cuda or cpu device, but got: {device}")
33
+ elif device.type != "cuda":
34
+ raise ValueError(f"Expected a cuda device, but got: {device}")
35
+ if not torch.jit.is_scripting():
36
+ if isinstance(device, torch.cuda.device):
37
+ return device.idx
38
+ return _torch_get_device_index(device, optional, allow_cpu)
39
+
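+ # Illustrative sketch (not part of the upstream file) of the behaviour documented
+ # above, assuming a CUDA build with at least two devices:
+ #
+ #     _get_device_index(torch.device("cuda:1"))       # -> 1
+ #     _get_device_index(3)                             # -> 3 (ints pass through)
+ #     _get_device_index("cpu", allow_cpu=True)         # -> -1
+ #     _get_device_index(None, optional=True)           # -> current default CUDA device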
40
+
41
+ def _dummy_type(name: str) -> type:
42
+ def get_err_fn(is_init: bool):
43
+ def err_fn(obj, *args, **kwargs):
44
+ if is_init:
45
+ class_name = obj.__class__.__name__
46
+ else:
47
+ class_name = obj.__name__
48
+ raise RuntimeError(f"Tried to instantiate dummy base class {class_name}")
49
+
50
+ return err_fn
51
+
52
+ return type(
53
+ name, (object,), {"__init__": get_err_fn(True), "__new__": get_err_fn(False)}
54
+ )
env-llmeval/lib/python3.10/site-packages/torch/cuda/amp/__init__.py ADDED
@@ -0,0 +1,9 @@
1
+ from .autocast_mode import autocast, custom_bwd, custom_fwd
2
+ from .grad_scaler import GradScaler
3
+
4
+ __all__ = [
5
+ "autocast",
6
+ "custom_bwd",
7
+ "custom_fwd",
8
+ "GradScaler",
9
+ ]
env-llmeval/lib/python3.10/site-packages/torch/cuda/amp/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (354 Bytes). View file
 
env-llmeval/lib/python3.10/site-packages/torch/cuda/amp/__pycache__/autocast_mode.cpython-310.pyc ADDED
Binary file (4.79 kB). View file
 
env-llmeval/lib/python3.10/site-packages/torch/cuda/amp/__pycache__/common.cpython-310.pyc ADDED
Binary file (436 Bytes). View file
 
env-llmeval/lib/python3.10/site-packages/torch/cuda/amp/__pycache__/grad_scaler.cpython-310.pyc ADDED
Binary file (23.4 kB). View file
 
env-llmeval/lib/python3.10/site-packages/torch/cuda/amp/autocast_mode.py ADDED
@@ -0,0 +1,144 @@
1
+ import collections
2
+ import functools
3
+
4
+ import torch
5
+
6
+ try:
7
+ import numpy as np
8
+
9
+ HAS_NUMPY = True
10
+ except ModuleNotFoundError:
11
+ np = None # type: ignore[assignment]
12
+ from typing import Any
13
+
14
+ __all__ = ["autocast", "custom_fwd", "custom_bwd"]
15
+
16
+
17
+ class autocast(torch.amp.autocast_mode.autocast):
18
+ r"""See :class:`torch.autocast`.
19
+
20
+ ``torch.cuda.amp.autocast(args...)`` is equivalent to ``torch.autocast("cuda", args...)``
21
+ """
22
+
23
+ def __init__(
24
+ self,
25
+ enabled: bool = True,
26
+ dtype: torch.dtype = torch.float16,
27
+ cache_enabled: bool = True,
28
+ ):
29
+ if torch._jit_internal.is_scripting():
30
+ self._enabled = enabled
31
+ self.device = "cuda"
32
+ self.fast_dtype = dtype
33
+ return
34
+ super().__init__(
35
+ "cuda", enabled=enabled, dtype=dtype, cache_enabled=cache_enabled
36
+ )
37
+
38
+ def __enter__(self):
39
+ if torch._jit_internal.is_scripting():
40
+ return self
41
+ return super().__enter__()
42
+
43
+ # TODO: discuss a unified TorchScript-friendly API for autocast
44
+ def __exit__(self, exc_type: Any, exc_val: Any, exc_tb: Any): # type: ignore[override]
45
+ if torch._jit_internal.is_scripting():
46
+ return
47
+ return super().__exit__(exc_type, exc_val, exc_tb)
48
+
49
+ def __call__(self, func):
50
+ if torch._jit_internal.is_scripting():
51
+ return func
52
+ return super().__call__(func)
53
+
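+ # Illustrative sketch (not part of the upstream file): typical forward-pass use of
+ # the class above, which the docstring notes is equivalent to torch.autocast("cuda"):
+ #
+ #     with torch.cuda.amp.autocast():
+ #         output = model(input)
+ #         loss = loss_fn(output, target)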
54
+
55
+ # Casts Tensors and containers of Tensors. Special-cases passthroughs for strings and np.ndarrays, which
56
+ # may be falsely detected as "Iterables."
57
+ def _cast(value, dtype):
58
+ if isinstance(value, torch.Tensor):
59
+ is_eligible = (
60
+ value.is_floating_point()
61
+ and value.is_cuda
62
+ and (value.dtype is not torch.float64)
63
+ )
64
+ return value.to(dtype) if is_eligible else value
65
+ elif isinstance(value, (str, bytes)):
66
+ return value
67
+ elif HAS_NUMPY and isinstance(value, np.ndarray):
68
+ return value
69
+ elif isinstance(value, collections.abc.Mapping):
70
+ return {_cast(k, dtype): _cast(v, dtype) for k, v in value.items()}
71
+ elif isinstance(value, collections.abc.Iterable):
72
+ iterable = (_cast(v, dtype) for v in value)
73
+ if isinstance(value, (list, tuple)):
74
+ return type(value)(iterable)
75
+ else:
76
+ return iterable
77
+ else:
78
+ return value
79
+
80
+
81
+ # custom_fwd is a decorator that may or may not be used with arguments, following
82
+ # https://github.com/dabeaz/python-cookbook/tree/master/src/9/defining_a_decorator_that_takes_an_optional_argument.
83
+ # this works:
84
+ # @custom_fwd
85
+ # def forward(...):
86
+ # this also works:
87
+ # @custom_fwd(cast_inputs=torch.float)
88
+ # def forward(...):
89
+ def custom_fwd(fwd=None, *, cast_inputs=None):
90
+ """
91
+ Create a helper decorator for ``forward`` methods of custom autograd functions.
92
+
93
+ Autograd functions are subclasses of :class:`torch.autograd.Function`.
94
+ See the :ref:`example page<amp-custom-examples>` for more detail.
95
+
96
+ Args:
97
+ cast_inputs (:class:`torch.dtype` or None, optional, default=None): If not ``None``,
98
+ when ``forward`` runs in an autocast-enabled region, casts incoming
99
+ floating-point CUDA Tensors to the target dtype (non-floating-point Tensors are not affected),
100
+ then executes ``forward`` with autocast disabled.
101
+ If ``None``, ``forward``'s internal ops execute with the current autocast state.
102
+
103
+ .. note::
104
+ If the decorated ``forward`` is called outside an autocast-enabled region,
105
+ :func:`custom_fwd<custom_fwd>` is a no-op and ``cast_inputs`` has no effect.
106
+ """
107
+ if fwd is None:
108
+ return functools.partial(custom_fwd, cast_inputs=cast_inputs)
109
+
110
+ @functools.wraps(fwd)
111
+ def decorate_fwd(*args, **kwargs):
112
+ args[0]._dtype = torch.get_autocast_gpu_dtype()
113
+ if cast_inputs is None:
114
+ args[0]._fwd_used_autocast = torch.is_autocast_enabled()
115
+ return fwd(*args, **kwargs)
116
+ else:
117
+ autocast_context = torch.is_autocast_enabled()
118
+ args[0]._fwd_used_autocast = False
119
+ if autocast_context:
120
+ with autocast(enabled=False):
121
+ return fwd(*_cast(args, cast_inputs), **_cast(kwargs, cast_inputs))
122
+ else:
123
+ return fwd(*args, **kwargs)
124
+
125
+ return decorate_fwd
126
+
127
+
128
+ # Autograd ensures incoming gradients are the same type as forward outputs. Allowing a separate
129
+ # cast_inputs argument on custom_bwd is unnecessary and could cause errors if it doesn't match
130
+ # cast_inputs supplied to custom_fwd.
131
+ def custom_bwd(bwd):
132
+ """Create a helper decorator for backward methods of custom autograd functions.
133
+
134
+ Autograd functions are subclasses of :class:`torch.autograd.Function`.
135
+ Ensures that ``backward`` executes with the same autocast state as ``forward``.
136
+ See the :ref:`example page<amp-custom-examples>` for more detail.
137
+ """
138
+
139
+ @functools.wraps(bwd)
140
+ def decorate_bwd(*args, **kwargs):
141
+ with autocast(enabled=args[0]._fwd_used_autocast, dtype=args[0]._dtype):
142
+ return bwd(*args, **kwargs)
143
+
144
+ return decorate_bwd
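+ # Illustrative sketch (not part of the upstream file): pairing custom_fwd and
+ # custom_bwd on a custom autograd Function, in the spirit of the linked
+ # amp-custom-examples page; MyMM is a hypothetical name:
+ #
+ #     class MyMM(torch.autograd.Function):
+ #         @staticmethod
+ #         @custom_fwd(cast_inputs=torch.float32)
+ #         def forward(ctx, a, b):
+ #             ctx.save_for_backward(a, b)
+ #             return a.mm(b)
+ #
+ #         @staticmethod
+ #         @custom_bwd
+ #         def backward(ctx, grad):
+ #             a, b = ctx.saved_tensors
+ #             return grad.mm(b.t()), a.t().mm(grad)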
env-llmeval/lib/python3.10/site-packages/torch/cuda/amp/common.py ADDED
@@ -0,0 +1,9 @@
1
+ from importlib.util import find_spec
2
+
3
+ import torch
4
+
5
+ __all__ = ["amp_definitely_not_available"]
6
+
7
+
8
+ def amp_definitely_not_available():
9
+ return not (torch.cuda.is_available() or find_spec("torch_xla"))
env-llmeval/lib/python3.10/site-packages/torch/cuda/amp/grad_scaler.py ADDED
@@ -0,0 +1,679 @@
1
+ from __future__ import annotations
2
+
3
+ import inspect
4
+ import warnings
5
+ from collections import abc, defaultdict
6
+ from enum import Enum
7
+ from typing import Any, cast, Dict, Iterable, List, Optional, overload, Tuple, Union
8
+
9
+ import torch
10
+ from .common import amp_definitely_not_available
11
+
12
+
13
+ __all__ = ["OptState", "GradScaler"]
14
+
15
+
16
+ class _MultiDeviceReplicator:
17
+ """Lazily serves copies of a tensor to requested devices.
18
+
19
+ Copies are cached per-device.
20
+ """
21
+
22
+ def __init__(self, master_tensor: torch.Tensor) -> None:
23
+ assert master_tensor.is_cuda or master_tensor.device.type == "xla"
24
+ self.master = master_tensor
25
+ self._per_device_tensors: Dict[torch.device, torch.Tensor] = {}
26
+
27
+ def get(self, device: torch.device) -> torch.Tensor:
28
+ retval = self._per_device_tensors.get(device, None)
29
+ if retval is None:
30
+ retval = self.master.to(device=device, non_blocking=True, copy=True)
31
+ self._per_device_tensors[device] = retval
32
+ return retval
33
+
34
+
35
+ # Defines default_factory for GradScaler's _per_optimizer_states defaultdict,
36
+ # as well as associated "enum" values. Prefers defining these at top level because
37
+ # - Lambdas can't be pickled, so we don't want to supply a lambda as the factory.
38
+ # - Defining READY, UNSCALED, STEPPED and _refresh_per_optimizer_state within GradScaler
39
+ # causes a circular reference, which we'd rather avoid.
40
+ class OptState(Enum):
41
+ READY = 0
42
+ UNSCALED = 1
43
+ STEPPED = 2
44
+
45
+
46
+ def _refresh_per_optimizer_state() -> Dict[str, Any]:
47
+ return {"stage": OptState.READY, "found_inf_per_device": {}}
48
+
49
+
50
+ class GradScaler:
51
+ """An instance ``scaler`` of :class:`GradScaler`.
52
+
53
+ Helps perform the steps of gradient scaling
54
+ conveniently.
55
+
56
+ * ``scaler.scale(loss)`` multiplies a given loss by ``scaler``'s current scale factor.
57
+ * ``scaler.step(optimizer)`` safely unscales gradients and calls ``optimizer.step()``.
58
+ * ``scaler.update()`` updates ``scaler``'s scale factor.
59
+
60
+ Example::
61
+
62
+ # Creates a GradScaler once at the beginning of training.
63
+ scaler = GradScaler()
64
+
65
+ for epoch in epochs:
66
+ for input, target in data:
67
+ optimizer.zero_grad()
68
+ output = model(input)
69
+ loss = loss_fn(output, target)
70
+
71
+ # Scales loss. Calls backward() on scaled loss to create scaled gradients.
72
+ scaler.scale(loss).backward()
73
+
74
+ # scaler.step() first unscales gradients of the optimizer's params.
75
+ # If gradients don't contain infs/NaNs, optimizer.step() is then called,
76
+ # otherwise, optimizer.step() is skipped.
77
+ scaler.step(optimizer)
78
+
79
+ # Updates the scale for next iteration.
80
+ scaler.update()
81
+
82
+ See the :ref:`Automatic Mixed Precision examples<amp-examples>` for usage
83
+ (along with autocasting) in more complex cases like gradient clipping, gradient accumulation, gradient penalty,
84
+ and multiple losses/optimizers.
85
+
86
+ ``scaler`` dynamically estimates the scale factor each iteration. To minimize gradient underflow,
87
+ a large scale factor should be used. However, ``float16`` values can "overflow" (become inf or NaN) if
88
+ the scale factor is too large. Therefore, the optimal scale factor is the largest factor that can be used
89
+ without incurring inf or NaN gradient values.
90
+ ``scaler`` approximates the optimal scale factor over time by checking the gradients for infs and NaNs during every
91
+ ``scaler.step(optimizer)`` (or optional separate ``scaler.unscale_(optimizer)``, see :meth:`unscale_`).
92
+
93
+ * If infs/NaNs are found, ``scaler.step(optimizer)`` skips the underlying ``optimizer.step()`` (so the params
94
+ themselves remain uncorrupted) and ``update()`` multiplies the scale by ``backoff_factor``.
95
+
96
+ * If no infs/NaNs are found, ``scaler.step(optimizer)`` runs the underlying ``optimizer.step()`` as usual.
97
+ If ``growth_interval`` unskipped iterations occur consecutively, ``update()`` multiplies the scale by
98
+ ``growth_factor``.
99
+
100
+ The scale factor often causes infs/NaNs to appear in gradients for the first few iterations as its
101
+ value calibrates. ``scaler.step`` will skip the underlying ``optimizer.step()`` for these
102
+ iterations. After that, step skipping should occur rarely (once every few hundred or thousand iterations).
103
+
104
+ Args:
105
+ init_scale (float, optional, default=2.**16): Initial scale factor.
106
+ growth_factor (float, optional, default=2.0): Factor by which the scale is multiplied during
107
+ :meth:`update` if no inf/NaN gradients occur for ``growth_interval`` consecutive iterations.
108
+ backoff_factor (float, optional, default=0.5): Factor by which the scale is multiplied during
109
+ :meth:`update` if inf/NaN gradients occur in an iteration.
110
+ growth_interval (int, optional, default=2000): Number of consecutive iterations without inf/NaN gradients
111
+ that must occur for the scale to be multiplied by ``growth_factor``.
112
+ enabled (bool, optional): If ``False``, disables gradient scaling. :meth:`step` simply
113
+ invokes the underlying ``optimizer.step()``, and other methods become no-ops.
114
+ Default: ``True``
115
+ """
116
+
117
+ def __init__(
118
+ self,
119
+ init_scale: float = 2.0**16,
120
+ growth_factor: float = 2.0,
121
+ backoff_factor: float = 0.5,
122
+ growth_interval: int = 2000,
123
+ enabled: bool = True,
124
+ ) -> None:
125
+ if enabled and amp_definitely_not_available():
126
+ warnings.warn(
127
+ "torch.cuda.amp.GradScaler is enabled, but CUDA is not available. Disabling."
128
+ )
129
+ self._enabled = False
130
+ else:
131
+ self._enabled = enabled
132
+
133
+ if self._enabled:
134
+ assert growth_factor > 1.0, "The growth factor must be > 1.0."
135
+ assert backoff_factor < 1.0, "The backoff factor must be < 1.0."
136
+
137
+ self._init_scale = init_scale
138
+ # self._scale will be lazily initialized during the first call to scale()
139
+ self._scale: Optional[torch.Tensor] = None
140
+ self._growth_factor = growth_factor
141
+ self._backoff_factor = backoff_factor
142
+ self._growth_interval = growth_interval
143
+ self._init_growth_tracker = 0
144
+ # self._growth_tracker will be lazily initialized during the first call to scale()
145
+ self._growth_tracker: Optional[torch.Tensor] = None
146
+ self._per_optimizer_states: Dict[int, Dict[str, Any]] = defaultdict(
147
+ _refresh_per_optimizer_state
148
+ )
149
+
150
+ def _check_scale_growth_tracker(
151
+ self, funcname: str
152
+ ) -> Tuple[torch.Tensor, torch.Tensor]:
153
+ fix = "This may indicate your script did not use scaler.scale(loss or outputs) earlier in the iteration."
154
+ assert self._scale is not None, (
155
+ f"Attempted {funcname} but _scale is None. " + fix
156
+ )
157
+ assert self._growth_tracker is not None, (
158
+ f"Attempted {funcname} but _growth_tracker is None. " + fix
159
+ )
160
+ return (self._scale, self._growth_tracker)
161
+
162
+ def _lazy_init_scale_growth_tracker(self, dev: torch.device) -> None:
163
+ assert self._growth_tracker is None, "_growth_tracker initialized before _scale"
164
+ self._scale = torch.full((), self._init_scale, dtype=torch.float32, device=dev)
165
+ self._growth_tracker = torch.full(
166
+ (), self._init_growth_tracker, dtype=torch.int32, device=dev
167
+ )
168
+
169
+ @overload
170
+ def scale(self, outputs: torch.Tensor) -> torch.Tensor:
171
+ ...
172
+
173
+ @overload
174
+ def scale(self, outputs: List[torch.Tensor]) -> List[torch.Tensor]:
175
+ ...
176
+
177
+ @overload
178
+ def scale(self, outputs: Tuple[torch.Tensor, ...]) -> Tuple[torch.Tensor, ...]:
179
+ ...
180
+
181
+ @overload
182
+ def scale(self, outputs: Iterable[torch.Tensor]) -> Iterable[torch.Tensor]:
183
+ ...
184
+
185
+ def scale(
186
+ self,
187
+ outputs: Union[torch.Tensor, Iterable[torch.Tensor]],
188
+ ) -> Union[torch.Tensor, Iterable[torch.Tensor]]:
189
+ """
190
+ Multiplies ('scales') a tensor or list of tensors by the scale factor.
191
+
192
+ Returns scaled outputs. If this instance of :class:`GradScaler` is not enabled, outputs are returned
193
+ unmodified.
194
+
195
+ Args:
196
+ outputs (Tensor or iterable of Tensors): Outputs to scale.
197
+ """
198
+ if not self._enabled:
199
+ return outputs
200
+
201
+ # Short-circuit for the common case.
202
+ if isinstance(outputs, torch.Tensor):
203
+ assert outputs.is_cuda or outputs.device.type == "xla"
204
+ if self._scale is None:
205
+ self._lazy_init_scale_growth_tracker(outputs.device)
206
+ assert self._scale is not None
207
+ return outputs * self._scale.to(device=outputs.device, non_blocking=True)
208
+
209
+ # Invoke the more complex machinery only if we're treating multiple outputs.
210
+ stash: List[
211
+ _MultiDeviceReplicator
212
+ ] = [] # holds a reference that can be overwritten by apply_scale
213
+
214
+ def apply_scale(val: Union[torch.Tensor, Iterable[torch.Tensor]]):
215
+ if isinstance(val, torch.Tensor):
216
+ assert val.is_cuda or val.device.type == "xla"
217
+ if len(stash) == 0:
218
+ if self._scale is None:
219
+ self._lazy_init_scale_growth_tracker(val.device)
220
+ assert self._scale is not None
221
+ stash.append(_MultiDeviceReplicator(self._scale))
222
+ return val * stash[0].get(val.device)
223
+ if isinstance(val, abc.Iterable):
224
+ iterable = map(apply_scale, val)
225
+ if isinstance(val, (list, tuple)):
226
+ return type(val)(iterable)
227
+ return iterable
228
+ raise ValueError("outputs must be a Tensor or an iterable of Tensors")
229
+
230
+ return apply_scale(outputs)
231
+
232
+ def _unscale_grads_(
233
+ self,
234
+ optimizer: torch.optim.Optimizer,
235
+ inv_scale: torch.Tensor,
236
+ found_inf: torch.Tensor,
237
+ allow_fp16: bool,
238
+ ) -> Dict[torch.device, torch.Tensor]:
239
+ per_device_inv_scale = _MultiDeviceReplicator(inv_scale)
240
+ per_device_found_inf = _MultiDeviceReplicator(found_inf)
241
+
242
+ # To set up _amp_foreach_non_finite_check_and_unscale_, split grads by device and dtype.
243
+ # There could be hundreds of grads, so we'd like to iterate through them just once.
244
+ # However, we don't know their devices or dtypes in advance.
245
+
246
+ # https://stackoverflow.com/questions/5029934/defaultdict-of-defaultdict
247
+ # Google says mypy struggles with defaultdicts type annotations.
248
+ per_device_and_dtype_grads: Dict[
249
+ torch.device, Dict[torch.dtype, List[torch.Tensor]]
250
+ ] = defaultdict(lambda: defaultdict(list))
251
+ with torch.no_grad():
252
+ for group in optimizer.param_groups:
253
+ for param in group["params"]:
254
+ assert isinstance(param, torch.Tensor)
255
+ if param.grad is None:
256
+ continue
257
+ if (not allow_fp16) and param.grad.dtype == torch.float16:
258
+ raise ValueError("Attempting to unscale FP16 gradients.")
259
+ if param.grad.is_sparse:
260
+ # is_coalesced() == False means the sparse grad has values with duplicate indices.
261
+ # coalesce() deduplicates indices and adds all values that have the same index.
262
+ # For scaled fp16 values, there's a good chance coalescing will cause overflow,
263
+ # so we should check the coalesced _values().
264
+ if param.grad.dtype is torch.float16:
265
+ param.grad = param.grad.coalesce()
266
+ to_unscale = param.grad._values()
267
+ else:
268
+ to_unscale = param.grad
269
+
270
+ # TODO: is there a way to split by device and dtype without appending in the inner loop?
271
+ per_device_and_dtype_grads[to_unscale.device][
272
+ to_unscale.dtype
273
+ ].append(to_unscale)
274
+
275
+ for device, per_dtype_grads in per_device_and_dtype_grads.items():
276
+ for grads in per_dtype_grads.values():
277
+ torch._amp_foreach_non_finite_check_and_unscale_(
278
+ grads,
279
+ per_device_found_inf.get(device),
280
+ per_device_inv_scale.get(device),
281
+ )
282
+
283
+ return per_device_found_inf._per_device_tensors
284
+
285
+ def unscale_(self, optimizer: torch.optim.Optimizer) -> None:
286
+ """
287
+ Divides ("unscales") the optimizer's gradient tensors by the scale factor.
288
+
289
+ :meth:`unscale_` is optional, serving cases where you need to
290
+ :ref:`modify or inspect gradients<working-with-unscaled-gradients>`
291
+ between the backward pass(es) and :meth:`step`.
292
+ If :meth:`unscale_` is not called explicitly, gradients will be unscaled automatically during :meth:`step`.
293
+
294
+ Simple example, using :meth:`unscale_` to enable clipping of unscaled gradients::
295
+
296
+ ...
297
+ scaler.scale(loss).backward()
298
+ scaler.unscale_(optimizer)
299
+ torch.nn.utils.clip_grad_norm_(model.parameters(), max_norm)
300
+ scaler.step(optimizer)
301
+ scaler.update()
302
+
303
+ Args:
304
+ optimizer (torch.optim.Optimizer): Optimizer that owns the gradients to be unscaled.
305
+
306
+ .. note::
307
+ :meth:`unscale_` does not incur a CPU-GPU sync.
308
+
309
+ .. warning::
310
+ :meth:`unscale_` should only be called once per optimizer per :meth:`step` call,
311
+ and only after all gradients for that optimizer's assigned parameters have been accumulated.
312
+ Calling :meth:`unscale_` twice for a given optimizer between each :meth:`step` triggers a RuntimeError.
313
+
314
+ .. warning::
315
+ :meth:`unscale_` may unscale sparse gradients out of place, replacing the ``.grad`` attribute.
316
+ """
317
+ if not self._enabled:
318
+ return
319
+
320
+ self._check_scale_growth_tracker("unscale_")
321
+
322
+ optimizer_state = self._per_optimizer_states[id(optimizer)]
323
+
324
+ if optimizer_state["stage"] is OptState.UNSCALED:
325
+ raise RuntimeError(
326
+ "unscale_() has already been called on this optimizer since the last update()."
327
+ )
328
+ elif optimizer_state["stage"] is OptState.STEPPED:
329
+ raise RuntimeError("unscale_() is being called after step().")
330
+
331
+ # FP32 division can be imprecise for certain compile options, so we carry out the reciprocal in FP64.
332
+ assert self._scale is not None
333
+ inv_scale = self._scale.double().reciprocal().float()
334
+ found_inf = torch.full((), 0.0, dtype=torch.float32, device=self._scale.device)
335
+
336
+ optimizer_state["found_inf_per_device"] = self._unscale_grads_(
337
+ optimizer, inv_scale, found_inf, False
338
+ )
339
+ optimizer_state["stage"] = OptState.UNSCALED
340
+
341
+ def _maybe_opt_step(
342
+ self,
343
+ optimizer: torch.optim.Optimizer,
344
+ optimizer_state: Dict[str, Any],
345
+ *args: Any,
346
+ **kwargs: Any,
347
+ ) -> Optional[float]:
348
+ retval: Optional[float] = None
349
+ if not sum(v.item() for v in optimizer_state["found_inf_per_device"].values()):
350
+ retval = optimizer.step(*args, **kwargs)
351
+ return retval
352
+
353
+ def step(
354
+ self, optimizer: torch.optim.Optimizer, *args: Any, **kwargs: Any
355
+ ) -> Optional[float]:
356
+ """Invoke ``unscale_(optimizer)`` followed by parameter update, if gradients are not infs/NaN.
357
+
358
+ :meth:`step` carries out the following two operations:
359
+
360
+ 1. Internally invokes ``unscale_(optimizer)`` (unless :meth:`unscale_` was explicitly called for ``optimizer``
361
+ earlier in the iteration). As part of the :meth:`unscale_`, gradients are checked for infs/NaNs.
362
+ 2. If no inf/NaN gradients are found, invokes ``optimizer.step()`` using the unscaled
363
+ gradients. Otherwise, ``optimizer.step()`` is skipped to avoid corrupting the params.
364
+
365
+ ``*args`` and ``**kwargs`` are forwarded to ``optimizer.step()``.
366
+
367
+ Returns the return value of ``optimizer.step(*args, **kwargs)``.
368
+
369
+ Args:
370
+ optimizer (torch.optim.Optimizer): Optimizer that applies the gradients.
371
+ args: Any arguments.
372
+ kwargs: Any keyword arguments.
373
+
374
+ .. warning::
375
+ Closure use is not currently supported.
376
+ """
377
+ if not self._enabled:
378
+ return optimizer.step(*args, **kwargs)
379
+
380
+ if "closure" in kwargs:
381
+ raise RuntimeError(
382
+ "Closure use is not currently supported if GradScaler is enabled."
383
+ )
384
+
385
+ self._check_scale_growth_tracker("step")
386
+
387
+ optimizer_state = self._per_optimizer_states[id(optimizer)]
388
+
389
+ if optimizer_state["stage"] is OptState.STEPPED:
390
+ raise RuntimeError(
391
+ "step() has already been called since the last update()."
392
+ )
393
+
394
+ retval: Optional[float] = None
395
+
396
+ if getattr(optimizer, "_step_supports_amp_scaling", False):
397
+ # This optimizer has customized scale-handling logic, so we can call optimizer.step() directly.
398
+ # The contract with custom optimizers is that their step() should accept an additional,
399
+ # optional grad_scaler kwarg. We append self to the kwargs so the custom optimizer has full information:
400
+ # it can query its own state, invoke unscale_ on itself, etc
401
+ # The contract above is being deprecated to avoid introducing `grad_scaler: GradScaler` argument
402
+ # to `Optimizer.step`. The new behavior is going to add two Tensor attributes of `grad_scale`
403
+ # and `found_inf` to the passed optimizer so that the optimizer can utilize those
404
+ # to skip the parameter updates or unscale gradients before updating parameters in
405
+ # the fused kernel, e.g. `FusedAdamMathFunctor`.
406
+ # In this behavior, `GradScaler._check_inf_per_device` is called if `OptState.READY`,
407
+ # while the method is expected to be called by users side, i.e. their optimizers.
408
+ kwargs_ = kwargs
409
+ has_grad_scaler_kwarg = (
410
+ "grad_scaler" in inspect.signature(optimizer.step).parameters
411
+ )
412
+ if has_grad_scaler_kwarg:
413
+ warnings.warn(
414
+ "GradScaler is going to stop passing itself as a keyword argument to the passed "
415
+ "optimizer. In the near future GradScaler registers `grad_scale: Tensor` and "
416
+ "`found_inf: Tensor` to the passed optimizer and let the optimizer use them directly.",
417
+ FutureWarning,
418
+ )
419
+ kwargs_.update({"grad_scaler": self})
420
+ else:
421
+ if optimizer_state["stage"] is OptState.READY:
422
+ self._check_inf_per_device(optimizer)
423
+ scaler = self._get_scale_async()
424
+ assert scaler is not None
425
+ found_inf = cast(
426
+ torch.Tensor,
427
+ sum(
428
+ [
429
+ t.to(scaler.device, non_blocking=True)
430
+ for t in optimizer_state["found_inf_per_device"].values()
431
+ ]
432
+ ),
433
+ )
434
+ optimizer.grad_scale = ( # type: ignore[attr-defined]
435
+ None if optimizer_state["stage"] == OptState.UNSCALED else scaler
436
+ )
437
+ optimizer.found_inf = found_inf # type: ignore[attr-defined]
438
+ retval = optimizer.step(*args, **kwargs_)
439
+ optimizer_state["stage"] = OptState.STEPPED
440
+ if not has_grad_scaler_kwarg:
441
+ del optimizer.grad_scale # type: ignore[attr-defined]
442
+ del optimizer.found_inf # type: ignore[attr-defined]
443
+ return retval
444
+
445
+ if optimizer_state["stage"] is OptState.READY:
446
+ self.unscale_(optimizer)
447
+
448
+ assert (
449
+ len(optimizer_state["found_inf_per_device"]) > 0
450
+ ), "No inf checks were recorded for this optimizer."
451
+
452
+ retval = self._maybe_opt_step(optimizer, optimizer_state, *args, **kwargs)
453
+
454
+ optimizer_state["stage"] = OptState.STEPPED
455
+
456
+ return retval
457
+
458
+ def update(self, new_scale: Optional[Union[float, torch.Tensor]] = None) -> None:
459
+ """Update the scale factor.
460
+
461
+ If any optimizer steps were skipped the scale is multiplied by ``backoff_factor``
462
+ to reduce it. If ``growth_interval`` unskipped iterations occurred consecutively,
463
+ the scale is multiplied by ``growth_factor`` to increase it.
464
+
465
+ Passing ``new_scale`` sets the new scale value manually. (``new_scale`` is not
466
+ used directly, it's used to fill GradScaler's internal scale tensor. So if
467
+ ``new_scale`` was a tensor, later in-place changes to that tensor will not further
468
+ affect the scale GradScaler uses internally.)
469
+
470
+ Args:
471
+ new_scale (float or :class:`torch.cuda.FloatTensor`, optional, default=None): New scale factor.
472
+
473
+ .. warning::
474
+ :meth:`update` should only be called at the end of the iteration, after ``scaler.step(optimizer)`` has
475
+ been invoked for all optimizers used this iteration.
476
+
477
+ .. warning::
478
+ For performance reasons, we do not check the scale factor value to avoid synchronizations,
479
+ so the scale factor is not guaranteed to be above 1. If the scale falls below 1 and/or
480
+ you are seeing NaNs in your gradients or loss, something is likely wrong. For example,
481
+ bf16-pretrained models are often incompatible with AMP/fp16 due to differing dynamic ranges.
482
+ """
483
+ if not self._enabled:
484
+ return
485
+
486
+ _scale, _growth_tracker = self._check_scale_growth_tracker("update")
487
+
488
+ if new_scale is not None:
489
+ assert self._scale is not None
490
+ # Accept a new user-defined scale.
491
+ if isinstance(new_scale, float):
492
+ self._scale.fill_(new_scale)
493
+ else:
494
+ reason = "new_scale should be a float or a 1-element torch.cuda.FloatTensor with requires_grad=False."
495
+ assert isinstance(new_scale, torch.cuda.FloatTensor), reason # type: ignore[attr-defined]
496
+ assert new_scale.numel() == 1, reason
497
+ assert new_scale.requires_grad is False, reason
498
+ self._scale.copy_(new_scale)
499
+ else:
500
+ # Consume shared inf/nan data collected from optimizers to update the scale.
501
+ # If all found_inf tensors are on the same device as self._scale, this operation is asynchronous.
502
+ found_infs = [
503
+ found_inf.to(device=_scale.device, non_blocking=True)
504
+ for state in self._per_optimizer_states.values()
505
+ for found_inf in state["found_inf_per_device"].values()
506
+ ]
507
+
508
+ assert len(found_infs) > 0, "No inf checks were recorded prior to update."
509
+
510
+ found_inf_combined = found_infs[0]
511
+ if len(found_infs) > 1:
512
+ for i in range(1, len(found_infs)):
513
+ found_inf_combined += found_infs[i]
514
+
515
+ torch._amp_update_scale_(
516
+ _scale,
517
+ _growth_tracker,
518
+ found_inf_combined,
519
+ self._growth_factor,
520
+ self._backoff_factor,
521
+ self._growth_interval,
522
+ )
523
+
524
+ # To prepare for next iteration, clear the data collected from optimizers this iteration.
525
+ self._per_optimizer_states = defaultdict(_refresh_per_optimizer_state)
526
+
527
+ def _get_scale_async(self) -> Optional[torch.Tensor]:
528
+ return self._scale
529
+
530
+ def get_scale(self) -> float:
531
+ """Return a Python float containing the current scale, or 1.0 if scaling is disabled.
532
+
533
+ .. warning::
534
+ :meth:`get_scale` incurs a CPU-GPU sync.
535
+ """
536
+ if self._enabled:
537
+ return (
538
+ self._init_scale
539
+ if (scale := self._get_scale_async()) is None
540
+ else cast(float, scale.item())
541
+ )
542
+ return 1.0
543
+
544
+ def get_growth_factor(self) -> float:
545
+ r"""Return a Python float containing the scale growth factor."""
546
+ return self._growth_factor
547
+
548
+ def set_growth_factor(self, new_factor: float) -> None:
549
+ r"""Set a new scale growth factor.
550
+
551
+ Args:
552
+ new_factor (float): Value to use as the new scale growth factor.
553
+ """
554
+ self._growth_factor = new_factor
555
+
556
+ def get_backoff_factor(self) -> float:
557
+ r"""Return a Python float containing the scale backoff factor."""
558
+ return self._backoff_factor
559
+
560
+ def set_backoff_factor(self, new_factor: float) -> None:
561
+ r"""Set a new scale backoff factor.
562
+
563
+ Args:
564
+ new_factor (float): Value to use as the new scale backoff factor.
565
+ """
566
+ self._backoff_factor = new_factor
567
+
568
+ def get_growth_interval(self) -> int:
569
+ r"""Return a Python int containing the growth interval."""
570
+ return self._growth_interval
571
+
572
+ def set_growth_interval(self, new_interval: int) -> None:
573
+ r"""Set a new growth interval.
574
+
575
+ Args:
576
+ new_interval (int): Value to use as the new growth interval.
577
+ """
578
+ self._growth_interval = new_interval
579
+
580
+ def _get_growth_tracker(self) -> int:
581
+ if self._enabled:
582
+ return (
583
+ self._init_growth_tracker
584
+ if self._growth_tracker is None
585
+ else cast(int, self._growth_tracker.item())
586
+ )
587
+ return 0
588
+
589
+ def is_enabled(self) -> bool:
590
+ r"""Return a bool indicating whether this instance is enabled."""
591
+ return self._enabled
592
+
593
+ def state_dict(self) -> Dict[str, Any]:
594
+ r"""Return the state of the scaler as a :class:`dict`.
595
+
596
+ It contains five entries:
597
+
598
+ * ``"scale"`` - a Python float containing the current scale
599
+ * ``"growth_factor"`` - a Python float containing the current growth factor
600
+ * ``"backoff_factor"`` - a Python float containing the current backoff factor
601
+ * ``"growth_interval"`` - a Python int containing the current growth interval
602
+ * ``"_growth_tracker"`` - a Python int containing the number of recent consecutive unskipped steps.
603
+
604
+ If this instance is not enabled, returns an empty dict.
605
+
606
+ .. note::
607
+ If you wish to checkpoint the scaler's state after a particular iteration, :meth:`state_dict`
608
+ should be called after :meth:`update`.
609
+ """
610
+ if self._enabled:
611
+ return {
612
+ "scale": self.get_scale(),
613
+ "growth_factor": self._growth_factor,
614
+ "backoff_factor": self._backoff_factor,
615
+ "growth_interval": self._growth_interval,
616
+ "_growth_tracker": self._get_growth_tracker(),
617
+ }
618
+ return {}
619
+
620
+ def load_state_dict(self, state_dict: Dict[str, Any]) -> None:
621
+ r"""Load the scaler state.
622
+
623
+ If this instance is disabled, :meth:`load_state_dict` is a no-op.
624
+
625
+ Args:
626
+ state_dict(dict): scaler state. Should be an object returned from a call to :meth:`state_dict`.
627
+ """
628
+ if not self._enabled:
629
+ return
630
+
631
+ if len(state_dict) == 0:
632
+ raise RuntimeError(
633
+ "The source state dict is empty, possibly because it was saved "
634
+ "from a disabled instance of GradScaler."
635
+ )
636
+
637
+ self._init_scale = cast(float, state_dict["scale"])
638
+ if self._scale is not None:
639
+ self._scale.fill_(state_dict["scale"])
640
+ self._growth_factor = cast(float, state_dict["growth_factor"])
641
+ self._backoff_factor = cast(float, state_dict["backoff_factor"])
642
+ self._growth_interval = cast(int, state_dict["growth_interval"])
643
+ self._init_growth_tracker = cast(int, state_dict["_growth_tracker"])
644
+ if self._growth_tracker is not None:
645
+ self._growth_tracker.fill_(state_dict["_growth_tracker"])
646
+
647
+ def __getstate__(self) -> Dict[str, Any]:
648
+ state = self.__dict__.copy()
649
+ if self._enabled:
650
+ assert len(self._per_optimizer_states) == 0, (
651
+ "A GradScaler instance may only be pickled at the beginning "
652
+ "of an iteration, or at the end after scaler.update()."
653
+ )
654
+ # Pickling _scale and _growth_tracker Tensors directly triggers
655
+ # "warnings.warn("pickle support for Storage will be removed in 1.5..."
656
+ # so instead, we set the unpickled instance up to reinitialize them lazily.
657
+ state["_init_scale"] = self.get_scale()
658
+ state["_init_growth_tracker"] = self._get_growth_tracker()
659
+ state["_scale"] = None
660
+ state["_growth_tracker"] = None
661
+ return state
662
+
663
+ def __setstate__(self, state: Dict[str, Any]) -> None:
664
+ self.__dict__.update(state)
665
+
666
+ def _check_inf_per_device(self, optimizer: torch.optim.Optimizer) -> Dict[str, Any]:
667
+ _scale, _ = self._check_scale_growth_tracker("_check_inf_per_device")
668
+
669
+ dummy_inv_scale = torch.full((), 1.0, dtype=torch.float32, device=_scale.device)
670
+ found_inf = torch.full((), 0.0, dtype=torch.float32, device=_scale.device)
671
+
672
+ self._per_optimizer_states[id(optimizer)][
673
+ "found_inf_per_device"
674
+ ] = self._unscale_grads_(optimizer, dummy_inv_scale, found_inf, True)
675
+
676
+ return self._per_optimizer_states[id(optimizer)]["found_inf_per_device"]
677
+
678
+ def _found_inf_per_device(self, optimizer: torch.optim.Optimizer) -> Dict[str, Any]:
679
+ return self._per_optimizer_states[id(optimizer)]["found_inf_per_device"]
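The state_dict/load_state_dict pair above composes with ordinary checkpointing. A minimal sketch, assuming a CUDA device is available and using a toy model and optimizer purely for illustration; per the note on state_dict, the scaler is saved after update():

    import torch

    model = torch.nn.Linear(4, 4).cuda()                     # illustrative module
    optimizer = torch.optim.SGD(model.parameters(), lr=0.1)  # illustrative optimizer
    scaler = torch.cuda.amp.GradScaler()

    # ... training loop: scaler.scale(loss).backward(); scaler.step(optimizer); scaler.update() ...

    # Checkpoint after update(), so the saved scale reflects this iteration.
    torch.save(
        {
            "model": model.state_dict(),
            "optimizer": optimizer.state_dict(),
            "scaler": scaler.state_dict(),  # scale, growth_factor, backoff_factor, growth_interval, _growth_tracker
        },
        "ckpt.pt",
    )

    # Resume later.
    ckpt = torch.load("ckpt.pt")
    scaler.load_state_dict(ckpt["scaler"])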
env-llmeval/lib/python3.10/site-packages/torch/cuda/comm.py ADDED
@@ -0,0 +1,18 @@
1
+ # The functions here have been moved to torch.nn.parallel.comm
2
+ from torch.nn.parallel.comm import (
3
+ broadcast,
4
+ broadcast_coalesced,
5
+ gather,
6
+ reduce_add,
7
+ reduce_add_coalesced,
8
+ scatter,
9
+ )
10
+
11
+ __all__ = [
12
+ "broadcast",
13
+ "broadcast_coalesced",
14
+ "reduce_add",
15
+ "reduce_add_coalesced",
16
+ "scatter",
17
+ "gather",
18
+ ]
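Since these are thin re-exports of torch.nn.parallel.comm, old torch.cuda.comm call sites keep working. A minimal broadcast/gather round-trip sketch, assuming at least two visible CUDA devices:

    import torch
    from torch.cuda import comm

    t = torch.arange(4, dtype=torch.float32, device="cuda:0")
    copies = comm.broadcast(t, devices=[0, 1])  # tuple with one copy of t per listed device
    gathered = comm.gather(copies, dim=0)       # concatenate the copies back along dim 0
    print(gathered.shape)                       # torch.Size([8])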
env-llmeval/lib/python3.10/site-packages/torch/cuda/graphs.py ADDED
@@ -0,0 +1,476 @@
1
+ import gc
2
+
3
+ import torch
4
+ from torch.utils import _pytree
5
+ from ._utils import _dummy_type
6
+
7
+ if not hasattr(torch._C, "_CudaStreamBase"):
8
+ # Define dummy base classes
9
+ torch._C.__dict__["_CUDAGraph"] = _dummy_type("_CUDAGraph")
10
+ torch._C.__dict__["_graph_pool_handle"] = _dummy_type("_graph_pool_handle")
11
+ torch._C.__dict__["_cuda_isCurrentStreamCapturing"] = _dummy_type(
12
+ "_cuda_isCurrentStreamCapturing"
13
+ )
14
+
15
+ from torch._C import ( # noqa: F401
16
+ _cuda_isCurrentStreamCapturing,
17
+ _CUDAGraph,
18
+ _graph_pool_handle,
19
+ )
20
+
21
+
22
+ def is_current_stream_capturing():
23
+ r"""Return True if CUDA graph capture is underway on the current CUDA stream, False otherwise.
24
+
25
+ If a CUDA context does not exist on the current device, returns False without initializing the context.
26
+ """
27
+ return _cuda_isCurrentStreamCapturing()
28
+
29
+
30
+ # Python shim helps Sphinx process docstrings more reliably.
31
+ def graph_pool_handle():
32
+ r"""Return an opaque token representing the id of a graph memory pool.
33
+
34
+ See :ref:`Graph memory management<graph-memory-management>`.
35
+
36
+ .. warning::
37
+ This API is in beta and may change in future releases.
38
+ """
39
+ return _graph_pool_handle()
40
+
41
+
42
+ # Python shim helps Sphinx process docstrings more reliably.
43
+ class CUDAGraph(torch._C._CUDAGraph):
44
+ r"""Wrapper around a CUDA graph.
45
+
46
+ .. warning::
47
+ This API is in beta and may change in future releases.
48
+ """
49
+
50
+ def __new__(cls):
51
+ return super().__new__(cls)
52
+
53
+ def capture_begin(self, pool=None, capture_error_mode="global"):
54
+ r"""Begin capturing CUDA work on the current stream.
55
+
56
+ Typically, you shouldn't call ``capture_begin`` yourself.
57
+ Use :class:`~torch.cuda.graph` or :func:`~torch.cuda.make_graphed_callables`,
58
+ which call ``capture_begin`` internally.
59
+
60
+ Arguments:
61
+ pool (optional): Token (returned by :func:`~torch.cuda.graph_pool_handle` or
62
+ :meth:`other_Graph_instance.pool()<torch.cuda.CUDAGraph.pool>`) that hints this graph may share memory
63
+ with the indicated pool. See :ref:`Graph memory management<graph-memory-management>`.
64
+ capture_error_mode (str, optional): specifies the cudaStreamCaptureMode for the graph capture stream.
65
+ Can be "global", "thread_local" or "relaxed". During cuda graph capture, some actions, such as cudaMalloc,
66
+ may be unsafe. "global" will error on actions in other threads, "thread_local" will only error for
67
+ actions in the current thread, and "relaxed" will not error on these actions. Do NOT change this setting
68
+ unless you're familiar with `cudaStreamCaptureMode <https://docs.nvidia.com/cuda/cuda-runtime-api/group__CUDART__STREAM.html#group__CUDART__STREAM_1g9d0535d93a214cbf126835257b16ba85>`_
69
+ """ # noqa: B950
70
+ super().capture_begin(pool=pool, capture_error_mode=capture_error_mode)
71
+
72
+ def capture_end(self):
73
+ r"""End CUDA graph capture on the current stream.
74
+
75
+ After ``capture_end``, ``replay`` may be called on this instance.
76
+
77
+ Typically, you shouldn't call ``capture_end`` yourself.
78
+ Use :class:`~torch.cuda.graph` or :func:`~torch.cuda.make_graphed_callables`,
79
+ which call ``capture_end`` internally.
80
+ """
81
+ super().capture_end()
82
+
83
+ def replay(self):
84
+ r"""Replay the CUDA work captured by this graph."""
85
+ super().replay()
86
+
87
+ def reset(self):
88
+ r"""Delete the graph currently held by this instance."""
89
+ super().reset()
90
+
91
+ def pool(self):
92
+ r"""Return an opaque token representing the id of this graph's memory pool.
93
+
94
+ This id can optionally be passed to another graph's ``capture_begin``,
95
+ which hints the other graph may share the same memory pool.
96
+ """
97
+ return super().pool()
98
+
99
+ def enable_debug_mode(self):
100
+ r"""Enable debugging mode for CUDAGraph.debug_dump."""
101
+ return super().enable_debug_mode()
102
+
103
+ def debug_dump(self, debug_path):
104
+ r"""
105
+ Arguments:
106
+ debug_path (required): Path to dump the graph to.
107
+
108
+ Calls a debugging function to dump the graph if debugging is
109
+ enabled via CUDAGraph.enable_debug_mode()
110
+ """
111
+ return super().debug_dump(debug_path)
112
+
113
+
114
+ class graph:
115
+ r"""Context-manager that captures CUDA work into a :class:`torch.cuda.CUDAGraph` object for later replay.
116
+
117
+ See :ref:`CUDA Graphs <cuda-graph-semantics>` for a general introduction,
118
+ detailed use, and constraints.
119
+
120
+ Arguments:
121
+ cuda_graph (torch.cuda.CUDAGraph): Graph object used for capture.
122
+ pool (optional): Opaque token (returned by a call to :func:`~torch.cuda.graph_pool_handle()` or
123
+ :meth:`other_Graph_instance.pool()<torch.cuda.CUDAGraph.pool>`) hinting this graph's capture
124
+ may share memory from the specified pool. See :ref:`Graph memory management<graph-memory-management>`.
125
+ stream (torch.cuda.Stream, optional): If supplied, will be set as the current stream in the context.
126
+ If not supplied, ``graph`` sets its own internal side stream as the current stream in the context.
127
+ capture_error_mode (str, optional): specifies the cudaStreamCaptureMode for the graph capture stream.
128
+ Can be "global", "thread_local" or "relaxed". During cuda graph capture, some actions, such as cudaMalloc,
129
+ may be unsafe. "global" will error on actions in other threads, "thread_local" will only error for
130
+ actions in the current thread, and "relaxed" will not error on actions. Do NOT change this setting
131
+ unless you're familiar with `cudaStreamCaptureMode <https://docs.nvidia.com/cuda/cuda-runtime-api/group__CUDART__STREAM.html#group__CUDART__STREAM_1g9d0535d93a214cbf126835257b16ba85>`_
132
+
133
+ .. note::
134
+ For effective memory sharing, if you pass a ``pool`` used by a previous capture and the previous capture
135
+ used an explicit ``stream`` argument, you should pass the same ``stream`` argument to this capture.
136
+
137
+ .. warning::
138
+ This API is in beta and may change in future releases.
139
+
140
+ .. _cudaStreamCaptureMode:
141
+ https://docs.nvidia.com/cuda/cuda-runtime-api/group__CUDART__STREAM.html#group__CUDART__STREAM_1g9d0535d93a214cbf126835257b16ba85
142
+ """ # noqa: B950
143
+
144
+ default_capture_stream = None
145
+
146
+ def __init__(
147
+ self,
148
+ cuda_graph,
149
+ pool=None,
150
+ stream=None,
151
+ capture_error_mode: str = "global",
152
+ ):
153
+ # Lazy-init of default_capture_stream helps avoid circular-import errors.
154
+ # Not thread safe, but graphs already have the general (explicitly documented)
155
+ # restriction that only one capture may be underway at a time in the process.
156
+ if self.__class__.default_capture_stream is None:
157
+ self.__class__.default_capture_stream = torch.cuda.Stream()
158
+
159
+ self.pool = () if pool is None else (pool,)
160
+ self.capture_stream = (
161
+ stream if stream is not None else self.__class__.default_capture_stream
162
+ )
163
+ assert self.capture_stream is not None
164
+ self.stream_ctx = torch.cuda.stream(self.capture_stream)
165
+ self.cuda_graph = cuda_graph
166
+ self.capture_error_mode = capture_error_mode
167
+
168
+ def __enter__(self):
169
+ # Free as much memory as we can for the graph
170
+ torch.cuda.synchronize()
171
+ gc.collect()
172
+ torch.cuda.empty_cache()
173
+
174
+ # Stackoverflow seems comfortable with this pattern
175
+ # https://stackoverflow.com/questions/26635684/calling-enter-and-exit-manually#39172487
176
+ self.stream_ctx.__enter__()
177
+
178
+ self.cuda_graph.capture_begin(
179
+ *self.pool, capture_error_mode=self.capture_error_mode
180
+ )
181
+
182
+ def __exit__(self, exc_type, exc_value, traceback):
183
+ self.cuda_graph.capture_end()
184
+ self.stream_ctx.__exit__(exc_type, exc_value, traceback)
185
+ # returning None should propagate exceptions from either capture_end or stream_ctx.__exit__()
186
+
187
+
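A minimal capture-and-replay sketch for the context manager above, following the static input/output pattern from the CUDA Graphs notes (assumes a CUDA device):

    import torch

    static_input = torch.zeros(4, device="cuda")
    g = torch.cuda.CUDAGraph()

    # Warm up on a side stream so lazy initialization does not end up in the capture.
    s = torch.cuda.Stream()
    s.wait_stream(torch.cuda.current_stream())
    with torch.cuda.stream(s):
        static_output = static_input * 2
    torch.cuda.current_stream().wait_stream(s)

    # Capture: the work inside is recorded into g instead of running eagerly.
    with torch.cuda.graph(g):
        static_output = static_input * 2

    # Replay with new data by copying into the captured input tensor.
    static_input.copy_(torch.arange(4, dtype=torch.float32, device="cuda"))
    g.replay()
    print(static_output)  # tensor([0., 2., 4., 6.], device='cuda:0')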
188
+ def make_graphed_callables(
189
+ callables, sample_args, num_warmup_iters=3, allow_unused_input=False
190
+ ):
191
+ r"""Accept callables (functions or :class:`nn.Module<torch.nn.Module>`\ s) and returns graphed versions.
192
+
193
+ Each graphed callable's forward pass runs its source callable's
194
+ forward CUDA work as a CUDA graph inside a single autograd node.
195
+
196
+ The graphed callable's forward pass also appends
197
+ a backward node to the autograd graph. During backward, this node runs the
198
+ callable's backward work as a CUDA graph.
199
+
200
+ Therefore, each graphed callable should be a drop-in replacement for its source callable
201
+ in an autograd-enabled training loop.
202
+
203
+ See :ref:`Partial-network capture<partial-network-capture>` for detailed use and constraints.
204
+
205
+ If you pass a tuple of several callables, their captures will use the same memory pool.
206
+ See :ref:`Graph memory management<graph-memory-management>` for when this is appropriate.
207
+
208
+ Arguments:
209
+ callables (torch.nn.Module or Python function, or tuple of these): Callable or callables to graph.
210
+ See :ref:`Graph memory management<graph-memory-management>` for when passing a tuple of callables
211
+ is appropriate. If you pass a tuple of callables, their order in the tuple must be the same order
212
+ they'll run in the live workload.
213
+ sample_args (tuple of Tensors, or tuple of tuples of Tensors): Sample args for each callable.
214
+ If a single callable was passed, ``sample_args`` must be a single tuple of argument Tensors.
215
+ If a tuple of callables was passed, ``sample_args`` must be a tuple of tuples of argument Tensors.
216
+ num_warmup_iters (int): The number of warmup iterations. Currently, ``DistributedDataParallel`` needs
217
+ 11 iterations for warm up. Default: ``3``.
218
+ allow_unused_input (bool): If False, specifying inputs that were not used when computing outputs
219
+ (and therefore their grad is always zero) is an error. Defaults to False.
220
+
221
+ .. note::
222
+ The ``requires_grad`` state of each Tensor in ``sample_args`` must match the state
223
+ that's expected for the corresponding real input in the training loop.
224
+
225
+ .. warning::
226
+ This API is in beta and may change in future releases.
227
+
228
+ .. warning::
229
+ ``sample_args`` for each callable must contain only Tensors. Other types are not allowed.
230
+
231
+ .. warning::
232
+ Returned callables do not support higher order differentiation (e.g., double backward).
233
+
234
+ .. warning::
235
+ In any :class:`~torch.nn.Module` passed to :func:`~make_graphed_callables`, only parameters
236
+ may be trainable. Buffers must have ``requires_grad=False``.
237
+
238
+ .. warning::
239
+ After you pass a :class:`torch.nn.Module` through :func:`~make_graphed_callables`,
240
+ you may not add or remove any of that Module's parameters or buffers.
241
+
242
+ .. warning::
243
+ :class:`torch.nn.Module`\s passed to :func:`~torch.cuda.make_graphed_callables` must not have module hooks
244
+ registered on them at the time they are passed. However, registering hooks on modules *after* passing them
245
+ through :func:`~torch.cuda.make_graphed_callables` is allowed.
246
+
247
+ .. warning::
248
+ When running a graphed callable, you must pass its arguments in the same order and format
249
+ they appeared in that callable's ``sample_args``.
250
+
251
+ .. warning::
252
+ Automatic mixed precision is supported in :func:`~torch.cuda.make_graphed_callables` only with caching
253
+ disabled. The context manager `torch.cuda.amp.autocast()` must be used with `cache_enabled=False`.
254
+ """
255
+ if torch.is_autocast_enabled() and torch.is_autocast_cache_enabled():
256
+ raise RuntimeError(
257
+ "make_graphed_callables does not support the autocast caching. Please set `cache_enabled=False`."
258
+ )
259
+
260
+ just_one_callable = False
261
+
262
+ if not isinstance(callables, tuple):
263
+ just_one_callable = True
264
+ callables = (callables,)
265
+ sample_args = (sample_args,)
266
+
267
+ flatten_sample_args = []
268
+
269
+ for c, args in zip(callables, sample_args):
270
+ if isinstance(c, torch.nn.Module):
271
+ assert (
272
+ len(c._backward_hooks) == 0
273
+ and len(c._forward_hooks) == 0
274
+ and len(c._forward_pre_hooks) == 0
275
+ ), (
276
+ "Modules must not have hooks registered at the time they are passed. However, registering hooks "
277
+ + "on modules after passing them through make_graphed_callables is allowed."
278
+ )
279
+ assert all(b.requires_grad is False for b in c.buffers()), (
280
+ "In any :class:`~torch.nn.Module` passed to "
281
+ + ":func:`~make_graphed_callables`, only parameters may be trainable. All buffers must have "
282
+ + "``requires_grad=False``."
283
+ )
284
+ flatten_arg = _pytree.arg_tree_leaves(*args)
285
+ flatten_sample_args.append(tuple(flatten_arg))
286
+ assert all(isinstance(arg, torch.Tensor) for arg in flatten_arg), (
287
+ "In the beta API, sample_args "
288
+ + "for each callable must contain only Tensors. Other types are not allowed."
289
+ )
290
+
291
+ # If a callable is an nn.Module, its graph's full input surface is the args the user explicitly
292
+ # passes to forward (ie, its sample_args) AND the module's parameter attributes.
293
+ per_callable_len_user_args = [len(args) for args in flatten_sample_args]
294
+ per_callable_module_params = [
295
+ tuple(c.parameters()) if isinstance(c, torch.nn.Module) else ()
296
+ for c in callables
297
+ ]
298
+ per_callable_static_input_surfaces = [
299
+ flatten_sample_args[i] + per_callable_module_params[i]
300
+ for i in range(len(callables))
301
+ ]
302
+
303
+ fwd_graphs = [torch.cuda.CUDAGraph() for _ in range(len(callables))]
304
+ bwd_graphs = [torch.cuda.CUDAGraph() for _ in range(len(callables))]
305
+
306
+ mempool = graph_pool_handle()
307
+
308
+ # Warmup
309
+ # Hopefully prevents cudnn benchmarking and other lazy-initialization cuda work
310
+ # from ending up in any captures.
311
+ torch.cuda.synchronize()
312
+ with torch.cuda.stream(torch.cuda.Stream()):
313
+ for func, args, static_input_surface in zip(
314
+ callables, sample_args, per_callable_static_input_surfaces
315
+ ):
316
+ for _ in range(num_warmup_iters):
317
+ outputs = _pytree.tree_leaves(func(*args))
318
+ grad_inputs = torch.autograd.grad(
319
+ outputs=tuple(o for o in outputs if o.requires_grad),
320
+ inputs=tuple(i for i in static_input_surface if i.requires_grad),
321
+ grad_outputs=tuple(
322
+ torch.empty_like(o) for o in outputs if o.requires_grad
323
+ ),
324
+ only_inputs=True,
325
+ allow_unused=allow_unused_input,
326
+ )
327
+ del outputs, grad_inputs
328
+ torch.cuda.synchronize()
329
+
330
+ # All captures here share a mempool. To avoid replays corrupting each other's memory,
331
+ # the safest approach is to capture all passes in the same order they'll run:
332
+ # fwd 1, fwd 2, ... fwd N, then bwd N, bwd N-1, ... bwd 1.
333
+
334
+ # Capture forward graphs
335
+ per_callable_static_outputs = []
336
+ per_callable_output_unflatten_spec = []
337
+ for func, args, fwd_graph in zip(callables, sample_args, fwd_graphs):
338
+ with torch.cuda.graph(fwd_graph, pool=mempool):
339
+ outputs = func(*args)
340
+
341
+ flatten_outputs, spec = _pytree.tree_flatten(outputs)
342
+ per_callable_static_outputs.append(tuple(flatten_outputs))
343
+ per_callable_output_unflatten_spec.append(spec)
344
+
345
+ # Capture backward graphs in reverse order
346
+ per_callable_static_grad_outputs = []
347
+ per_callable_static_grad_inputs = []
348
+ for static_input_surface, static_outputs, bwd_graph, module_params in zip(
349
+ reversed(per_callable_static_input_surfaces),
350
+ reversed(per_callable_static_outputs),
351
+ reversed(bwd_graphs),
352
+ reversed(per_callable_module_params),
353
+ ):
354
+ # For now, assumes all static_outputs require grad
355
+ # assert all(o.requires_grad for o in static_outputs), "Outputs of graphed callables must require grad."
356
+ static_grad_outputs = tuple(
357
+ torch.empty_like(o) if o.requires_grad else None for o in static_outputs
358
+ )
359
+
360
+ with torch.cuda.graph(bwd_graph, pool=mempool):
361
+ grad_inputs = torch.autograd.grad(
362
+ outputs=tuple(o for o in static_outputs if o.requires_grad),
363
+ inputs=tuple(i for i in static_input_surface if i.requires_grad),
364
+ grad_outputs=tuple(o for o in static_grad_outputs if o is not None),
365
+ only_inputs=True,
366
+ allow_unused=allow_unused_input,
367
+ )
368
+
369
+ # Constructs a tuple suitable for returning from Graphed.backward:
370
+ # Pads out the actually-needed grads with Nones in gradient slots for inputs that don't require grad.
371
+ # I couldn't think of a slick one-liner for this pattern.
372
+ static_grad_inputs = []
373
+ grad_idx = 0
374
+ for arg in static_input_surface:
375
+ if arg.requires_grad:
376
+ static_grad_inputs.append(grad_inputs[grad_idx])
377
+ grad_idx += 1
378
+ else:
379
+ static_grad_inputs.append(None) # type: ignore[arg-type]
380
+ static_grad_inputs = tuple(static_grad_inputs) # type: ignore[assignment]
381
+
382
+ per_callable_static_grad_outputs.append(static_grad_outputs)
383
+ per_callable_static_grad_inputs.append(static_grad_inputs)
384
+
385
+ # Reverses the most recent two lists
386
+ per_callable_static_grad_outputs = list(reversed(per_callable_static_grad_outputs))
387
+ per_callable_static_grad_inputs = list(reversed(per_callable_static_grad_inputs))
388
+ # Now for every per_callable list, per_callable_*[i] holds the stuff for the ith callable.
389
+
390
+ def make_graphed_autograd_function(
391
+ fwd_graph,
392
+ bwd_graph,
393
+ module_params,
394
+ len_user_args,
395
+ output_unflatten_spec,
396
+ static_input_surface,
397
+ static_outputs,
398
+ static_grad_outputs,
399
+ static_grad_inputs,
400
+ ):
401
+ class Graphed(torch.autograd.Function):
402
+ @staticmethod
403
+ def forward(ctx, *inputs):
404
+ # At this stage, only the user args may (potentially) be new tensors.
405
+ for i in range(len_user_args):
406
+ if static_input_surface[i].data_ptr() != inputs[i].data_ptr():
407
+ static_input_surface[i].copy_(inputs[i])
408
+ fwd_graph.replay()
409
+ assert isinstance(static_outputs, tuple)
410
+ return tuple(o.detach() for o in static_outputs)
411
+
412
+ @staticmethod
413
+ @torch.autograd.function.once_differentiable
414
+ def backward(ctx, *grads):
415
+ assert len(grads) == len(static_grad_outputs)
416
+ for g, grad in zip(static_grad_outputs, grads):
417
+ if g is not None:
418
+ # don't copy if autograd gods have been kind and the
419
+ # incoming grad is already in the right place
420
+ if g.data_ptr() != grad.data_ptr():
421
+ g.copy_(grad)
422
+ bwd_graph.replay()
423
+
424
+ # Input args that didn't require grad expect a None gradient.
425
+ assert isinstance(static_grad_inputs, tuple)
426
+ return tuple(
427
+ b.detach() if b is not None else b for b in static_grad_inputs
428
+ )
429
+
430
+ def functionalized(*user_args):
431
+ # Runs the autograd function with inputs == all inputs to the graph that might require grad
432
+ # (explicit user args + module parameters)
433
+ # Assumes module params didn't change since capture.
434
+ flatten_user_args = _pytree.arg_tree_leaves(*user_args)
435
+ out = Graphed.apply(*(tuple(flatten_user_args) + module_params))
436
+ return _pytree.tree_unflatten(out, output_unflatten_spec)
437
+
438
+ return functionalized
439
+
440
+ # Put together the final graphed callables
441
+ ret = []
442
+ for i, func in enumerate(callables):
443
+ graphed = make_graphed_autograd_function(
444
+ fwd_graphs[i],
445
+ bwd_graphs[i],
446
+ per_callable_module_params[i],
447
+ per_callable_len_user_args[i],
448
+ per_callable_output_unflatten_spec[i],
449
+ per_callable_static_input_surfaces[i],
450
+ per_callable_static_outputs[i],
451
+ per_callable_static_grad_outputs[i],
452
+ per_callable_static_grad_inputs[i],
453
+ )
454
+
455
+ if isinstance(func, torch.nn.Module):
456
+
457
+ def make_graphed_forward(func, graph_training_state, graphed, orig_fwd):
458
+ def new_fwd(*user_args):
459
+ # If the module's training-or-eval state matches what we graphed,
460
+ # run the graph, otherwise run the original forward method
461
+ if func.training == graph_training_state:
462
+ return graphed(*user_args)
463
+ else:
464
+ return orig_fwd(*user_args)
465
+
466
+ return new_fwd
467
+
468
+ func.forward = make_graphed_forward(func, func.training, graphed, func.forward) # type: ignore[assignment]
469
+ ret.append(func)
470
+ else:
471
+ ret.append(graphed)
472
+
473
+ if just_one_callable:
474
+ return ret[0]
475
+
476
+ return tuple(ret)
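A minimal sketch of graphing a small module with the function above; the module, shapes, loss, and optimizer are illustrative, and sample_args must be CUDA tensors with the requires_grad state expected at training time:

    import torch

    model = torch.nn.Sequential(torch.nn.Linear(8, 8), torch.nn.ReLU()).cuda()
    loss_fn = torch.nn.MSELoss()
    opt = torch.optim.SGD(model.parameters(), lr=0.1)

    x = torch.randn(16, 8, device="cuda")
    y = torch.randn(16, 8, device="cuda")

    model = torch.cuda.make_graphed_callables(model, (x,))

    for _ in range(3):
        opt.zero_grad(set_to_none=True)
        out = model(x)               # forward replays the captured forward graph
        loss_fn(out, y).backward()   # backward replays the captured backward graph
        opt.step()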
env-llmeval/lib/python3.10/site-packages/torch/cuda/jiterator.py ADDED
@@ -0,0 +1,185 @@
1
+ import re
2
+ from typing import Callable, List
3
+
4
+ import torch
5
+ from torch import Tensor
6
+
7
+ __all__: List[str] = []
8
+
9
+
10
+ class _CodeParser:
11
+ def __init__(self, code_string: str):
12
+ optional_ws = r"\s*"
13
+ required_ws = r"\s+"
14
+ template_params = r"(?P<template_params>\<.+\>)"
15
+ return_type = r"(?P<return_type>\w+)"
16
+ function_name = r"(?P<function_name>\w+)"
17
+ function_params = r"(?P<function_params>\(.+\))"
18
+ function_body = r"(?P<function_body>\{.+\})"
19
+
20
+ pattern = (
21
+ optional_ws
22
+ + "template"
23
+ + optional_ws
24
+ + template_params
25
+ + optional_ws
26
+ + return_type
27
+ + required_ws
28
+ + function_name
29
+ + optional_ws
30
+ + function_params
31
+ + optional_ws
32
+ + function_body
33
+ + optional_ws
34
+ )
35
+
36
+ result = re.match(
37
+ pattern, code_string, re.DOTALL
38
+ ) # DOTALL for matching multiline
39
+
40
+ if result is None:
41
+ raise Exception(
42
+ f"Couldn't parse code, please check correctness:\n {code_string}"
43
+ )
44
+
45
+ self.template_params = result["template_params"]
46
+ self.return_type = result["return_type"]
47
+ self.function_name = result["function_name"]
48
+ self.function_params = result["function_params"]
49
+ self.function_body = result["function_body"]
50
+
51
+
52
+ class _JittedFunction:
53
+ def __init__(
54
+ self, code_string: str, return_by_ref: bool, num_outputs: int, **kwargs
55
+ ):
56
+ self.code_string = code_string
57
+
58
+ assert (
59
+ return_by_ref or num_outputs == 1
60
+ ), "Return by value only works for single output. "
61
+ self.return_by_ref = return_by_ref
62
+ self.num_outputs = num_outputs
63
+
64
+ parsed_code = _CodeParser(code_string)
65
+ self.kernel_name = parsed_code.function_name
66
+
67
+ self.kwargs_dict = kwargs
68
+ self.is_cuda_available = torch.cuda.is_available()
69
+
70
+ def __call__(self, *tensors: Tensor, **kwargs):
71
+ # Jiterator follows torch.cuda's lazy initialization behavior.
72
+ # Defer checking CUDA's availability to function invocation time.
73
+ assert (
74
+ self.is_cuda_available
75
+ ), "Jiterator is only supported on CUDA and ROCm GPUs, none are available."
76
+
77
+ assert len(tensors) <= 8, "jiterator only supports up to 8 tensor inputs."
78
+
79
+ expanded_kwargs = self.kwargs_dict.copy()
80
+ for key, value in kwargs.items():
81
+ if key in self.kwargs_dict:
82
+ expanded_kwargs[key] = value
83
+ else:
84
+ raise KeyError(f"{key} is not declared in function definition")
85
+
86
+ return torch._C._cuda_jiterator_compile_and_launch_kernel(
87
+ self.code_string,
88
+ self.kernel_name,
89
+ self.return_by_ref,
90
+ self.num_outputs,
91
+ tensors,
92
+ expanded_kwargs,
93
+ )
94
+
95
+
96
+ def _create_jit_fn(code_string: str, **kwargs) -> Callable:
97
+ """
98
+ Create a jiterator-generated cuda kernel for an elementwise op.
99
+
100
+ The code string has to be a valid CUDA function that describes the computation for a single element. The code
101
+ string has to follow the C++ template pattern, as shown in the example below. This function will be inlined
102
+ into the elementwise kernel template and compiled on the fly. The compiled kernel will be cached in memory, as well as
103
+ in a local temp dir.
104
+
105
+ Jiterator-generated kernels accept noncontiguous tensors, and support broadcasting and type promotion.
106
+
107
+ Args:
108
+ code_string (str): CUDA code string to be compiled by jiterator. The entry functor must return by value.
109
+ kwargs (Dict, optional): Keyword arguments for generated function
110
+
111
+ Example::
112
+
113
+ code_string = "template <typename T> T my_kernel(T x, T y, T alpha) { return -x + alpha * y; }"
114
+ jitted_fn = create_jit_fn(code_string, alpha=1.0)
115
+ a = torch.rand(3, device='cuda')
116
+ b = torch.rand(3, device='cuda')
117
+ # invoke jitted function like a regular python function
118
+ result = jitted_fn(a, b, alpha=3.14)
119
+
120
+ code_string also allows multiple function definitions, and the last function will be treated as the entry function.
121
+
122
+ Example::
123
+
124
+ code_string = "template <typename T> T util_fn(T x, T y) { return ::sin(x) + ::cos(y); }"
125
+ code_string += "template <typename T> T my_kernel(T x, T y, T val) { return ::min(val, util_fn(x, y)); }"
126
+ jitted_fn = create_jit_fn(code_string, val=0.0)
127
+ a = torch.rand(3, device='cuda')
128
+ b = torch.rand(3, device='cuda')
129
+ # invoke jitted function like a regular python function
130
+ result = jitted_fn(a, b) # using default val=0.0
131
+
132
+ Jiterator can be used together with Python registration to override an operator's CUDA kernel.
133
+ The following example overrides gelu's CUDA kernel with relu.
134
+
135
+ Example::
136
+
137
+ code_string = "template <typename T> T my_gelu(T a) { return a > 0 ? a : 0; }"
138
+ my_gelu = create_jit_fn(code_string)
139
+ my_lib = torch.library.Library("aten", "IMPL")
140
+ my_lib.impl('aten::gelu', my_gelu, "CUDA")
141
+ # torch.nn.GELU and torch.nn.function.gelu are now overridden
142
+ a = torch.rand(3, device='cuda')
143
+ torch.allclose(torch.nn.functional.gelu(a), torch.nn.functional.relu(a))
144
+
145
+ .. warning::
146
+ This API is in beta and may change in future releases.
147
+
148
+ .. warning::
149
+ This API only supports up to 8 inputs and 1 output
150
+
151
+ .. warning::
152
+ All input tensors must be on a CUDA device
153
+ """
154
+ return _JittedFunction(code_string, return_by_ref=False, num_outputs=1, **kwargs)
155
+
156
+
157
+ def _create_multi_output_jit_fn(
158
+ code_string: str, num_outputs: int, **kwargs
159
+ ) -> Callable:
160
+ """
161
+ Create a jiterator-generated cuda kernel for an elementwise op that supports returning one or more outputs.
162
+
163
+ Args:
164
+ code_string (str): CUDA code string to be compiled by jiterator. The entry functor must return value by reference.
165
+ num_outputs (int): number of outputs returned by the kernel
166
+ kwargs (Dict, optional): Keyword arguments for generated function
167
+
168
+ Example::
169
+
170
+ code_string = "template <typename T> void my_kernel(T x, T y, T alpha, T& out) { out = -x + alpha * y; }"
171
+ jitted_fn = create_multi_output_jit_fn(code_string, num_outputs=1, alpha=1.0)
172
+ a = torch.rand(3, device='cuda')
173
+ b = torch.rand(3, device='cuda')
174
+ # invoke jitted function like a regular python function
175
+ result = jitted_fn(a, b, alpha=3.14)
176
+
177
+ .. warning::
178
+ This API is in beta and may change in future releases.
179
+
180
+ .. warning::
181
+ This API only supports up to 8 inputs and 8 outputs
182
+ """
183
+ return _JittedFunction(
184
+ code_string, return_by_ref=True, num_outputs=num_outputs, **kwargs
185
+ )
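Tying the pieces above together, a minimal sketch of declaring a kwarg default at creation time and overriding it per call (the kernel and names are illustrative; assumes a CUDA device):

    import torch
    from torch.cuda.jiterator import _create_jit_fn

    code = "template <typename T> T scaled_diff(T x, T y, T alpha) { return alpha * (x - y); }"
    fn = _create_jit_fn(code, alpha=1.0)  # alpha gets a default here

    a = torch.rand(3, device="cuda")
    b = torch.rand(3, device="cuda")
    out_default = fn(a, b)                # runs with alpha=1.0
    out_scaled = fn(a, b, alpha=2.5)      # per-call override; undeclared kwargs raise KeyError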
env-llmeval/lib/python3.10/site-packages/torch/cuda/memory.py ADDED
@@ -0,0 +1,914 @@
1
+ r"""This package adds support for device memory management implemented in CUDA."""
2
+
3
+ import collections
4
+ import contextlib
5
+ import ctypes
6
+ import pickle
7
+ import sys
8
+ import warnings
9
+ from inspect import signature
10
+
11
+ from typing import Any, Dict, Optional, Tuple, Union
12
+
13
+ import torch
14
+ from torch import _C
15
+
16
+ from torch.types import Device
17
+ from . import _get_device_index, _get_nvml_device_index, _lazy_init, is_initialized
18
+
19
+ from ._memory_viz import memory as _memory, segments as _segments
20
+ from ._utils import _dummy_type
21
+
22
+ __all__ = [
23
+ "caching_allocator_alloc",
24
+ "caching_allocator_delete",
25
+ "set_per_process_memory_fraction",
26
+ "empty_cache",
27
+ "memory_stats",
28
+ "memory_stats_as_nested_dict",
29
+ "reset_accumulated_memory_stats",
30
+ "reset_peak_memory_stats",
31
+ "reset_max_memory_allocated",
32
+ "reset_max_memory_cached",
33
+ "memory_allocated",
34
+ "max_memory_allocated",
35
+ "memory_reserved",
36
+ "max_memory_reserved",
37
+ "memory_cached",
38
+ "max_memory_cached",
39
+ "memory_snapshot",
40
+ "memory_summary",
41
+ "list_gpu_processes",
42
+ "mem_get_info",
43
+ "get_allocator_backend",
44
+ "CUDAPluggableAllocator",
45
+ "change_current_allocator",
46
+ ]
47
+
48
+
49
+ if not hasattr(torch._C, "_cuda_CUDAAllocator"):
50
+ # Define dummy base classes
51
+ torch._C.__dict__["_cuda_CUDAAllocator"] = _dummy_type("_cuda_CUDAAllocator")
52
+
53
+
54
+ def _host_allocator():
55
+ _lazy_init()
56
+ return torch._C._cuda_cudaHostAllocator()
57
+
58
+
59
+ @contextlib.contextmanager
60
+ def _free_mutex():
61
+ torch._C._cuda_lock_mutex()
62
+ try:
63
+ yield
64
+ finally:
65
+ torch._C._cuda_unlock_mutex()
66
+
67
+
68
+ def caching_allocator_alloc(size, device: Union[Device, int] = None, stream=None):
69
+ r"""Perform a memory allocation using the CUDA memory allocator.
70
+
71
+ Memory is allocated for a given device and a stream. This
72
+ function is intended to be used for interoperability with other
73
+ frameworks. Allocated memory is released through
74
+ :func:`~torch.cuda.caching_allocator_delete`.
75
+
76
+ Args:
77
+ size (int): number of bytes to be allocated.
78
+ device (torch.device or int, optional): selected device. If it is
79
+ ``None`` the default CUDA device is used.
80
+ stream (torch.cuda.Stream or int, optional): selected stream. If is ``None`` then
81
+ the default stream for the selected device is used.
82
+
83
+ .. note::
84
+ See :ref:`cuda-memory-management` for more details about GPU memory
85
+ management.
86
+ """
87
+ if device is None:
88
+ device = torch.cuda.current_device()
89
+ device = _get_device_index(device)
90
+ if stream is None:
91
+ stream = torch.cuda.current_stream(device)
92
+ if isinstance(stream, torch.cuda.streams.Stream):
93
+ stream = stream.cuda_stream
94
+ if not isinstance(stream, int):
95
+ raise TypeError(
96
+ "Invalid type for stream argument, must be "
97
+ "`torch.cuda.Stream` or `int` representing a pointer "
98
+ "to a existing stream"
99
+ )
100
+ with torch.cuda.device(device):
101
+ return torch._C._cuda_cudaCachingAllocator_raw_alloc(size, stream)
102
+
103
+
104
+ def caching_allocator_delete(mem_ptr):
105
+ r"""Delete memory allocated using the CUDA memory allocator.
106
+
107
+ Memory allocated with :func:`~torch.cuda.caching_allocator_alloc`
108
+ is freed here. The associated device and stream are tracked inside
109
+ the allocator.
110
+
111
+ Args:
112
+ mem_ptr (int): memory address to be freed by the allocator.
113
+
114
+ .. note::
115
+ See :ref:`cuda-memory-management` for more details about GPU memory
116
+ management.
117
+ """
118
+ torch._C._cuda_cudaCachingAllocator_raw_delete(mem_ptr)
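A minimal sketch pairing the two calls above, e.g. to hand a raw device pointer to another framework and release it afterwards (assumes a CUDA device):

    import torch

    ptr = torch.cuda.caching_allocator_alloc(1024 * 1024)  # 1 MiB on the current device/stream
    try:
        pass  # interoperate with an external library via the raw pointer
    finally:
        torch.cuda.caching_allocator_delete(ptr)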
119
+
120
+
121
+ def set_per_process_memory_fraction(
122
+ fraction, device: Union[Device, int] = None
123
+ ) -> None:
124
+ r"""Set memory fraction for a process.
125
+
126
+ The fraction is used to limit the caching allocator's memory allocation on a CUDA device.
127
+ The allowed value equals the total visible memory multiplied by the fraction.
128
+ If a process tries to allocate more than the allowed value, the allocator raises an
129
+ out-of-memory error.
130
+
131
+ Args:
132
+ fraction(float): Range: 0~1. Allowed memory equals total_memory * fraction.
133
+ device (torch.device or int, optional): selected device. If it is
134
+ ``None`` the default CUDA device is used.
135
+ .. note::
136
+ In general, the total available free memory is less than the total capacity.
137
+ """
138
+ _lazy_init()
139
+ if device is None:
140
+ device = torch.cuda.current_device()
141
+ device = _get_device_index(device)
142
+ if not isinstance(fraction, float):
143
+ raise TypeError("Invalid type for fraction argument, must be `float`")
144
+ if fraction < 0 or fraction > 1:
145
+ raise ValueError(f"Invalid fraction value: {fraction}. Allowed range: 0~1")
146
+
147
+ torch._C._cuda_setMemoryFraction(fraction, device)
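A minimal sketch of capping this process at half of device 0's total memory; allocations beyond the cap raise an out-of-memory error from the caching allocator:

    import torch

    torch.cuda.set_per_process_memory_fraction(0.5, device=0)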
148
+
149
+
150
+ def empty_cache() -> None:
151
+ r"""Release all unoccupied cached memory currently held by the caching
152
+ allocator so that it can be used by other GPU applications and is visible in
153
+ `nvidia-smi`.
154
+
155
+ .. note::
156
+ :func:`~torch.cuda.empty_cache` doesn't increase the amount of GPU
157
+ memory available for PyTorch. However, it may help reduce fragmentation
158
+ of GPU memory in certain cases. See :ref:`cuda-memory-management` for
159
+ more details about GPU memory management.
160
+ """
161
+ if is_initialized():
162
+ torch._C._cuda_emptyCache()
163
+
164
+
165
+ def memory_stats(device: Union[Device, int] = None) -> Dict[str, Any]:
166
+ r"""Return a dictionary of CUDA memory allocator statistics for a given device.
167
+
168
+ The return value of this function is a dictionary of statistics, each of
169
+ which is a non-negative integer.
170
+
171
+ Core statistics:
172
+
173
+ - ``"allocated.{all,large_pool,small_pool}.{current,peak,allocated,freed}"``:
174
+ number of allocation requests received by the memory allocator.
175
+ - ``"allocated_bytes.{all,large_pool,small_pool}.{current,peak,allocated,freed}"``:
176
+ amount of allocated memory.
177
+ - ``"segment.{all,large_pool,small_pool}.{current,peak,allocated,freed}"``:
178
+ number of reserved segments from ``cudaMalloc()``.
179
+ - ``"reserved_bytes.{all,large_pool,small_pool}.{current,peak,allocated,freed}"``:
180
+ amount of reserved memory.
181
+ - ``"active.{all,large_pool,small_pool}.{current,peak,allocated,freed}"``:
182
+ number of active memory blocks.
183
+ - ``"active_bytes.{all,large_pool,small_pool}.{current,peak,allocated,freed}"``:
184
+ amount of active memory.
185
+ - ``"inactive_split.{all,large_pool,small_pool}.{current,peak,allocated,freed}"``:
186
+ number of inactive, non-releasable memory blocks.
187
+ - ``"inactive_split_bytes.{all,large_pool,small_pool}.{current,peak,allocated,freed}"``:
188
+ amount of inactive, non-releasable memory.
189
+
190
+ For these core statistics, values are broken down as follows.
191
+
192
+ Pool type:
193
+
194
+ - ``all``: combined statistics across all memory pools.
195
+ - ``large_pool``: statistics for the large allocation pool
196
+ (as of October 2019, for size >= 1MB allocations).
197
+ - ``small_pool``: statistics for the small allocation pool
198
+ (as of October 2019, for size < 1MB allocations).
199
+
200
+ Metric type:
201
+
202
+ - ``current``: current value of this metric.
203
+ - ``peak``: maximum value of this metric.
204
+ - ``allocated``: historical total increase in this metric.
205
+ - ``freed``: historical total decrease in this metric.
206
+
207
+ In addition to the core statistics, we also provide some simple event
208
+ counters:
209
+
210
+ - ``"num_alloc_retries"``: number of failed ``cudaMalloc`` calls that
211
+ result in a cache flush and retry.
212
+ - ``"num_ooms"``: number of out-of-memory errors thrown.
213
+
214
+ The caching allocator can be configured via ENV to not split blocks larger than a
215
+ defined size (see the Memory Management section of the CUDA Semantics documentation).
216
+ This helps avoid memory fragmentation but may have a performance
217
+ penalty. Additional outputs to assist with tuning and evaluating impact:
218
+
219
+ - ``"max_split_size"``: blocks above this size will not be split.
220
+ - ``"oversize_allocations.{current,peak,allocated,freed}"``:
221
+ number of over-size allocation requests received by the memory allocator.
222
+ - ``"oversize_segments.{current,peak,allocated,freed}"``:
223
+ number of over-size reserved segments from ``cudaMalloc()``.
224
+
225
+ The caching allocator can be configured via ENV to round memory allocations in order
226
+ to reduce fragmentation. Sometimes the overhead from rounding can be higher than
227
+ the fragmentation it helps reduce. The following stat can be used to check if
228
+ rounding adds too much overhead:
229
+
230
+ - ``"requested_bytes.{all,large_pool,small_pool}.{current,peak,allocated,freed}"``:
231
+ memory requested by client code, compare this with allocated_bytes to check if
232
+ allocation rounding adds too much overhead.
233
+
234
+ Args:
235
+ device (torch.device or int, optional): selected device. Returns
236
+ statistics for the current device, given by :func:`~torch.cuda.current_device`,
237
+ if :attr:`device` is ``None`` (default).
238
+
239
+ .. note::
240
+ See :ref:`cuda-memory-management` for more details about GPU memory
241
+ management.
242
+
243
+ .. note::
244
+ With :ref:`backend:cudaMallocAsync<cuda-memory-envvars>`, some stats are not
245
+ meaningful, and are always reported as zero.
246
+ """
247
+ result = []
248
+
249
+ def _recurse_add_to_result(prefix, obj):
250
+ if isinstance(obj, dict):
251
+ if len(prefix) > 0:
252
+ prefix += "."
253
+ for k, v in obj.items():
254
+ _recurse_add_to_result(prefix + k, v)
255
+ else:
256
+ result.append((prefix, obj))
257
+
258
+ stats = memory_stats_as_nested_dict(device=device)
259
+ _recurse_add_to_result("", stats)
260
+ result.sort()
261
+
262
+ return collections.OrderedDict(result)
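A minimal sketch of reading a few of the flattened keys described above (assumes a CUDA device):

    import torch

    x = torch.randn(1024, 1024, device="cuda")
    stats = torch.cuda.memory_stats()
    print(stats["allocated_bytes.all.current"])           # bytes currently held by tensors
    print(stats["reserved_bytes.all.peak"])               # peak bytes reserved via cudaMalloc
    print(stats["num_alloc_retries"], stats["num_ooms"])  # allocator retry / OOM counters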
263
+
264
+
265
+ def memory_stats_as_nested_dict(device: Union[Device, int] = None) -> Dict[str, Any]:
266
+ r"""Return the result of :func:`~torch.cuda.memory_stats` as a nested dictionary."""
267
+ if not is_initialized():
268
+ return {}
269
+ device = _get_device_index(device, optional=True)
270
+ return torch._C._cuda_memoryStats(device)
271
+
272
+
273
+ def reset_accumulated_memory_stats(device: Union[Device, int] = None) -> None:
274
+ r"""Reset the "accumulated" (historical) stats tracked by the CUDA memory allocator.
275
+
276
+ See :func:`~torch.cuda.memory_stats` for details. Accumulated stats correspond to
277
+ the `"allocated"` and `"freed"` keys in each individual stat dict, as well as
278
+ `"num_alloc_retries"` and `"num_ooms"`.
279
+
280
+ Args:
281
+ device (torch.device or int, optional): selected device. Returns
282
+ statistic for the current device, given by :func:`~torch.cuda.current_device`,
283
+ if :attr:`device` is ``None`` (default).
284
+
285
+ .. note::
286
+ See :ref:`cuda-memory-management` for more details about GPU memory
287
+ management.
288
+ """
289
+ device = _get_device_index(device, optional=True)
290
+ return torch._C._cuda_resetAccumulatedMemoryStats(device)
291
+
292
+
293
+ def reset_peak_memory_stats(device: Union[Device, int] = None) -> None:
294
+ r"""Reset the "peak" stats tracked by the CUDA memory allocator.
295
+
296
+ See :func:`~torch.cuda.memory_stats` for details. Peak stats correspond to the
297
+ `"peak"` key in each individual stat dict.
298
+
299
+ Args:
300
+ device (torch.device or int, optional): selected device. Returns
301
+ statistic for the current device, given by :func:`~torch.cuda.current_device`,
302
+ if :attr:`device` is ``None`` (default).
303
+
304
+ .. note::
305
+ See :ref:`cuda-memory-management` for more details about GPU memory
306
+ management.
307
+ """
308
+ device = _get_device_index(device, optional=True)
309
+ return torch._C._cuda_resetPeakMemoryStats(device)
310
+
311
+
312
+ def reset_max_memory_allocated(device: Union[Device, int] = None) -> None:
313
+ r"""Reset the starting point in tracking maximum GPU memory occupied by tensors for a given device.
314
+
315
+ See :func:`~torch.cuda.max_memory_allocated` for details.
316
+
317
+ Args:
318
+ device (torch.device or int, optional): selected device. Returns
319
+ statistic for the current device, given by :func:`~torch.cuda.current_device`,
320
+ if :attr:`device` is ``None`` (default).
321
+
322
+ .. warning::
323
+ This function now calls :func:`~torch.cuda.reset_peak_memory_stats`, which resets
324
+ /all/ peak memory stats.
325
+
326
+ .. note::
327
+ See :ref:`cuda-memory-management` for more details about GPU memory
328
+ management.
329
+ """
330
+ warnings.warn(
331
+ "torch.cuda.reset_max_memory_allocated now calls torch.cuda.reset_peak_memory_stats, "
332
+ "which resets /all/ peak memory stats.",
333
+ FutureWarning,
334
+ )
335
+ return reset_peak_memory_stats(device=device)
336
+
337
+
338
+ def reset_max_memory_cached(device: Union[Device, int] = None) -> None:
339
+ r"""Reset the starting point in tracking maximum GPU memory managed by the caching allocator for a given device.
340
+
341
+ See :func:`~torch.cuda.max_memory_cached` for details.
342
+
343
+ Args:
344
+ device (torch.device or int, optional): selected device. Returns
345
+ statistic for the current device, given by :func:`~torch.cuda.current_device`,
346
+ if :attr:`device` is ``None`` (default).
347
+
348
+ .. warning::
349
+ This function now calls :func:`~torch.cuda.reset_peak_memory_stats`, which resets
350
+ /all/ peak memory stats.
351
+
352
+ .. note::
353
+ See :ref:`cuda-memory-management` for more details about GPU memory
354
+ management.
355
+ """
356
+ warnings.warn(
357
+ "torch.cuda.reset_max_memory_cached now calls torch.cuda.reset_peak_memory_stats, "
358
+ "which resets /all/ peak memory stats.",
359
+ FutureWarning,
360
+ )
361
+ return reset_peak_memory_stats(device=device)
362
+
363
+
364
+ def memory_allocated(device: Union[Device, int] = None) -> int:
365
+ r"""Return the current GPU memory occupied by tensors in bytes for a given device.
366
+
367
+ Args:
368
+ device (torch.device or int, optional): selected device. Returns
369
+ statistic for the current device, given by :func:`~torch.cuda.current_device`,
370
+ if :attr:`device` is ``None`` (default).
371
+
372
+ .. note::
373
+ This is likely less than the amount shown in `nvidia-smi` since some
374
+ unused memory can be held by the caching allocator and some context
375
+ needs to be created on GPU. See :ref:`cuda-memory-management` for more
376
+ details about GPU memory management.
377
+ """
378
+ return memory_stats(device=device).get("allocated_bytes.all.current", 0)
379
+
380
+
381
+ def max_memory_allocated(device: Union[Device, int] = None) -> int:
382
+ r"""Return the maximum GPU memory occupied by tensors in bytes for a given device.
383
+
384
+ By default, this returns the peak allocated memory since the beginning of
385
+ this program. :func:`~torch.cuda.reset_peak_memory_stats` can be used to
386
+ reset the starting point in tracking this metric. For example, these two
387
+ functions can measure the peak allocated memory usage of each iteration in a
388
+ training loop.
389
+
390
+ Args:
391
+ device (torch.device or int, optional): selected device. Returns
392
+ statistic for the current device, given by :func:`~torch.cuda.current_device`,
393
+ if :attr:`device` is ``None`` (default).
394
+
395
+ .. note::
396
+ See :ref:`cuda-memory-management` for more details about GPU memory
397
+ management.
398
+ """
399
+ return memory_stats(device=device).get("allocated_bytes.all.peak", 0)
400
+
401
+
402
+ def memory_reserved(device: Union[Device, int] = None) -> int:
403
+ r"""Return the current GPU memory managed by the caching allocator in bytes for a given device.
404
+
405
+ Args:
406
+ device (torch.device or int, optional): selected device. Returns
407
+ statistic for the current device, given by :func:`~torch.cuda.current_device`,
408
+ if :attr:`device` is ``None`` (default).
409
+
410
+ .. note::
411
+ See :ref:`cuda-memory-management` for more details about GPU memory
412
+ management.
413
+ """
414
+ return memory_stats(device=device).get("reserved_bytes.all.current", 0)
415
+
416
+
417
+ def max_memory_reserved(device: Union[Device, int] = None) -> int:
418
+ r"""Return the maximum GPU memory managed by the caching allocator in bytes for a given device.
419
+
420
+ By default, this returns the peak cached memory since the beginning of this
421
+ program. :func:`~torch.cuda.reset_peak_memory_stats` can be used to reset
422
+ the starting point in tracking this metric. For example, these two functions
423
+ can measure the peak cached memory amount of each iteration in a training
424
+ loop.
425
+
426
+ Args:
427
+ device (torch.device or int, optional): selected device. Returns
428
+ statistic for the current device, given by :func:`~torch.cuda.current_device`,
429
+ if :attr:`device` is ``None`` (default).
430
+
431
+ .. note::
432
+ See :ref:`cuda-memory-management` for more details about GPU memory
433
+ management.
434
+ """
435
+ return memory_stats(device=device).get("reserved_bytes.all.peak", 0)
436
+
437
+
438
+ def memory_cached(device: Union[Device, int] = None) -> int:
439
+ r"""Deprecated; see :func:`~torch.cuda.memory_reserved`."""
440
+ warnings.warn(
441
+ "torch.cuda.memory_cached has been renamed to torch.cuda.memory_reserved",
442
+ FutureWarning,
443
+ )
444
+ return memory_reserved(device=device)
445
+
446
+
447
+ def max_memory_cached(device: Union[Device, int] = None) -> int:
448
+ r"""Deprecated; see :func:`~torch.cuda.max_memory_reserved`."""
449
+ warnings.warn(
450
+ "torch.cuda.max_memory_cached has been renamed to torch.cuda.max_memory_reserved",
451
+ FutureWarning,
452
+ )
453
+ return max_memory_reserved(device=device)
454
+
455
+
456
+ def memory_snapshot():
457
+ r"""Return a snapshot of the CUDA memory allocator state across all devices.
458
+
459
+ Interpreting the output of this function requires familiarity with the
460
+ memory allocator internals.
461
+
462
+ .. note::
463
+ See :ref:`cuda-memory-management` for more details about GPU memory
464
+ management.
465
+ """
466
+ return torch._C._cuda_memorySnapshot()["segments"]
467
+
468
+
469
+ def memory_summary(device: Union[Device, int] = None, abbreviated: bool = False) -> str:
470
+ r"""Return a human-readable printout of the current memory allocator statistics for a given device.
471
+
472
+ This can be useful to display periodically during training, or when
473
+ handling out-of-memory exceptions.
474
+
475
+ Args:
476
+ device (torch.device or int, optional): selected device. Returns
477
+ printout for the current device, given by :func:`~torch.cuda.current_device`,
478
+ if :attr:`device` is ``None`` (default).
479
+ abbreviated (bool, optional): whether to return an abbreviated summary
480
+ (default: False).
481
+
482
+ .. note::
483
+ See :ref:`cuda-memory-management` for more details about GPU memory
484
+ management.
485
+ """
486
+ device = _get_device_index(device, optional=True)
487
+ stats = memory_stats(device=device)
488
+
489
+ def _format_size(sz, pref_sz):
490
+ prefixes = ["B ", "KiB", "MiB", "GiB", "TiB", "PiB"]
491
+ prefix = prefixes[0]
492
+ for new_prefix in prefixes[1:]:
493
+ if pref_sz < 768 * 1024:
494
+ break
495
+ prefix = new_prefix
496
+ sz //= 1024
497
+ pref_sz /= 1024
498
+ return f"{sz:6d} {prefix}"
499
+
500
+ def _format_count(cnt, pref_cnt):
501
+ prefixes = [" ", "K", "M"]
502
+ prefix = prefixes[0]
503
+ for new_prefix in prefixes[1:]:
504
+ if pref_cnt < 750 * 1000:
505
+ break
506
+ prefix = new_prefix
507
+ cnt //= 1000
508
+ pref_cnt /= 1000
509
+ return f"{cnt:7d} {prefix} "
510
+
511
+ metrics_to_display = [
512
+ ("allocated_bytes", "Allocated memory", _format_size),
513
+ ("active_bytes", "Active memory", _format_size),
514
+ ("requested_bytes", "Requested memory", _format_size),
515
+ ("reserved_bytes", "GPU reserved memory", _format_size),
516
+ ("inactive_split_bytes", "Non-releasable memory", _format_size),
517
+ ("allocation", "Allocations", _format_count),
518
+ ("active", "Active allocs", _format_count),
519
+ ("segment", "GPU reserved segments", _format_count),
520
+ ("inactive_split", "Non-releasable allocs", _format_count),
521
+ ]
522
+
523
+ lines = []
524
+ lines.append("=" * 75)
525
+ lines.append(" {_:16} PyTorch CUDA memory summary, device ID {device:<17d} ")
526
+ lines.append("-" * 75)
527
+ lines.append(
528
+ " {_:9} CUDA OOMs: {num_ooms:<12d} | {_:6} cudaMalloc retries: {num_alloc_retries:<8d} "
529
+ )
530
+ lines.append("=" * 75)
531
+ lines.append(
532
+ " Metric | Cur Usage | Peak Usage | Tot Alloc | Tot Freed "
533
+ )
534
+
535
+ for metric_key, metric_name, formatter in metrics_to_display:
536
+ lines.append("-" * 75)
537
+ submetrics = [("all", metric_name)]
538
+ if not abbreviated:
539
+ submetrics.append(("large_pool", " from large pool"))
540
+ submetrics.append(("small_pool", " from small pool"))
541
+
542
+ current_prefval, peak_prefval, allocated_prefval, freed_prefval = (
543
+ None,
544
+ None,
545
+ None,
546
+ None,
547
+ )
548
+
549
+ for submetric_key, submetric_name in submetrics:
550
+ prefix = metric_key + "." + submetric_key + "."
551
+
552
+ current = stats[prefix + "current"]
553
+ peak = stats[prefix + "peak"]
554
+ allocated = stats[prefix + "allocated"]
555
+ freed = stats[prefix + "freed"]
556
+
557
+ if current_prefval is None:
558
+ current_prefval = current
559
+ peak_prefval = peak
560
+ allocated_prefval = allocated
561
+ freed_prefval = freed
562
+
563
+ lines.append(
564
+ " {:<21} | {} | {} | {} | {} ".format(
565
+ submetric_name,
566
+ formatter(current, current_prefval),
567
+ formatter(peak, peak_prefval),
568
+ formatter(allocated, allocated_prefval),
569
+ formatter(freed, freed_prefval),
570
+ ),
571
+ )
572
+
573
+ metrics_to_display = [
574
+ ("oversize_allocations", "Oversize allocations", _format_count),
575
+ ("oversize_segments", "Oversize GPU segments", _format_count),
576
+ ]
577
+
578
+ for metric_key, metric_name, formatter in metrics_to_display:
579
+ lines.append("-" * 75)
580
+
581
+ prefix = metric_key + "."
582
+
583
+ current = stats[prefix + "current"]
584
+ peak = stats[prefix + "peak"]
585
+ allocated = stats[prefix + "allocated"]
586
+ freed = stats[prefix + "freed"]
587
+
588
+ lines.append(
589
+ " {:<21} | {} | {} | {} | {} ".format(
590
+ metric_name,
591
+ formatter(current, current),
592
+ formatter(peak, peak),
593
+ formatter(allocated, allocated),
594
+ formatter(freed, freed),
595
+ ),
596
+ )
597
+
598
+ lines.append("=" * 75)
599
+
600
+ fmt_dict = {"_": "", "device": device}
601
+ for k, v in stats.items():
602
+ fmt_dict[k.replace(".", "-")] = v
603
+ return "|" + "|\n|".join(lines).format(**fmt_dict) + "|\n"
604
+
605
+
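As a usage note for the summary generator above, here is a minimal sketch (assuming a CUDA-capable machine) that allocates a couple of tensors and prints the abbreviated table; the tensor sizes are arbitrary.

```python
import torch

if torch.cuda.is_available():
    device = torch.device("cuda:0")
    x = torch.randn(1024, 1024, device=device)   # make the counters non-zero
    y = x @ x
    # abbreviated=True drops the "from large pool" / "from small pool" rows
    print(torch.cuda.memory_summary(device=device, abbreviated=True))
```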
606
+ def list_gpu_processes(device: Union[Device, int] = None) -> str:
607
+ r"""Return a human-readable printout of the running processes and their GPU memory use for a given device.
608
+
609
+ This can be useful to display periodically during training, or when
610
+ handling out-of-memory exceptions.
611
+
612
+ Args:
613
+ device (torch.device or int, optional): selected device. Returns
614
+ printout for the current device, given by :func:`~torch.cuda.current_device`,
615
+ if :attr:`device` is ``None`` (default).
616
+ """
617
+ try:
618
+ import pynvml # type: ignore[import]
619
+ except ModuleNotFoundError:
620
+ return "pynvml module not found, please install pynvml"
621
+ from pynvml import NVMLError_DriverNotLoaded
622
+
623
+ try:
624
+ pynvml.nvmlInit()
625
+ except NVMLError_DriverNotLoaded:
626
+ return "cuda driver can't be loaded, is cuda enabled?"
627
+ device = _get_nvml_device_index(device)
628
+ handle = pynvml.nvmlDeviceGetHandleByIndex(device)
629
+ procs = pynvml.nvmlDeviceGetComputeRunningProcesses(handle)
630
+ lines = []
631
+ lines.append(f"GPU:{device}")
632
+ if len(procs) == 0:
633
+ lines.append("no processes are running")
634
+ for p in procs:
635
+ mem = p.usedGpuMemory / (1024 * 1024)
636
+ lines.append(f"process {p.pid:>10d} uses {mem:>12.3f} MB GPU memory")
637
+ return "\n".join(lines)
638
+
639
+
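A hedged sketch of the pattern suggested by the docstring: print the process list when an out-of-memory error is caught (pynvml must be installed; the model/batch arguments are placeholders).

```python
import torch

def forward_or_report(model, batch):
    try:
        return model(batch)
    except torch.cuda.OutOfMemoryError:
        # Show which processes hold GPU memory before propagating the error.
        print(torch.cuda.list_gpu_processes())
        print(torch.cuda.memory_summary(abbreviated=True))
        raise
```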
640
+ def mem_get_info(device: Union[Device, int] = None) -> Tuple[int, int]:
641
+ r"""Return the global free and total GPU memory for a given device using cudaMemGetInfo.
642
+
643
+ Args:
644
+ device (torch.device or int, optional): selected device. Returns
645
+ statistic for the current device, given by :func:`~torch.cuda.current_device`,
646
+ if :attr:`device` is ``None`` (default).
647
+
648
+ .. note::
649
+ See :ref:`cuda-memory-management` for more
650
+ details about GPU memory management.
651
+ """
652
+ if device is None:
653
+ device = torch.cuda.current_device()
654
+ device = _get_device_index(device)
655
+ return torch.cuda.cudart().cudaMemGetInfo(device)
656
+
657
+
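For reference, a small sketch that turns the (free, total) byte counts returned above into a utilization figure; device index 0 is an assumption.

```python
import torch

if torch.cuda.is_available():
    free_b, total_b = torch.cuda.mem_get_info(0)
    used_b = total_b - free_b
    print(f"GPU 0: {used_b / 2**30:.2f} GiB used of {total_b / 2**30:.2f} GiB "
          f"({100.0 * used_b / total_b:.1f}%)")
```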
658
+ def _record_memory_history_legacy(
659
+ enabled: bool,
660
+ record_context=True,
661
+ trace_alloc_max_entries=1,
662
+ trace_alloc_record_context=False,
663
+ device: Union[Device, int] = None,
664
+ record_context_cpp=False,
665
+ ):
666
+ _C._cuda_record_memory_history_legacy(
667
+ enabled,
668
+ record_context,
669
+ trace_alloc_max_entries,
670
+ trace_alloc_record_context,
671
+ record_context_cpp,
672
+ )
673
+
674
+
675
+ def _record_memory_history(enabled="all", *args, **kwargs):
676
+ """Enable recording of stack traces associated with memory
677
+ allocations, so you can tell what allocated any piece of memory in
678
+ :func:`torch.cuda.memory._snapshot()`.
679
+
680
+ In addition to keeping stack traces with each current allocation and free,
681
+ this will also enable recording of a history of all alloc/free events.
682
+
683
+ Use :func:`torch.cuda.memory._snapshot()` to retrieve this information,
684
+ and the tools in `_memory_viz.py` to visualize snapshots.
685
+
686
+ The Python trace collection is fast (2us per trace), so you may consider
687
+ enabling this on production jobs if you anticipate ever having to debug
688
+ memory issues.
689
+
690
+ C++ trace collection is also fast (~50ns/frame), which for many typical programs
691
+ works out to ~2us per trace, but can vary depending on stack depth.
692
+
693
+ Args:
694
+ enabled (Literal[None, "state", "all"], optional):
695
+ `None`, disable recording memory history.
696
+ `"state"`, keep information for currenly allocated memory.
697
+ `"all"`, additionally keep a history of all alloc/free calls.
698
+ Defaults to "all".
699
+ context (Literal[None, "state", "alloc", "all"], optional):
700
+ `None`, Do not record any tracebacks.
701
+ `"state"`, Record tracebacks for currently allocated memory.
702
+ `"alloc"`, additionally keep tracebacks for alloc calls.
703
+ `"all"`, additionally keep tracebacks for free calls.
704
+ Defaults to "all".
705
+ stacks (Literal["python", "all"], optional):
706
+ `"python"`, include Python, TorchScript, and inductor frames in tracebacks
707
+ `"all"`, additionally include C++ frames
708
+ Defaults to "all".
709
+ max_entries (int, optional): Keep a maximum of `max_entries`
710
+ alloc/free events in the recorded history.
711
+ """
712
+ if isinstance(enabled, bool):
713
+ return _record_memory_history_legacy(enabled, *args, **kwargs)
714
+ else:
715
+ return _record_memory_history_impl(enabled, *args, **kwargs)
716
+
717
+
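A minimal sketch of the recording workflow described above: enable history collection, perform some allocations and frees, dump a snapshot for the pytorch.org/memory_viz viewer, then disable recording. The output file name is arbitrary.

```python
import torch

if torch.cuda.is_available():
    torch.cuda.memory._record_memory_history(enabled="all", context="all", stacks="python")
    tensors = [torch.empty(512, 512, device="cuda") for _ in range(8)]
    del tensors[::2]                        # free some blocks so the trace has free events
    torch.cuda.memory._dump_snapshot("my_snapshot.pickle")
    torch.cuda.memory._record_memory_history(enabled=None)   # stop recording
```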
718
+ def _record_memory_history_impl(
719
+ enabled: Optional[str] = "all",
720
+ context: Optional[str] = "all",
721
+ stacks: str = "all",
722
+ max_entries: int = sys.maxsize,
723
+ device: Union[Device, int] = None,
724
+ ):
725
+ _C._cuda_record_memory_history(enabled, context, stacks, max_entries)
726
+
727
+
728
+ _record_memory_history.__signature__ = signature(_record_memory_history_impl) # type: ignore[attr-defined]
729
+
730
+
731
+ def _snapshot(device: Union[Device, int] = None):
732
+ """Save a snapshot of CUDA memory state at the time it was called.
733
+
734
+ The state is represented as a dictionary with the following structure.
735
+
736
+ .. code-block:: python
737
+
738
+ class Snapshot(TypedDict):
739
+ segments : List[Segment]
740
+ device_traces: List[List[TraceEntry]]
741
+
742
+ class Segment(TypedDict):
743
+ # Segments are memory returned from a cudaMalloc call.
744
+ # The size of reserved memory is the sum of all Segments.
745
+ # Segments are cached and reused for future allocations.
746
+ # If the reuse is smaller than the segment, the segment
747
+ # is split into more than one Block.
748
+ # empty_cache() frees Segments that are entirely inactive.
749
+ address: int
750
+ total_size: int # cudaMalloc'd size of segment
751
+ stream: int
752
+ segment_type: Literal['small', 'large'] # 'large' (>1MB)
753
+ allocated_size: int # size of memory in use
754
+ active_size: int # size of memory in use or in active_awaiting_free state
755
+ blocks : List[Block]
756
+
757
+ class Block(TypedDict):
758
+ # A piece of memory returned from the allocator, or
759
+ # current cached but inactive.
760
+ size: int
761
+ requested_size: int # size requested during malloc, may be smaller than
762
+ # size due to rounding
763
+ address: int
764
+ state: Literal['active_allocated', # used by a tensor
765
+ 'active_awaiting_free', # waiting for another stream to finish using
766
+ # this, then it will become free
767
+ 'inactive',] # free for reuse
768
+ frames: List[Frame] # stack trace from where the allocation occurred
769
+
770
+ class Frame(TypedDict):
771
+ filename: str
772
+ line: int
773
+ name: str
774
+
775
+ class TraceEntry(TypedDict):
776
+ # When `torch.cuda.memory._record_memory_history()` is enabled,
777
+ # the snapshot will contain TraceEntry objects that record each
778
+ # action the allocator took.
779
+ action: Literal[
780
+ 'alloc', # memory allocated
781
+ 'free_requested', # the allocated block received a call to free memory
782
+ 'free_completed', # the memory that was requested to be freed is now
783
+ # able to be used in future allocation calls
784
+ 'segment_alloc', # the caching allocator asked cudaMalloc for more memory
785
+ # and added it as a segment in its cache
786
+ 'segment_free', # the caching allocator called cudaFree to return memory
787
+ # to cuda, possibly trying to free up memory to
788
+ # allocate more segments or because empty_cache was called
789
+ 'oom', # the allocator threw an OOM exception. 'size' is
790
+ # the requested number of bytes that did not succeed
791
+ 'snapshot' # the allocator generated a memory snapshot
792
+ # useful to correlate a previously taken
793
+ # snapshot with this trace
794
+ ]
795
+ addr: int # not present for OOM
796
+ frames: List[Frame]
797
+ size: int
798
+ stream: int
799
+ device_free: int # only present for OOM, the amount of
800
+ # memory cuda still reports to be free
801
+
802
+ Returns:
803
+ The Snapshot dictionary object
804
+ """
805
+ return _C._cuda_memorySnapshot()
806
+
807
+
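To make the structure documented above concrete, a hedged sketch that walks `snapshot["segments"]` and totals active bytes per stream; only fields listed in the docstring are used.

```python
from collections import defaultdict

import torch

if torch.cuda.is_available():
    torch.empty(1024, 1024, device="cuda")         # ensure at least one segment exists
    snapshot = torch.cuda.memory._snapshot()
    active_per_stream = defaultdict(int)
    for segment in snapshot["segments"]:
        active_per_stream[segment["stream"]] += segment["active_size"]
    for stream, nbytes in sorted(active_per_stream.items()):
        print(f"stream {stream}: {nbytes / 2**20:.1f} MiB active")
```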
808
+ def _dump_snapshot(filename="dump_snapshot.pickle"):
809
+ """
810
+ Save a pickled version of the `torch.cuda.memory._snapshot()` dictionary to a file.
811
+
812
+ This file can be opened by the interactive snapshot viewer at pytorch.org/memory_viz
813
+
814
+ Args:
815
+ filename (str, optional): Name of the file to create. Defaults to "dump_snapshot.pickle".
816
+ """
817
+ s = _snapshot()
818
+ with open(filename, "wb") as f:
819
+ pickle.dump(s, f)
820
+
821
+
822
+ def _save_segment_usage(filename="output.svg", snapshot=None):
823
+ if snapshot is None:
824
+ snapshot = _snapshot()
825
+ with open(filename, "w") as f:
826
+ f.write(_segments(snapshot))
827
+
828
+
829
+ def _save_memory_usage(filename="output.svg", snapshot=None):
830
+ if snapshot is None:
831
+ snapshot = _snapshot()
832
+ with open(filename, "w") as f:
833
+ f.write(_memory(snapshot))
834
+
835
+
836
+ def _set_allocator_settings(env: str):
837
+ return torch._C._cuda_cudaCachingAllocator_set_allocator_settings(env)
838
+
839
+
840
+ def get_allocator_backend() -> str:
841
+ r"""Return a string describing the active allocator backend as set by
842
+ ``PYTORCH_CUDA_ALLOC_CONF``. Currently available backends are
843
+ ``native`` (PyTorch's native caching allocator) and ``cudaMallocAsync``
844
+ (CUDA's built-in asynchronous allocator).
845
+
846
+ .. note::
847
+ See :ref:`cuda-memory-management` for details on choosing the allocator backend.
848
+ """
849
+ return torch._C._cuda_getAllocatorBackend()
850
+
851
+
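A short sketch of checking the backend string before relying on features that only the native caching allocator provides; note that PYTORCH_CUDA_ALLOC_CONF must be set before CUDA is initialized.

```python
import torch

if torch.cuda.is_available():
    # e.g. launched as: PYTORCH_CUDA_ALLOC_CONF=backend:cudaMallocAsync python train.py
    backend = torch.cuda.memory.get_allocator_backend()
    if backend == "native":
        print("native caching allocator in use")
    else:
        print(f"allocator backend: {backend!r}")
```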
852
+ class _CUDAAllocator:
853
+ r"""Wrapper over internal CUDA memory allocators."""
854
+
855
+ def __init__(self, allocator: torch._C._cuda_CUDAAllocator):
856
+ self._allocator = allocator
857
+
858
+ def allocator(self):
859
+ return self._allocator
860
+
861
+
862
+ class CUDAPluggableAllocator(_CUDAAllocator):
863
+ r"""CUDA memory allocator loaded from a so file."""
864
+
865
+ def __init__(self, path_to_so_file: str, alloc_fn_name: str, free_fn_name: str):
866
+ r"""Memory allocators are compiled in .so files and loaded dynamically using ctypes.
867
+
868
+ To change the active allocator, use the :func:`torch.cuda.memory.change_current_allocator` function.
869
+
870
+ Args:
871
+ path_to_so_file(str): Path in the filesystem to the `.so` file containing
872
+ the allocator functions
873
+ alloc_fn_name(str): Name of the function to perform the memory allocation
874
+ in the so file. The signature must be:
875
+ void* alloc_fn_name(ssize_t size, int device, cudaStream_t stream);
876
+ free_fn_name(str): Name of the function to perform the memory release
877
+ in the so file. The signature must be:
878
+ void free_fn_name(void* ptr, size_t size, cudaStream_t stream);
879
+
880
+ .. warning::
881
+ This is currently supported only on Unix operating systems.
882
+
883
+ .. note::
884
+ See :ref:`cuda-memory-management` for details on creating and using a custom allocator
885
+ """
886
+ allocator = ctypes.CDLL(path_to_so_file)
887
+ alloc_fn = ctypes.cast(getattr(allocator, alloc_fn_name), ctypes.c_void_p).value
888
+ free_fn = ctypes.cast(getattr(allocator, free_fn_name), ctypes.c_void_p).value
889
+ assert alloc_fn is not None
890
+ assert free_fn is not None
891
+ self._allocator = torch._C._cuda_customAllocator(alloc_fn, free_fn)
892
+
893
+
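A hedged sketch of the workflow the docstring describes; the library path ./my_alloc.so and the exported symbol names my_malloc / my_free are assumptions about a user-compiled allocator, not something shipped with PyTorch.

```python
import torch

# Hypothetical shared library exporting:
#   void* my_malloc(ssize_t size, int device, cudaStream_t stream);
#   void  my_free(void* ptr, size_t size, cudaStream_t stream);
new_alloc = torch.cuda.memory.CUDAPluggableAllocator(
    "./my_alloc.so", "my_malloc", "my_free"
)
# Must happen before the current allocator has been used/initialized.
torch.cuda.memory.change_current_allocator(new_alloc)
```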
894
+ def change_current_allocator(allocator: _CUDAAllocator) -> None:
895
+ r"""Change the currently used memory allocator to be the one provided.
896
+
897
+ If the current allocator has already been used/initialized, this function will error.
898
+
899
+
900
+ Args:
901
+ allocator (torch.cuda.memory._CUDAAllocator): allocator to be set as the active one.
902
+ .. note::
903
+ See :ref:`cuda-memory-management` for details on creating and using a custom allocator
904
+ """
905
+ torch._C._cuda_changeCurrentAllocator(allocator.allocator())
906
+
907
+
908
+ def _get_current_allocator() -> _CUDAAllocator:
909
+ r"""Return the allocator being currently used.
910
+
911
+ .. note::
912
+ See :ref:`cuda-memory-management` for details on creating and using a custom allocator
913
+ """
914
+ return _CUDAAllocator(torch._C._cuda_getAllocator())
env-llmeval/lib/python3.10/site-packages/torch/cuda/nccl.py ADDED
@@ -0,0 +1,137 @@
1
+ import collections
2
+ import warnings
3
+ from typing import Optional, Sequence, Union
4
+
5
+ import torch.cuda
6
+
7
+
8
+ __all__ = ["all_reduce", "reduce", "broadcast", "all_gather", "reduce_scatter"]
9
+
10
+ SUM = 0 # ncclRedOp_t
11
+
12
+
13
+ def is_available(tensors):
14
+ if not hasattr(torch._C, "_nccl_all_reduce"):
15
+ warnings.warn("PyTorch is not compiled with NCCL support")
16
+ return False
17
+
18
+ devices = set()
19
+ for tensor in tensors:
20
+ if tensor.is_sparse:
21
+ return False
22
+ if not tensor.is_contiguous():
23
+ return False
24
+ if not tensor.is_cuda:
25
+ return False
26
+ device = tensor.get_device()
27
+ if device in devices:
28
+ return False
29
+ devices.add(device)
30
+
31
+ return True
32
+
33
+
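A hedged sketch combining the availability check above with all_reduce; two visible GPUs are assumed.

```python
import torch
import torch.cuda.nccl as nccl

if torch.cuda.device_count() >= 2:
    # One contiguous, dense CUDA tensor per device, as is_available() requires.
    tensors = [torch.ones(4, device=f"cuda:{i}") for i in range(2)]
    if nccl.is_available(tensors):
        nccl.all_reduce(tensors)   # in-place sum: every tensor becomes [2., 2., 2., 2.]
        print(tensors[0])
```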
34
+ def version():
35
+ ver = torch._C._nccl_version()
36
+ major = ver >> 32
37
+ minor = (ver >> 16) & 65535
38
+ patch = ver & 65535
39
+ suffix = torch._C._nccl_version_suffix().decode("utf-8")
40
+ if suffix == "":
41
+ return (major, minor, patch)
42
+ else:
43
+ return (major, minor, patch, suffix)
44
+
45
+
46
+ def unique_id():
47
+ return torch._C._nccl_unique_id()
48
+
49
+
50
+ def init_rank(num_ranks, uid, rank):
51
+ return torch._C._nccl_init_rank(num_ranks, uid, rank)
52
+
53
+
54
+ def _check_sequence_type(inputs: Union[torch.Tensor, Sequence[torch.Tensor]]) -> None:
55
+ if not isinstance(inputs, collections.abc.Container) or isinstance(
56
+ inputs, torch.Tensor
57
+ ):
58
+ raise TypeError("Inputs should be a collection of tensors")
59
+
60
+
61
+ def all_reduce(inputs, outputs=None, op=SUM, streams=None, comms=None):
62
+ _check_sequence_type(inputs)
63
+ if outputs is None:
64
+ outputs = inputs
65
+ _check_sequence_type(outputs)
66
+ torch._C._nccl_all_reduce(inputs, outputs, op, streams, comms)
67
+
68
+
69
+ # `output` used to be `outputs`, taking in a list of tensors. So we have two
70
+ # arguments for BC reasons.
71
+ def reduce(
72
+ inputs: Sequence[torch.Tensor],
73
+ output: Optional[Union[torch.Tensor, Sequence[torch.Tensor]]] = None,
74
+ root: int = 0,
75
+ op: int = SUM,
76
+ streams: Optional[Sequence[torch.cuda.Stream]] = None,
77
+ comms=None,
78
+ *,
79
+ outputs: Optional[Sequence[torch.Tensor]] = None,
80
+ ) -> None:
81
+ _check_sequence_type(inputs)
82
+ _output: torch.Tensor
83
+ if outputs is not None:
84
+ if output is not None:
85
+ raise ValueError(
86
+ "'output' and 'outputs' can not be both specified. 'outputs' is deprecated in "
87
+ "favor of 'output', taking in a single output tensor. The signature of reduce is: "
88
+ "reduce(inputs, output=None, root=0, op=SUM, streams=None, comms=None)."
89
+ )
90
+ else:
91
+ warnings.warn(
92
+ "nccl.reduce with an output tensor list is deprecated. "
93
+ "Please specify a single output tensor with argument 'output' instead instead."
94
+ )
95
+ _output = outputs[root]
96
+ elif not isinstance(output, torch.Tensor) and isinstance(
97
+ output, collections.abc.Sequence
98
+ ):
99
+ # User called old API with positional arguments of list of output tensors.
100
+ warnings.warn(
101
+ "nccl.reduce with an output tensor list is deprecated. "
102
+ "Please specify a single output tensor."
103
+ )
104
+ _output = output[root]
105
+ else:
106
+ _output = inputs[root] if output is None else output
107
+ torch._C._nccl_reduce(inputs, _output, root, op, streams, comms)
108
+
109
+
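For the reduce wrapper above, a small sketch using the current single-`output` form rather than the deprecated `outputs` list; two GPUs are assumed.

```python
import torch
import torch.cuda.nccl as nccl

if torch.cuda.device_count() >= 2:
    inputs = [torch.full((4,), float(i + 1), device=f"cuda:{i}") for i in range(2)]
    result = torch.empty(4, device="cuda:0")
    nccl.reduce(inputs, output=result, root=0, op=nccl.SUM)   # result holds 1 + 2 = 3
    print(result)
```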
110
+ def broadcast(
111
+ inputs: Sequence[torch.Tensor], root: int = 0, streams=None, comms=None
112
+ ) -> None:
113
+ _check_sequence_type(inputs)
114
+ torch._C._nccl_broadcast(inputs, root, streams, comms)
115
+
116
+
117
+ def all_gather(
118
+ inputs: Sequence[torch.Tensor],
119
+ outputs: Sequence[torch.Tensor],
120
+ streams=None,
121
+ comms=None,
122
+ ) -> None:
123
+ _check_sequence_type(inputs)
124
+ _check_sequence_type(outputs)
125
+ torch._C._nccl_all_gather(inputs, outputs, streams, comms)
126
+
127
+
128
+ def reduce_scatter(
129
+ inputs: Sequence[torch.Tensor],
130
+ outputs: Sequence[torch.Tensor],
131
+ op: int = SUM,
132
+ streams=None,
133
+ comms=None,
134
+ ) -> None:
135
+ _check_sequence_type(inputs)
136
+ _check_sequence_type(outputs)
137
+ torch._C._nccl_reduce_scatter(inputs, outputs, op, streams, comms)
env-llmeval/lib/python3.10/site-packages/torch/cuda/nvtx.py ADDED
@@ -0,0 +1,91 @@
1
+ r"""This package adds support for NVIDIA Tools Extension (NVTX) used in profiling."""
2
+
3
+ from contextlib import contextmanager
4
+
5
+ try:
6
+ from torch._C import _nvtx
7
+ except ImportError:
8
+
9
+ class _NVTXStub:
10
+ @staticmethod
11
+ def _fail(*args, **kwargs):
12
+ raise RuntimeError(
13
+ "NVTX functions not installed. Are you sure you have a CUDA build?"
14
+ )
15
+
16
+ rangePushA = _fail
17
+ rangePop = _fail
18
+ markA = _fail
19
+
20
+ _nvtx = _NVTXStub() # type: ignore[assignment]
21
+
22
+ __all__ = ["range_push", "range_pop", "range_start", "range_end", "mark", "range"]
23
+
24
+
25
+ def range_push(msg):
26
+ """
27
+ Push a range onto a stack of nested range spans. Returns the zero-based depth of the range that is started.
28
+
29
+ Args:
30
+ msg (str): ASCII message to associate with range
31
+ """
32
+ return _nvtx.rangePushA(msg)
33
+
34
+
35
+ def range_pop():
36
+ """Pop a range off of a stack of nested range spans. Returns the zero-based depth of the range that is ended."""
37
+ return _nvtx.rangePop()
38
+
39
+
40
+ def range_start(msg) -> int:
41
+ """
42
+ Mark the start of a range with a string message. It returns a unique handle
43
+ for this range to pass to the corresponding call to rangeEnd().
44
+
45
+ A key difference between this and range_push/range_pop is that the
46
+ range_start/range_end version supports ranges across threads (start on one
47
+ thread and end on another thread).
48
+
49
+ Returns: A range handle (uint64_t) that can be passed to range_end().
50
+
51
+ Args:
52
+ msg (str): ASCII message to associate with the range.
53
+ """
54
+ return _nvtx.rangeStartA(msg)
55
+
56
+
57
+ def range_end(range_id) -> None:
58
+ """
59
+ Mark the end of a range for a given range_id.
60
+
61
+ Args:
62
+ range_id (int): a unique handle for the start range.
63
+ """
64
+ _nvtx.rangeEnd(range_id)
65
+
66
+
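A small sketch of the cross-thread usage mentioned in the range_start docstring: the handle is created on one thread and closed on another (requires a CUDA build; the worker's payload is a placeholder).

```python
import threading

import torch.cuda.nvtx as nvtx

handle = nvtx.range_start("data_loading")   # begin the range on the main thread

def worker(h):
    # ... work that logically belongs to the range ...
    nvtx.range_end(h)                        # end the same range on a worker thread

t = threading.Thread(target=worker, args=(handle,))
t.start()
t.join()
```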
67
+ def mark(msg):
68
+ """
69
+ Describe an instantaneous event that occurred at some point.
70
+
71
+ Args:
72
+ msg (str): ASCII message to associate with the event.
73
+ """
74
+ return _nvtx.markA(msg)
75
+
76
+
77
+ @contextmanager
78
+ def range(msg, *args, **kwargs):
79
+ """
80
+ Context manager / decorator that pushes an NVTX range at the beginning
81
+ of its scope, and pops it at the end. If extra arguments are given,
82
+ they are passed as arguments to msg.format().
83
+
84
+ Args:
85
+ msg (str): message to associate with the range
86
+ """
87
+ range_push(msg.format(*args, **kwargs))
88
+ try:
89
+ yield
90
+ finally:
91
+ range_pop()
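To round out the module, a minimal sketch using the range context manager above around a forward pass together with mark; the tiny model is illustrative and a CUDA build is assumed.

```python
import torch
import torch.cuda.nvtx as nvtx

model = torch.nn.Linear(16, 4).cuda()
x = torch.randn(8, 16, device="cuda")

for step in range(3):
    with nvtx.range("forward step {}", step):   # message is built via msg.format(*args)
        y = model(x)
    nvtx.mark(f"step {step} done")
```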
env-llmeval/lib/python3.10/site-packages/torch/cuda/profiler.py ADDED
@@ -0,0 +1,61 @@
1
+ import contextlib
2
+ import tempfile
3
+
4
+ import torch
5
+ from . import check_error, cudart
6
+
7
+ __all__ = ["init", "start", "stop", "profile"]
8
+
9
+ DEFAULT_FLAGS = [
10
+ "gpustarttimestamp",
11
+ "gpuendtimestamp",
12
+ "gridsize3d",
13
+ "threadblocksize",
14
+ "streamid",
15
+ "enableonstart 0",
16
+ "conckerneltrace",
17
+ ]
18
+
19
+
20
+ def init(output_file, flags=None, output_mode="key_value"):
21
+ rt = cudart()
22
+ if not hasattr(rt, "cudaOutputMode"):
23
+ raise AssertionError("HIP does not support profiler initialization!")
24
+ if (
25
+ hasattr(torch.version, "cuda")
26
+ and torch.version.cuda is not None
27
+ and int(torch.version.cuda.split(".")[0]) >= 12
28
+ ):
29
+ # Check https://github.com/pytorch/pytorch/pull/91118
30
+ # cudaProfilerInitialize is no longer needed after CUDA 12
31
+ raise AssertionError("CUDA12+ does not need profiler initialization!")
32
+ flags = DEFAULT_FLAGS if flags is None else flags
33
+ if output_mode == "key_value":
34
+ output_mode_enum = rt.cudaOutputMode.KeyValuePair
35
+ elif output_mode == "csv":
36
+ output_mode_enum = rt.cudaOutputMode.CSV
37
+ else:
38
+ raise RuntimeError(
39
+ "supported CUDA profiler output modes are: key_value and csv"
40
+ )
41
+ with tempfile.NamedTemporaryFile(delete=True) as f:
42
+ f.write(b"\n".join(f.encode("ascii") for f in flags))
43
+ f.flush()
44
+ check_error(rt.cudaProfilerInitialize(f.name, output_file, output_mode_enum))
45
+
46
+
47
+ def start():
48
+ check_error(cudart().cudaProfilerStart())
49
+
50
+
51
+ def stop():
52
+ check_error(cudart().cudaProfilerStop())
53
+
54
+
55
+ @contextlib.contextmanager
56
+ def profile():
57
+ try:
58
+ start()
59
+ yield
60
+ finally:
61
+ stop()
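Finally, a hedged sketch of profile() in use: start()/stop() delimit the region an external profiler captures, for example Nsight Systems launched with --capture-range=cudaProfilerApi; the model and iteration count are arbitrary.

```python
import torch
import torch.cuda.profiler as profiler

model = torch.nn.Linear(128, 128).cuda()
x = torch.randn(64, 128, device="cuda")

# e.g. run as: nsys profile --capture-range=cudaProfilerApi python script.py
with profiler.profile():
    for _ in range(10):
        y = model(x)
    torch.cuda.synchronize()
```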