applied-ai-018 committed
Commit 049ff97 (verified)
Parent(s): 5a15ca8

Add files using upload-large-folder tool
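For reference, a commit like this is typically produced with the Hugging Face "upload-large-folder" tooling named in the message. A minimal sketch, assuming the `huggingface_hub` Python client's `HfApi.upload_large_folder` API; the repo id and local path below are placeholders, not values taken from this commit:

import os
from huggingface_hub import HfApi

# Hypothetical reproduction of an upload like this one; repo_id and
# folder_path are placeholders, not values from this commit.
api = HfApi(token=os.environ.get("HF_TOKEN"))
api.upload_large_folder(
    repo_id="applied-ai-018/example-repo",  # placeholder
    repo_type="model",
    folder_path="./training_run",           # local folder holding ckpts/, venv/, ...
)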

This view is limited to 50 files because it contains too many changes.
Files changed (50)
  1. ckpts/universal/global_step120/zero/20.attention.query_key_value.weight/exp_avg_sq.pt +3 -0
  2. ckpts/universal/global_step120/zero/20.mlp.dense_h_to_4h.weight/exp_avg.pt +3 -0
  3. ckpts/universal/global_step120/zero/25.post_attention_layernorm.weight/exp_avg.pt +3 -0
  4. venv/lib/python3.10/site-packages/torch/_C/_VariableFunctions.pyi +0 -0
  5. venv/lib/python3.10/site-packages/torch/_C/__init__.pyi +0 -0
  6. venv/lib/python3.10/site-packages/torch/_C/_aoti.pyi +3 -0
  7. venv/lib/python3.10/site-packages/torch/_C/_autograd.pyi +123 -0
  8. venv/lib/python3.10/site-packages/torch/_C/_cpu.pyi +5 -0
  9. venv/lib/python3.10/site-packages/torch/_C/_cudnn.pyi +17 -0
  10. venv/lib/python3.10/site-packages/torch/_C/_distributed_autograd.pyi +26 -0
  11. venv/lib/python3.10/site-packages/torch/_C/_distributed_c10d.pyi +590 -0
  12. venv/lib/python3.10/site-packages/torch/_C/_distributed_rpc.pyi +188 -0
  13. venv/lib/python3.10/site-packages/torch/_C/_distributed_rpc_testing.pyi +35 -0
  14. venv/lib/python3.10/site-packages/torch/_C/_functions.pyi +11 -0
  15. venv/lib/python3.10/site-packages/torch/_C/_functorch.pyi +77 -0
  16. venv/lib/python3.10/site-packages/torch/_C/_itt.pyi +5 -0
  17. venv/lib/python3.10/site-packages/torch/_C/_lazy.pyi +28 -0
  18. venv/lib/python3.10/site-packages/torch/_C/_lazy_ts_backend.pyi +11 -0
  19. venv/lib/python3.10/site-packages/torch/_C/_monitor.pyi +44 -0
  20. venv/lib/python3.10/site-packages/torch/_C/_nn.pyi +86 -0
  21. venv/lib/python3.10/site-packages/torch/_C/_nvtx.pyi +6 -0
  22. venv/lib/python3.10/site-packages/torch/_C/_onnx.pyi +40 -0
  23. venv/lib/python3.10/site-packages/torch/_C/_profiler.pyi +238 -0
  24. venv/lib/python3.10/site-packages/torch/_C/_verbose.pyi +3 -0
  25. venv/lib/python3.10/site-packages/torch/contrib/__init__.py +0 -0
  26. venv/lib/python3.10/site-packages/torch/contrib/__pycache__/__init__.cpython-310.pyc +0 -0
  27. venv/lib/python3.10/site-packages/torch/contrib/__pycache__/_tensorboard_vis.cpython-310.pyc +0 -0
  28. venv/lib/python3.10/site-packages/torch/contrib/_tensorboard_vis.py +142 -0
  29. venv/lib/python3.10/site-packages/torch/nested/__init__.py +253 -0
  30. venv/lib/python3.10/site-packages/torch/nested/__pycache__/__init__.cpython-310.pyc +0 -0
  31. venv/lib/python3.10/site-packages/torch/nested/_internal/nested_tensor.py +431 -0
  32. venv/lib/python3.10/site-packages/torch/nested/_internal/ops.py +1120 -0
  33. venv/lib/python3.10/site-packages/torch/nested/_internal/sdpa.py +780 -0
  34. venv/lib/python3.10/site-packages/torch/onnx/_internal/fx/__init__.py +8 -0
  35. venv/lib/python3.10/site-packages/torch/onnx/_internal/fx/__pycache__/__init__.cpython-310.pyc +0 -0
  36. venv/lib/python3.10/site-packages/torch/onnx/_internal/fx/__pycache__/_pass.cpython-310.pyc +0 -0
  37. venv/lib/python3.10/site-packages/torch/onnx/_internal/fx/__pycache__/decomposition_skip.cpython-310.pyc +0 -0
  38. venv/lib/python3.10/site-packages/torch/onnx/_internal/fx/__pycache__/decomposition_table.cpython-310.pyc +0 -0
  39. venv/lib/python3.10/site-packages/torch/onnx/_internal/fx/__pycache__/diagnostics.cpython-310.pyc +0 -0
  40. venv/lib/python3.10/site-packages/torch/onnx/_internal/fx/__pycache__/dynamo_graph_extractor.cpython-310.pyc +0 -0
  41. venv/lib/python3.10/site-packages/torch/onnx/_internal/fx/__pycache__/fx_onnx_interpreter.cpython-310.pyc +0 -0
  42. venv/lib/python3.10/site-packages/torch/onnx/_internal/fx/__pycache__/fx_symbolic_graph_extractor.cpython-310.pyc +0 -0
  43. venv/lib/python3.10/site-packages/torch/onnx/_internal/fx/__pycache__/onnxfunction_dispatcher.cpython-310.pyc +0 -0
  44. venv/lib/python3.10/site-packages/torch/onnx/_internal/fx/__pycache__/op_validation.cpython-310.pyc +0 -0
  45. venv/lib/python3.10/site-packages/torch/onnx/_internal/fx/__pycache__/patcher.cpython-310.pyc +0 -0
  46. venv/lib/python3.10/site-packages/torch/onnx/_internal/fx/__pycache__/registration.cpython-310.pyc +0 -0
  47. venv/lib/python3.10/site-packages/torch/onnx/_internal/fx/__pycache__/serialization.cpython-310.pyc +0 -0
  48. venv/lib/python3.10/site-packages/torch/onnx/_internal/fx/__pycache__/torch_export_graph_extractor.cpython-310.pyc +0 -0
  49. venv/lib/python3.10/site-packages/torch/onnx/_internal/fx/__pycache__/type_utils.cpython-310.pyc +0 -0
  50. venv/lib/python3.10/site-packages/torch/onnx/_internal/fx/_pass.py +325 -0
ckpts/universal/global_step120/zero/20.attention.query_key_value.weight/exp_avg_sq.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:15700589f296845f2d0beb7dfddf71f97d8d1eb420d5a2b4387ae75b5510be67
+ size 50332843
ckpts/universal/global_step120/zero/20.mlp.dense_h_to_4h.weight/exp_avg.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:a205de8822f22125338bc73b6e4416ca501f8a31d3fb2f622ab1d37afa2f10d9
+ size 33555612
ckpts/universal/global_step120/zero/25.post_attention_layernorm.weight/exp_avg.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:7ef91423cd5f8b3b965c03f597b64aedb8a8b36744d94c84b2fb44fdec9dddaf
+ size 9372
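The three checkpoint diffs above are Git LFS pointer files rather than the tensors themselves: each records only the LFS spec version, the SHA-256 of the real payload, and its size in bytes. A small illustrative parser (a hypothetical helper, not part of this repository):

# Illustrative only: parse a Git LFS pointer file such as the ones added above.
def parse_lfs_pointer(text: str) -> dict:
    # Each pointer line is "<key> <value>", e.g. "size 50332843".
    fields = dict(line.split(" ", 1) for line in text.strip().splitlines())
    return {
        "version": fields["version"],
        "sha256": fields["oid"].removeprefix("sha256:"),
        "size_bytes": int(fields["size"]),
    }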
venv/lib/python3.10/site-packages/torch/_C/_VariableFunctions.pyi ADDED
The diff for this file is too large to render.
 
venv/lib/python3.10/site-packages/torch/_C/__init__.pyi ADDED
The diff for this file is too large to render.
 
venv/lib/python3.10/site-packages/torch/_C/_aoti.pyi ADDED
@@ -0,0 +1,3 @@
+ # Defined in torch/csrc/inductor/aoti_runner/pybind.cpp
+ class AOTIModelContainerRunnerCpu: ...
+ class AOTIModelContainerRunnerCuda: ...
venv/lib/python3.10/site-packages/torch/_C/_autograd.pyi ADDED
@@ -0,0 +1,123 @@
1
+ from enum import Enum
2
+ from typing import Any, Callable, List, Optional, Set
3
+
4
+ import torch
5
+
6
+ from ._profiler import (
7
+ _ProfilerEvent,
8
+ ActiveProfilerType,
9
+ ProfilerActivity,
10
+ ProfilerConfig,
11
+ )
12
+
13
+ # Defined in tools/autograd/init.cpp
14
+
15
+ class DeviceType(Enum):
16
+ CPU = ...
17
+ CUDA = ...
18
+ MKLDNN = ...
19
+ OPENGL = ...
20
+ OPENCL = ...
21
+ IDEEP = ...
22
+ HIP = ...
23
+ FPGA = ...
24
+ ORT = ...
25
+ XLA = ...
26
+ MPS = ...
27
+ HPU = ...
28
+ Meta = ...
29
+ Vulkan = ...
30
+ Metal = ...
31
+ PrivateUse1 = ...
32
+
33
+ class ProfilerEvent:
34
+ def cpu_elapsed_us(self, other: ProfilerEvent) -> float: ...
35
+ def cpu_memory_usage(self) -> int: ...
36
+ def cuda_elapsed_us(self, other: ProfilerEvent) -> float: ...
37
+ def privateuse1_elapsed_us(self, other: ProfilerEvent) -> float: ...
38
+ def cuda_memory_usage(self) -> int: ...
39
+ def device(self) -> int: ...
40
+ def handle(self) -> int: ...
41
+ def has_cuda(self) -> bool: ...
42
+ def is_remote(self) -> bool: ...
43
+ def kind(self) -> int: ...
44
+ def name(self) -> str: ...
45
+ def node_id(self) -> int: ...
46
+ def sequence_nr(self) -> int: ...
47
+ def shapes(self) -> List[List[int]]: ...
48
+ def thread_id(self) -> int: ...
49
+ def flops(self) -> float: ...
50
+ def is_async(self) -> bool: ...
51
+
52
+ class _KinetoEvent:
53
+ def name(self) -> str: ...
54
+ def device_index(self) -> int: ...
55
+ def start_us(self) -> int: ...
56
+ def duration_us(self) -> int: ...
57
+ def is_async(self) -> bool: ...
58
+ def linked_correlation_id(self) -> int: ...
59
+ def shapes(self) -> List[List[int]]: ...
60
+ def dtypes(self) -> List[str]: ...
61
+ def concrete_inputs(self) -> List[Any]: ...
62
+ def device_type(self) -> DeviceType: ...
63
+ def start_thread_id(self) -> int: ...
64
+ def end_thread_id(self) -> int: ...
65
+ def correlation_id(self) -> int: ...
66
+ def fwd_thread_id(self) -> int: ...
67
+ def stack(self) -> List[str]: ...
68
+ def scope(self) -> int: ...
69
+ def sequence_nr(self) -> int: ...
70
+ def flops(self) -> int: ...
71
+ def cuda_elapsed_us(self) -> int: ...
72
+ def privateuse1_elapsed_us(self) -> int: ...
73
+
74
+ class _ProfilerResult:
75
+ def events(self) -> List[_KinetoEvent]: ...
76
+ def legacy_events(self) -> List[List[ProfilerEvent]]: ...
77
+ def save(self, path: str) -> None: ...
78
+ def experimental_event_tree(self) -> List[_ProfilerEvent]: ...
79
+ def trace_start_us(self) -> int: ...
80
+
81
+ class SavedTensor: ...
82
+
83
+ def _enable_profiler(
84
+ config: ProfilerConfig,
85
+ activities: Set[ProfilerActivity],
86
+ ) -> None: ...
87
+ def _prepare_profiler(
88
+ config: ProfilerConfig,
89
+ activities: Set[ProfilerActivity],
90
+ ) -> None: ...
91
+ def _disable_profiler() -> _ProfilerResult: ...
92
+ def _profiler_enabled() -> bool: ...
93
+ def _add_metadata_json(key: str, value: str) -> None: ...
94
+ def _kineto_step() -> None: ...
95
+ def _get_sequence_nr() -> int: ...
96
+ def kineto_available() -> bool: ...
97
+ def _record_function_with_args_enter(name: str, *args) -> torch.Tensor: ...
98
+ def _record_function_with_args_exit(handle: torch.Tensor) -> None: ...
99
+ def _supported_activities() -> Set[ProfilerActivity]: ...
100
+ def _enable_record_function(enable: bool) -> None: ...
101
+ def _set_empty_test_observer(is_global: bool, sampling_prob: float) -> None: ...
102
+ def _push_saved_tensors_default_hooks(
103
+ pack_hook: Callable[[torch.Tensor], Any],
104
+ unpack_hook: Callable[[Any], torch.Tensor],
105
+ ) -> None: ...
106
+ def _pop_saved_tensors_default_hooks() -> None: ...
107
+ def _unsafe_set_version_counter(t: torch.Tensor, prev_version: int) -> None: ...
108
+ def _enable_profiler_legacy(config: ProfilerConfig) -> None: ...
109
+ def _disable_profiler_legacy() -> List[List[ProfilerEvent]]: ...
110
+ def _profiler_type() -> ActiveProfilerType: ...
111
+ def _saved_tensors_hooks_enable() -> None: ...
112
+ def _saved_tensors_hooks_disable(message: str) -> None: ...
113
+ def _saved_tensors_hooks_get_disabled_error_message() -> Optional[str]: ...
114
+
115
+ class CreationMeta(Enum):
116
+ DEFAULT = ...
117
+ IN_CUSTOM_FUNCTION = ...
118
+ MULTI_OUTPUT_NODE = ...
119
+ NO_GRAD_MODE = ...
120
+ INFERENCE_MODE = ...
121
+
122
+ def _set_creation_meta(t: torch.Tensor, creation_meta: CreationMeta) -> None: ...
123
+ def _get_creation_meta(t: torch.Tensor) -> CreationMeta: ...
venv/lib/python3.10/site-packages/torch/_C/_cpu.pyi ADDED
@@ -0,0 +1,5 @@
+ from torch.types import _bool
+
+ # Defined in torch/csrc/cpu/Module.cpp
+
+ def _is_cpu_support_vnni() -> _bool: ...
venv/lib/python3.10/site-packages/torch/_C/_cudnn.pyi ADDED
@@ -0,0 +1,17 @@
+ from enum import Enum
+
+ from torch.types import _bool, Tuple
+
+ # Defined in torch/csrc/cuda/shared/cudnn.cpp
+ is_cuda: _bool
+
+ def getRuntimeVersion() -> Tuple[int, int, int]: ...
+ def getCompileVersion() -> Tuple[int, int, int]: ...
+ def getVersionInt() -> int: ...
+
+ class RNNMode(int, Enum):
+     value: int
+     rnn_relu = ...
+     rnn_tanh = ...
+     lstm = ...
+     gru = ...
venv/lib/python3.10/site-packages/torch/_C/_distributed_autograd.pyi ADDED
@@ -0,0 +1,26 @@
+ from typing import Any, Dict, List, Set
+
+ import torch
+
+ # This module is defined in torch/csrc/distributed/autograd/init.cpp
+
+ class DistAutogradContext:
+     def _context_id(self) -> int: ...
+     def _recv_functions(self) -> Dict[int, Any]: ...
+     def _send_functions(self) -> Dict[int, Any]: ...
+     def _known_worker_ids(self) -> Set[int]: ...
+
+ def _new_context() -> DistAutogradContext: ...
+ def _release_context(context_id: int) -> None: ...
+ def _get_max_id() -> int: ...
+ def _is_valid_context(worker_id: int) -> bool: ...
+ def _retrieve_context(context_id: int) -> DistAutogradContext: ...
+ def _current_context() -> DistAutogradContext: ...
+ def _init(worker_id: int) -> None: ...
+ def _get_debug_info() -> Dict[str, str]: ...
+ def backward(
+     context_id: int,
+     roots: List[torch.Tensor],
+     retain_graph=False,
+ ) -> None: ...
+ def get_gradients(context_id: int) -> Dict[torch.Tensor, torch.Tensor]: ...
venv/lib/python3.10/site-packages/torch/_C/_distributed_c10d.pyi ADDED
@@ -0,0 +1,590 @@
1
+ # mypy: disable-error-code="type-arg"
2
+ from datetime import timedelta
3
+ from enum import Enum
4
+ from typing import Any, Dict, List, Optional, overload, Tuple, Union
5
+
6
+ import torch
7
+ from torch import Tensor
8
+ from torch._C import ScriptObject
9
+ from torch.futures import Future
10
+
11
+ # This module is defined in torch/csrc/distributed/c10d/init.cpp
12
+
13
+ _DEFAULT_FIRST_BUCKET_BYTES: int
14
+ _DEFAULT_NO_TIMEOUT: timedelta
15
+ _DEFAULT_PG_TIMEOUT: timedelta
16
+ _DEFAULT_PG_NCCL_TIMEOUT: timedelta
17
+
18
+ class BuiltinCommHookType(Enum):
19
+ ALLREDUCE = ...
20
+ FP16_COMPRESS = ...
21
+
22
+ def _register_comm_hook(reducer: Reducer, state: Any, comm_hook: Any): ...
23
+ def _register_builtin_comm_hook(
24
+ reducer: Reducer,
25
+ comm_hook_type: BuiltinCommHookType,
26
+ ): ...
27
+ def _set_global_rank(rank: int) -> None: ...
28
+ def _hash_tensors(tensors: List[Tensor]) -> int: ...
29
+
30
+ class GradBucket:
31
+ def index(self) -> int: ...
32
+ def buffer(self) -> Tensor: ...
33
+ def gradients(self) -> List[Tensor]: ...
34
+ def is_last(self) -> bool: ...
35
+ def set_buffer(self, tensor: Tensor) -> None: ...
36
+ def parameters(self) -> List[Tensor]: ...
37
+
38
+ class Reducer:
39
+ def __init__(
40
+ self,
41
+ params: List[Tensor],
42
+ bucket_indices: List[List[int]],
43
+ per_bucket_size_limits: List[int],
44
+ process_group: ProcessGroup,
45
+ expect_sparse_gradients: List[bool] = ...,
46
+ bucket_bytes_cap: int = ..., # kDefaultBucketBytesCap in reducer.hpp
47
+ find_unused_parameters: bool = ...,
48
+ gradient_as_bucket_view: bool = ...,
49
+ param_to_name_mapping: Dict[int, str] = ...,
50
+ first_bucket_types_cap: int = ..., # kDefaultFirstBucketBytes in reducer.hpp
51
+ ): ...
52
+ def prepare_for_forward(self) -> None: ...
53
+ def prepare_for_backward(self, output: List[Tensor]) -> None: ...
54
+ def get_backward_stats(self) -> List[int]: ...
55
+ def _install_post_backward_futures(self, futures: List[Future]) -> None: ...
56
+ def _rebuild_buckets(self) -> bool: ...
57
+ def _get_zeros_like_grad_buckets(self) -> List[GradBucket]: ...
58
+ def _push_all_rebuilt_params(self) -> None: ...
59
+ def _set_forward_pass_work_handle(
60
+ self,
61
+ work: Work,
62
+ use_static_world_size: bool,
63
+ ): ...
64
+ def _get_local_used_map(self) -> Tensor: ...
65
+ def _set_ddp_runtime_logging_sample_rate(self, sample_rate: int) -> None: ...
66
+ def _set_static_graph(self) -> None: ...
67
+ def _run_comm_hook(self, bucket: GradBucket) -> Future: ...
68
+ def set_logger(self, logger: Logger) -> None: ...
69
+ def _remove_autograd_hooks(self) -> None: ...
70
+ def _check_reducer_finalized(self) -> None: ...
71
+ def _set_sparse_metadata(self, global_unique_ids: Dict[str, Tensor]) -> None: ...
72
+ def _reset_state(self) -> None: ...
73
+ def _update_process_group(self, new_process_group: ProcessGroup) -> None: ...
74
+
75
+ class DDPLoggingData:
76
+ strs_map: Dict[str, str]
77
+ ints_map: Dict[str, int]
78
+
79
+ class Logger:
80
+ def __init__(self, reducer: Reducer): ...
81
+ def set_construction_data_and_log(
82
+ self,
83
+ module_name: str,
84
+ device_ids: List[int],
85
+ output_device: int,
86
+ broadcast_buffers: bool,
87
+ has_sync_bn: bool,
88
+ static_graph: bool,
89
+ ): ...
90
+ def set_runtime_stats_and_log(self) -> None: ...
91
+ def set_error_and_log(self, error: str) -> None: ...
92
+ def _get_ddp_logging_data(self) -> DDPLoggingData: ...
93
+ def _set_comm_hook_name(self, comm_hook: str) -> None: ...
94
+ def _set_uneven_input_join(self) -> None: ...
95
+ def _set_static_graph(self) -> None: ...
96
+
97
+ def get_debug_level(): ...
98
+ def set_debug_level(): ...
99
+ def set_debug_level_from_env(): ...
100
+
101
+ class DebugLevel(Enum):
102
+ OFF = ...
103
+ INFO = ...
104
+ DETAIL = ...
105
+
106
+ class ReduceOp:
107
+ def __init__(self, op: RedOpType): ...
108
+
109
+ SUM: RedOpType = ...
110
+ AVG: RedOpType = ...
111
+ PRODUCT: RedOpType = ...
112
+ MIN: RedOpType = ...
113
+ MAX: RedOpType = ...
114
+ BAND: RedOpType = ...
115
+ BOR: RedOpType = ...
116
+ BXOR: RedOpType = ...
117
+ PREMUL_SUM: RedOpType = ...
118
+ UNUSED: RedOpType = ...
119
+
120
+ class RedOpType(Enum): ...
121
+
122
+ class BroadcastOptions:
123
+ rootRank: int
124
+ rootTensor: int
125
+ timeout: timedelta
126
+ asyncOp: bool
127
+
128
+ class AllreduceOptions:
129
+ reduceOp: ReduceOp
130
+ timeout: timedelta
131
+
132
+ class AllreduceCoalescedOptions(AllreduceOptions): ...
133
+
134
+ class ReduceOptions:
135
+ reduceOp: ReduceOp
136
+ rootRank: int
137
+ rootTensor: int
138
+ timeout: timedelta
139
+
140
+ class AllgatherOptions:
141
+ timeout: timedelta
142
+ asyncOp: bool
143
+
144
+ class GatherOptions:
145
+ rootRank: int
146
+ timeout: timedelta
147
+
148
+ class ScatterOptions:
149
+ rootRank: int
150
+ timeout: timedelta
151
+ asyncOp: bool
152
+
153
+ class ReduceScatterOptions:
154
+ reduceOp: ReduceOp
155
+ timeout: timedelta
156
+ asyncOp: bool
157
+
158
+ class BarrierOptions:
159
+ device_ids: List[int]
160
+ device: torch.device
161
+ timeout: timedelta
162
+
163
+ class AllToAllOptions:
164
+ timeout: timedelta
165
+
166
+ class Store:
167
+ def set(self, key: str, value: str): ...
168
+ def get(self, key: str) -> bytes: ...
169
+ def add(self, key: str, value: int) -> int: ...
170
+ def compare_set(
171
+ self,
172
+ key: str,
173
+ expected_value: str,
174
+ desired_value: str,
175
+ ) -> bytes: ...
176
+ def delete_key(self, key: str) -> bool: ...
177
+ def num_keys(self) -> int: ...
178
+ def set_timeout(self, timeout: timedelta): ...
179
+ @overload
180
+ def wait(self, keys: List[str]): ...
181
+ @overload
182
+ def wait(self, keys: List[str], timeout: timedelta): ...
183
+
184
+ class FileStore(Store):
185
+ def __init__(self, path: str, numWorkers: int = ...): ...
186
+
187
+ class HashStore(Store):
188
+ def __init__(self): ...
189
+
190
+ class TCPStore(Store):
191
+ def __init__(
192
+ self,
193
+ host_name: str,
194
+ port: int,
195
+ world_size: Optional[int] = ...,
196
+ is_master: bool = ...,
197
+ timeout: timedelta = ...,
198
+ wait_for_workers: bool = ...,
199
+ multi_tenant: bool = ...,
200
+ master_listen_fd: Optional[int] = ...,
201
+ use_libuv: Optional[bool] = ...,
202
+ ): ...
203
+ @property
204
+ def host(self) -> str: ...
205
+ @property
206
+ def port(self) -> int: ...
207
+
208
+ class PrefixStore(Store):
209
+ def __init__(self, prefix: str, store: Store): ...
210
+ @property
211
+ def underlying_store(self) -> Store: ...
212
+
213
+ class _DistributedBackendOptions:
214
+ def __init__(self): ...
215
+ @property
216
+ def store(self) -> Store: ...
217
+ @store.setter
218
+ def store(self, store: Store) -> None: ...
219
+ @property
220
+ def group_rank(self) -> int: ...
221
+ @group_rank.setter
222
+ def group_rank(self, rank: int) -> None: ...
223
+ @property
224
+ def group_size(self) -> int: ...
225
+ @group_size.setter
226
+ def group_size(self, size: int) -> None: ...
227
+ @property
228
+ def timeout(self) -> timedelta: ...
229
+ @timeout.setter
230
+ def timeout(self, timeout: timedelta) -> None: ...
231
+ @property
232
+ def group_id(self) -> str: ...
233
+ @group_id.setter
234
+ def group_id(self, group_id: str) -> None: ...
235
+ @property
236
+ def global_ranks_in_group(self) -> List[int]: ...
237
+ @global_ranks_in_group.setter
238
+ def global_ranks_in_group(self, ranks: List[int]) -> None: ...
239
+
240
+ class Work:
241
+ def is_completed(self) -> bool: ...
242
+ def is_success(self) -> bool: ...
243
+ def exception(self) -> Any: ...
244
+ def wait(self, timeout: timedelta = ...) -> bool: ...
245
+ def get_future(self) -> Future: ...
246
+ def source_rank(self) -> int: ...
247
+ def _source_rank(self) -> int: ...
248
+ def result(self) -> List[Tensor]: ...
249
+ def synchronize(self): ...
250
+ def boxed(self) -> ScriptObject: ...
251
+ @staticmethod
252
+ def unbox(obj: ScriptObject) -> Work: ...
253
+
254
+ class Backend:
255
+ def __init__(
256
+ self,
257
+ rank: int,
258
+ size: int,
259
+ ): ...
260
+ @property
261
+ def supports_splitting(self) -> bool: ...
262
+ def rank(self) -> int: ...
263
+ def size(self) -> int: ...
264
+ def eager_connect_single_device(self, device: Optional[torch.device]) -> None: ...
265
+ def _set_sequence_number_for_group(self) -> None: ...
266
+
267
+ class ProcessGroup:
268
+ class Options:
269
+ def __init__(self, backend: str, timeout: timedelta = ...): ...
270
+ @property
271
+ def backend(self) -> str: ...
272
+ @property
273
+ def _timeout(self) -> timedelta: ...
274
+ @_timeout.setter
275
+ def _timeout(self, val: timedelta) -> None: ...
276
+
277
+ class BackendType(Enum):
278
+ UNDEFINED = ...
279
+ GLOO = ...
280
+ NCCL = ...
281
+ UCC = ...
282
+ MPI = ...
283
+ CUSTOM = ...
284
+ def __init__(self, store: Store, rank: int, size: int, options: Options): ...
285
+ def rank(self) -> int: ...
286
+ def size(self) -> int: ...
287
+ @overload
288
+ def broadcast(
289
+ self,
290
+ tensors: List[Tensor],
291
+ opts=...,
292
+ ) -> Work: ...
293
+ @overload
294
+ def broadcast(
295
+ self,
296
+ tensor: Tensor,
297
+ root: int,
298
+ ) -> Work: ...
299
+ @overload
300
+ def allreduce(
301
+ self,
302
+ tensors: List[Tensor],
303
+ opts: AllreduceOptions = ...,
304
+ ) -> Work: ...
305
+ @overload
306
+ def allreduce(
307
+ self,
308
+ tensors: List[Tensor],
309
+ op=...,
310
+ ) -> Work: ...
311
+ @overload
312
+ def allreduce(
313
+ self,
314
+ tensor: Tensor,
315
+ op=...,
316
+ ) -> Work: ...
317
+ def allreduce_coalesced(
318
+ self,
319
+ tensors: List[Tensor],
320
+ opts=...,
321
+ ) -> Work: ...
322
+ def reduce_scatter_tensor_coalesced(
323
+ self,
324
+ outputTensors: List[Tensor],
325
+ inputTensors: List[Tensor],
326
+ opts: Optional[ReduceScatterOptions] = None,
327
+ ) -> Work: ...
328
+ @overload
329
+ def reduce(
330
+ self,
331
+ tensors: List[Tensor],
332
+ opts=...,
333
+ ) -> Work: ...
334
+ @overload
335
+ def reduce(
336
+ self,
337
+ tensor: Tensor,
338
+ root: int,
339
+ op=...,
340
+ ) -> Work: ...
341
+ @overload
342
+ def allgather(
343
+ self,
344
+ output_tensors: List[List[Tensor]],
345
+ input_tensors: List[Tensor],
346
+ opts=...,
347
+ ) -> Work: ...
348
+ @overload
349
+ def allgather(
350
+ self,
351
+ output_tensors: List[Tensor],
352
+ input_tensor: Tensor,
353
+ ) -> Work: ...
354
+ def _allgather_base(
355
+ self,
356
+ output: Tensor,
357
+ input: Tensor,
358
+ opts=...,
359
+ ) -> Work: ...
360
+ def allgather_coalesced(
361
+ self,
362
+ output_lists: List[List[Tensor]],
363
+ input_list: List[Tensor],
364
+ opts=...,
365
+ ) -> Work: ...
366
+ def allgather_into_tensor_coalesced(
367
+ self,
368
+ output_lists: List[Tensor],
369
+ input_list: List[Tensor],
370
+ opts=...,
371
+ ) -> Work: ...
372
+ @overload
373
+ def gather(
374
+ self,
375
+ output_tensors: List[List[Tensor]],
376
+ input_tensors: List[Tensor],
377
+ opts=...,
378
+ ) -> Work: ...
379
+ @overload
380
+ def gather(
381
+ self,
382
+ output_tensors: List[Tensor],
383
+ input_tensor: Tensor,
384
+ root: int,
385
+ ) -> Work: ...
386
+ @overload
387
+ def scatter(
388
+ self,
389
+ output_tensors: List[Tensor],
390
+ input_tensors: List[List[Tensor]],
391
+ opts=...,
392
+ ) -> Work: ...
393
+ @overload
394
+ def scatter(
395
+ self,
396
+ output_tensor: Tensor,
397
+ input_tensors: List[Tensor],
398
+ root: int,
399
+ ) -> Work: ...
400
+ @overload
401
+ def reduce_scatter(
402
+ self,
403
+ output_tensors: List[Tensor],
404
+ input_tensors: List[List[Tensor]],
405
+ opts=...,
406
+ ) -> Work: ...
407
+ @overload
408
+ def reduce_scatter(
409
+ self,
410
+ output_tensors: Tensor,
411
+ input_tensor: List[Tensor],
412
+ ) -> Work: ...
413
+ def _reduce_scatter_base(
414
+ self,
415
+ outputTensor: Tensor,
416
+ inputTensor: Tensor,
417
+ opts: Optional[ReduceScatterOptions],
418
+ ) -> Work: ...
419
+ @overload
420
+ def alltoall_base(
421
+ self,
422
+ output_tensor: Tensor,
423
+ input_tensor: Tensor,
424
+ output_split_sizes: List[int],
425
+ input_split_sizes: List[int],
426
+ opts=...,
427
+ ) -> Work: ...
428
+ @overload
429
+ def alltoall_base(
430
+ self,
431
+ output: Tensor,
432
+ input: Tensor,
433
+ output_split_sizes: List[int],
434
+ input_split_sizes: List[int],
435
+ ) -> Work: ...
436
+ @overload
437
+ def alltoall(
438
+ self,
439
+ output_tensor: List[Tensor],
440
+ input_tensor: List[Tensor],
441
+ opts=...,
442
+ ) -> Work: ...
443
+ @overload
444
+ def alltoall(
445
+ self,
446
+ output: List[Tensor],
447
+ input: List[Tensor],
448
+ ) -> Work: ...
449
+ def send(
450
+ self,
451
+ tensors: List[Tensor],
452
+ dstRank: int,
453
+ tag: int,
454
+ ) -> Work: ...
455
+ def recv(
456
+ self,
457
+ tensors: List[Tensor],
458
+ srcRank: int,
459
+ tag: int,
460
+ ) -> Work: ...
461
+ def recv_anysource(self, tensors: List[Tensor], tag: int) -> Work: ...
462
+ def barrier(self, opts=...) -> Work: ...
463
+ def boxed(self) -> ScriptObject: ...
464
+ @staticmethod
465
+ def unbox(obj: ScriptObject) -> ProcessGroup: ...
466
+ def _start_coalescing(self, device: torch.device) -> None: ...
467
+ def _end_coalescing(self, device: torch.device) -> Work: ...
468
+ def _get_backend_name(self) -> str: ...
469
+ def _backend_id(self, backend_type: BackendType) -> int: ...
470
+ @property
471
+ def _device_types(self) -> List[torch.device]: ...
472
+ def _get_backend(self, device: torch.device) -> Backend: ...
473
+ def _register_backend(
474
+ self,
475
+ device: torch.device,
476
+ backend_type: BackendType,
477
+ backend: Optional[Backend],
478
+ ) -> None: ...
479
+ def _set_group_name(self, name: str) -> None: ...
480
+ def name(self) -> str: ...
481
+ def _has_hooks(self) -> bool: ...
482
+ def _wait_for_pending_works(self) -> None: ...
483
+ def _set_sequence_number_for_group(self) -> None: ...
484
+ @property
485
+ def bound_device_id(self) -> Optional[torch.device]: ...
486
+ @bound_device_id.setter
487
+ def bound_device_id(self, device: Optional[torch.device]) -> None: ...
488
+ @property
489
+ def group_name(self) -> str: ...
490
+
491
+ class ProcessGroupRoundRobin(ProcessGroup): ...
492
+
493
+ def _round_robin_process_groups(
494
+ process_groups: List[ProcessGroup],
495
+ ) -> ProcessGroupRoundRobin: ...
496
+
497
+ class ProcessGroupGloo(Backend):
498
+ class Device: ...
499
+ class Options: ...
500
+
501
+ def __init__(
502
+ self,
503
+ store: Store,
504
+ rank: int,
505
+ size: int,
506
+ timeout: timedelta,
507
+ ): ...
508
+ @staticmethod
509
+ def create_device(hostname="", interface="") -> Device: ...
510
+ @staticmethod
511
+ def create_default_device() -> Device: ...
512
+ def _set_default_timeout(self, timeout) -> None: ...
513
+
514
+ class _ProcessGroupWrapper(Backend):
515
+ def __init__(self, pg: Backend, gloo_pg: ProcessGroupGloo): ...
516
+ wrapped_pg: Backend
517
+
518
+ class ProcessGroupNCCL(Backend):
519
+ class Options:
520
+ def __init__(self, timeout: Optional[timedelta] = None): ...
521
+ @property
522
+ def backend(self) -> str: ...
523
+ @property
524
+ def _timeout(self) -> timedelta: ...
525
+ @_timeout.setter
526
+ def _timeout(self, val: timedelta) -> None: ...
527
+ @property
528
+ def _is_high_priority_stream(self) -> bool: ...
529
+ @_is_high_priority_stream.setter
530
+ def _is_high_priority_stream(self, val: bool) -> None: ...
531
+
532
+ def __init__(
533
+ self,
534
+ store: Store,
535
+ rank: int,
536
+ size: int,
537
+ timeout: timedelta,
538
+ ): ...
539
+ def _group_start(self) -> None: ...
540
+ def _group_end(self) -> None: ...
541
+ def _set_default_timeout(self, timeout) -> None: ...
542
+ def _shutdown(self) -> None: ...
543
+ @property
544
+ def uid(self) -> int: ...
545
+
546
+ class ProcessGroupUCC(Backend):
547
+ def __init__(
548
+ self,
549
+ store: Store,
550
+ rank: int,
551
+ size: int,
552
+ timeout: timedelta,
553
+ ): ...
554
+
555
+ class ProcessGroupMPI(Backend):
556
+ def __init__(
557
+ self,
558
+ rank: int,
559
+ size: int,
560
+ pgComm: int,
561
+ ): ...
562
+ @staticmethod
563
+ def create(ranks: List[int]) -> ProcessGroupMPI: ...
564
+
565
+ def _compute_bucket_assignment_by_size(
566
+ tensors: List[Tensor],
567
+ bucket_size_limits: List[int],
568
+ expect_sparse_gradient: List[bool] = ...,
569
+ tensor_indices: List[int] = ...,
570
+ ) -> Tuple[List[List[int]], List[int]]: ...
571
+ def _broadcast_coalesced(
572
+ process_group: ProcessGroup,
573
+ tensors: List[Tensor],
574
+ buffer_size: int,
575
+ src: int,
576
+ ): ...
577
+ def _test_python_store(store: Store): ...
578
+ def _verify_params_across_processes(
579
+ process_group: ProcessGroup,
580
+ params: List[Tensor],
581
+ logger: Optional[Logger],
582
+ ): ...
583
+ def _make_nccl_premul_sum(factor: Union[float, List[Tensor]]) -> ReduceOp: ...
584
+ def _register_process_group(
585
+ group_name: str,
586
+ process_group: ProcessGroup,
587
+ ) -> None: ...
588
+ def _resolve_process_group(group_name: str) -> ProcessGroup: ...
589
+ def _unregister_all_process_groups() -> None: ...
590
+ def _unregister_process_group(group_name: str) -> None: ...
venv/lib/python3.10/site-packages/torch/_C/_distributed_rpc.pyi ADDED
@@ -0,0 +1,188 @@
1
+ # mypy: disable-error-code="type-arg"
2
+ from datetime import timedelta
3
+ from typing import Any, Dict, Generic, List, Optional, overload, Tuple, Type, TypeVar
4
+
5
+ import torch
6
+
7
+ from . import Future
8
+ from ._autograd import ProfilerEvent
9
+ from ._distributed_c10d import Store
10
+ from ._profiler import ProfilerConfig
11
+
12
+ # This module is defined in torch/csrc/distributed/rpc/init.cpp
13
+
14
+ _DEFAULT_INIT_METHOD: str
15
+ _DEFAULT_NUM_WORKER_THREADS: int
16
+ _UNSET_RPC_TIMEOUT: float
17
+ _DEFAULT_RPC_TIMEOUT_SEC: float
18
+
19
+ _T = TypeVar("_T")
20
+
21
+ class RpcBackendOptions:
22
+ rpc_timeout: float
23
+ init_method: str
24
+ def __init__(
25
+ self,
26
+ rpc_timeout: float = ...,
27
+ init_method: str = ...,
28
+ ): ...
29
+
30
+ class WorkerInfo:
31
+ def __init__(self, name: str, worker_id: int): ...
32
+ @property
33
+ def name(self) -> str: ...
34
+ @property
35
+ def id(self) -> int: ...
36
+ def __eq__(self, other: object) -> bool: ...
37
+
38
+ class RpcAgent:
39
+ def join(self, shutdown: bool = False, timeout: float = 0): ...
40
+ def sync(self): ...
41
+ def shutdown(self): ...
42
+ @overload
43
+ def get_worker_info(self) -> WorkerInfo: ...
44
+ @overload
45
+ def get_worker_info(self, workerName: str) -> WorkerInfo: ...
46
+ def get_worker_infos(self) -> List[WorkerInfo]: ...
47
+ def _get_device_map(self, dst: WorkerInfo) -> Dict[torch.device, torch.device]: ...
48
+ def get_debug_info(self) -> Dict[str, str]: ...
49
+ def get_metrics(self) -> Dict[str, str]: ...
50
+
51
+ class PyRRef(Generic[_T]):
52
+ def __init__(self, value: _T, type_hint: Any = None) -> None: ...
53
+ def is_owner(self) -> bool: ...
54
+ def confirmed_by_owner(self) -> bool: ...
55
+ def owner(self) -> WorkerInfo: ...
56
+ def owner_name(self) -> str: ...
57
+ def to_here(self, timeout: float = ...) -> _T: ...
58
+ def local_value(self) -> Any: ...
59
+ def rpc_sync(self, timeout: float = ...) -> Any: ...
60
+ def rpc_async(self, timeout: float = ...) -> Any: ...
61
+ def remote(self, timeout: float = ...) -> Any: ...
62
+ def _serialize(self) -> Tuple: ...
63
+ @staticmethod
64
+ def _deserialize(tp: Tuple) -> PyRRef: ...
65
+ def _get_type(self) -> Type[_T]: ...
66
+ def _get_future(self) -> Future[_T]: ...
67
+ def _get_profiling_future(self) -> Future[_T]: ...
68
+ def _set_profiling_future(self, profilingFuture: Future[_T]): ...
69
+
70
+ class _TensorPipeRpcBackendOptionsBase(RpcBackendOptions):
71
+ num_worker_threads: int
72
+ device_maps: Dict[str, Dict[torch.device, torch.device]]
73
+ devices: List[torch.device]
74
+ def __init__(
75
+ self,
76
+ num_worker_threads: int,
77
+ _transports: Optional[List],
78
+ _channels: Optional[List],
79
+ rpc_timeout: float = ...,
80
+ init_method: str = ...,
81
+ device_maps: Dict[str, Dict[torch.device, torch.device]] = {}, # noqa: B006
82
+ devices: List[torch.device] = [], # noqa: B006
83
+ ): ...
84
+ def _set_device_map(
85
+ self,
86
+ to: str,
87
+ device_map: Dict[torch.device, torch.device],
88
+ ): ...
89
+
90
+ class TensorPipeAgent(RpcAgent):
91
+ def __init__(
92
+ self,
93
+ store: Store,
94
+ name: str,
95
+ worker_id: int,
96
+ world_size: Optional[int],
97
+ opts: _TensorPipeRpcBackendOptionsBase,
98
+ reverse_device_maps: Dict[str, Dict[torch.device, torch.device]],
99
+ devices: List[torch.device],
100
+ ): ...
101
+ def join(self, shutdown: bool = False, timeout: float = 0): ...
102
+ def shutdown(self): ...
103
+ @overload
104
+ def get_worker_info(self) -> WorkerInfo: ...
105
+ @overload
106
+ def get_worker_info(self, workerName: str) -> WorkerInfo: ...
107
+ @overload
108
+ def get_worker_info(self, id: int) -> WorkerInfo: ...
109
+ def get_worker_infos(self) -> List[WorkerInfo]: ...
110
+ def _get_device_map(self, dst: WorkerInfo) -> Dict[torch.device, torch.device]: ...
111
+ def _update_group_membership(
112
+ self,
113
+ worker_info: WorkerInfo,
114
+ my_devices: List[torch.device],
115
+ reverse_device_map: Dict[str, Dict[torch.device, torch.device]],
116
+ is_join: bool,
117
+ ): ...
118
+ def _get_backend_options(self) -> _TensorPipeRpcBackendOptionsBase: ...
119
+ @property
120
+ def is_static_group(self) -> bool: ...
121
+ @property
122
+ def store(self) -> Store: ...
123
+
124
+ def _is_current_rpc_agent_set() -> bool: ...
125
+ def _get_current_rpc_agent() -> RpcAgent: ...
126
+ def _set_and_start_rpc_agent(agent: RpcAgent): ...
127
+ def _reset_current_rpc_agent(): ...
128
+ def _delete_all_user_and_unforked_owner_rrefs(timeout: timedelta = ...): ...
129
+ def _destroy_rref_context(ignoreRRefLeak: bool): ...
130
+ def _rref_context_get_debug_info() -> Dict[str, str]: ...
131
+ def _cleanup_python_rpc_handler(): ...
132
+ def _invoke_rpc_builtin(
133
+ dst: WorkerInfo,
134
+ opName: str,
135
+ rpcTimeoutSeconds: float,
136
+ *args: Any,
137
+ **kwargs: Any,
138
+ ): ...
139
+ def _invoke_rpc_python_udf(
140
+ dst: WorkerInfo,
141
+ pickledPythonUDF: str,
142
+ tensors: List[torch.Tensor],
143
+ rpcTimeoutSeconds: float,
144
+ isAsyncExecution: bool,
145
+ ): ...
146
+ def _invoke_rpc_torchscript(
147
+ dstWorkerName: str,
148
+ qualifiedNameStr: str,
149
+ argsTuple: Tuple,
150
+ kwargsDict: Dict,
151
+ rpcTimeoutSeconds: float,
152
+ isAsyncExecution: bool,
153
+ ): ...
154
+ def _invoke_remote_builtin(
155
+ dst: WorkerInfo,
156
+ opName: str,
157
+ rpcTimeoutSeconds: float,
158
+ *args: Any,
159
+ **kwargs: Any,
160
+ ): ...
161
+ def _invoke_remote_python_udf(
162
+ dst: WorkerInfo,
163
+ pickledPythonUDF: str,
164
+ tensors: List[torch.Tensor],
165
+ rpcTimeoutSeconds: float,
166
+ isAsyncExecution: bool,
167
+ ): ...
168
+ def _invoke_remote_torchscript(
169
+ dstWorkerName: WorkerInfo,
170
+ qualifiedNameStr: str,
171
+ rpcTimeoutSeconds: float,
172
+ isAsyncExecution: bool,
173
+ *args: Any,
174
+ **kwargs: Any,
175
+ ): ...
176
+ def get_rpc_timeout() -> float: ...
177
+ def enable_gil_profiling(flag: bool): ...
178
+ def _set_rpc_timeout(rpcTimeoutSeconds: float): ...
179
+
180
+ class RemoteProfilerManager:
181
+ @staticmethod
182
+ def set_current_profiling_key(key: str): ...
183
+
184
+ def _enable_server_process_global_profiler(new_config: ProfilerConfig): ...
185
+ def _disable_server_process_global_profiler() -> List[List[List[ProfilerEvent]]]: ...
186
+ def _set_profiler_node_id(default_node_id: int): ...
187
+ def _enable_jit_rref_pickle(): ...
188
+ def _disable_jit_rref_pickle(): ...
venv/lib/python3.10/site-packages/torch/_C/_distributed_rpc_testing.pyi ADDED
@@ -0,0 +1,35 @@
+ from typing import Dict, List
+
+ import torch
+
+ from ._distributed_c10d import Store
+ from ._distributed_rpc import _TensorPipeRpcBackendOptionsBase, TensorPipeAgent
+
+ # This module is defined in torch/csrc/distributed/rpc/testing/init.cpp
+
+ class FaultyTensorPipeRpcBackendOptions(_TensorPipeRpcBackendOptionsBase):
+     def __init__(
+         self,
+         num_worker_threads: int,
+         rpc_timeout: float,
+         init_method: str,
+         messages_to_fail: List[str],
+         messages_to_delay: Dict[str, float],
+         num_fail_sends: int,
+     ): ...
+     num_send_recv_threads: int
+     messages_to_fail: List[str]
+     messages_to_delay: Dict[str, float]
+     num_fail_sends: int
+
+ class FaultyTensorPipeAgent(TensorPipeAgent):
+     def __init__(
+         self,
+         store: Store,
+         name: str,
+         rank: int,
+         world_size: int,
+         options: FaultyTensorPipeRpcBackendOptions,
+         reverse_device_maps: Dict[str, Dict[torch.device, torch.device]],
+         devices: List[torch.device],
+     ): ...
venv/lib/python3.10/site-packages/torch/_C/_functions.pyi ADDED
@@ -0,0 +1,11 @@
+ from typing import AnyStr, List
+
+ from torch import Tensor
+
+ class UndefinedGrad:
+     def __init__(self) -> None: ...
+     def __call__(self, *inputs: Tensor) -> List[Tensor]: ...
+
+ class DelayedError:
+     def __init__(self, msg: AnyStr, num_inputs: int) -> None: ...
+     def __call__(self, inputs: List[Tensor]) -> List[Tensor]: ...
venv/lib/python3.10/site-packages/torch/_C/_functorch.pyi ADDED
@@ -0,0 +1,77 @@
1
+ from enum import Enum
2
+ from typing import Optional, Tuple
3
+
4
+ from torch import Tensor
5
+
6
+ # Defined in torch/csrc/functorch/init.cpp
7
+
8
+ def _set_dynamic_layer_keys_included(included: bool) -> None: ...
9
+ def get_unwrapped(tensor: Tensor) -> Tensor: ...
10
+ def is_batchedtensor(tensor: Tensor) -> bool: ...
11
+ def is_functionaltensor(tensor: Tensor) -> bool: ...
12
+ def is_functorch_wrapped_tensor(tensor: Tensor) -> bool: ...
13
+ def is_gradtrackingtensor(tensor: Tensor) -> bool: ...
14
+ def maybe_get_bdim(tensor: Tensor) -> int: ...
15
+ def maybe_get_level(tensor: Tensor) -> int: ...
16
+ def maybe_current_level() -> Optional[int]: ...
17
+ def unwrap_if_dead(tensor: Tensor) -> Tensor: ...
18
+ def _unwrap_for_grad(tensor: Tensor, level: int) -> Tensor: ...
19
+ def _wrap_for_grad(tensor: Tensor, level: int) -> Tensor: ...
20
+ def _unwrap_batched(tensor: Tensor, level: int) -> Tuple[Tensor, Optional[int]]: ...
21
+ def current_level() -> int: ...
22
+ def _add_batch_dim(tensor: Tensor, bdim: int, level: int) -> Tensor: ...
23
+ def set_single_level_autograd_function_allowed(allowed: bool) -> None: ...
24
+ def get_single_level_autograd_function_allowed() -> bool: ...
25
+ def _unwrap_functional_tensor(tensor: Tensor, reapply_views: bool) -> Tensor: ...
26
+ def _wrap_functional_tensor(tensor: Tensor, level: int) -> Tensor: ...
27
+ def _vmap_increment_nesting(batch_size: int, randomness: str) -> int: ...
28
+ def _vmap_decrement_nesting() -> int: ...
29
+ def _grad_increment_nesting() -> int: ...
30
+ def _grad_decrement_nesting() -> int: ...
31
+
32
+ # Defined in aten/src/ATen/functorch/Interpreter.h
33
+ class TransformType(Enum):
34
+ Torch: TransformType = ...
35
+ Vmap: TransformType = ...
36
+ Grad: TransformType = ...
37
+ Jvp: TransformType = ...
38
+ Functionalize: TransformType = ...
39
+
40
+ class RandomnessType(Enum):
41
+ Error: TransformType = ...
42
+ Same: TransformType = ...
43
+ Different: TransformType = ...
44
+
45
+ class CInterpreter:
46
+ def key(self) -> TransformType: ...
47
+ def level(self) -> int: ...
48
+
49
+ class CGradInterpreterPtr:
50
+ def __init__(self, interpreter: CInterpreter): ...
51
+ def lift(self, Tensor) -> Tensor: ...
52
+ def prevGradMode(self) -> bool: ...
53
+
54
+ class CJvpInterpreterPtr:
55
+ def __init__(self, interpreter: CInterpreter): ...
56
+ def lift(self, Tensor) -> Tensor: ...
57
+ def prevFwdGradMode(self) -> bool: ...
58
+
59
+ class CFunctionalizeInterpreterPtr:
60
+ def __init__(self, interpreter: CInterpreter): ...
61
+ def key(self) -> TransformType: ...
62
+ def level(self) -> int: ...
63
+ def functionalizeAddBackViews(self) -> bool: ...
64
+
65
+ class CVmapInterpreterPtr:
66
+ def __init__(self, interpreter: CInterpreter): ...
67
+ def key(self) -> TransformType: ...
68
+ def level(self) -> int: ...
69
+ def batchSize(self) -> int: ...
70
+ def randomness(self) -> RandomnessType: ...
71
+
72
+ class DynamicLayer: ...
73
+
74
+ def get_interpreter_stack() -> list[CInterpreter]: ...
75
+ def peek_interpreter_stack() -> CInterpreter: ...
76
+ def pop_dynamic_layer_stack() -> DynamicLayer: ...
77
+ def push_dynamic_layer_stack(dl: DynamicLayer) -> int: ...
venv/lib/python3.10/site-packages/torch/_C/_itt.pyi ADDED
@@ -0,0 +1,5 @@
+ # Defined in torch/csrc/itt.cpp
+ def is_available() -> None: ...
+ def rangePush(message: str) -> None: ...
+ def rangePop() -> None: ...
+ def mark(message: str) -> None: ...
venv/lib/python3.10/site-packages/torch/_C/_lazy.pyi ADDED
@@ -0,0 +1,28 @@
+ from typing import List
+
+ from torch import Tensor
+
+ # defined in torch/csrc/lazy/python/init.cpp
+ def _mark_step(device: str, devices: List[str], wait: bool): ...
+ def _wait_device_ops(devices: List[str]): ...
+ def _reset_metrics(): ...
+ def _counter_names() -> List[str]: ...
+ def _counter_value(name: str) -> int: ...
+ def _metrics_report() -> str: ...
+ def _get_graph_hash(tensors: List[Tensor]) -> str: ...
+ def _sync_multi(
+     tensors: List[Tensor],
+     devices: List[str],
+     wait: bool = True,
+     sync_ltc_data: bool = True,
+ ): ...
+ def _get_tensor_id(tensor: Tensor) -> int: ...
+ def _get_tensors_text(tensors: List[Tensor]) -> str: ...
+ def _get_tensors_dot(tensors: List[Tensor]) -> str: ...
+ def _get_tensors_backend(tensors: List[Tensor]) -> str: ...
+ def _get_force_fallback() -> str: ...
+ def _set_force_fallback(newval: str): ...
+ def _clear_ir_cache(): ...
+ def _dump_ir_cache(filename: str): ...
+ def _set_reuse_ir(val: bool): ...
+ def _get_default_device_type(): ...
venv/lib/python3.10/site-packages/torch/_C/_lazy_ts_backend.pyi ADDED
@@ -0,0 +1,11 @@
+ # defined in torch/csrc/lazy/python/init.cpp
+
+ from typing import Any, List, Tuple
+
+ from torch import Tensor
+
+ def _init(): ...
+ def _get_tensors_ts_device_data_node(
+     tensors: List[Tensor],
+ ) -> Tuple[List[int], List[Any]]: ...
+ def _run_cached_graph(hash_str: str, graph_inputs: List[Any]) -> List[Tensor]: ...
venv/lib/python3.10/site-packages/torch/_C/_monitor.pyi ADDED
@@ -0,0 +1,44 @@
+ # Defined in torch/csrc/monitor/python_init.cpp
+
+ import datetime
+ from enum import Enum
+ from typing import Callable, Dict, List, Union
+
+ class Aggregation(Enum):
+     VALUE = ...
+     MEAN = ...
+     COUNT = ...
+     SUM = ...
+     MAX = ...
+     MIN = ...
+
+ class Stat:
+     name: str
+     count: int
+     def __init__(
+         self,
+         name: str,
+         aggregations: List[Aggregation],
+         window_size: int,
+         max_samples: int = -1,
+     ) -> None: ...
+     def add(self, v: float) -> None: ...
+     def get(self) -> Dict[Aggregation, float]: ...
+
+ class Event:
+     name: str
+     timestamp: datetime.datetime
+     data: Dict[str, Union[int, float, bool, str]]
+     def __init__(
+         self,
+         name: str,
+         timestamp: datetime.datetime,
+         data: Dict[str, Union[int, float, bool, str]],
+     ) -> None: ...
+
+ def log_event(e: Event) -> None: ...
+
+ class EventHandlerHandle: ...
+
+ def register_event_handler(handler: Callable[[Event], None]) -> EventHandlerHandle: ...
+ def unregister_event_handler(handle: EventHandlerHandle) -> None: ...
venv/lib/python3.10/site-packages/torch/_C/_nn.pyi ADDED
@@ -0,0 +1,86 @@
1
+ # mypy: disable-error-code="type-arg"
2
+ from typing import List, Optional, overload, Sequence, Tuple, Union
3
+
4
+ from torch import memory_format, Tensor
5
+ from torch.types import _bool, _device, _dtype, _int, _size
6
+
7
+ # Defined in tools/autograd/templates/python_nn_functions.cpp
8
+
9
+ def adaptive_max_pool2d(input: Tensor, output_size: Union[_int, _size]) -> Tuple[Tensor, Tensor]: ...
10
+ def adaptive_max_pool3d(input: Tensor, output_size: Union[_int, _size]) -> Tuple[Tensor, Tensor]: ...
11
+ def avg_pool2d(input: Tensor, kernel_size: Union[_int, _size], stride: Optional[Union[_int, _size]] = None, padding: Union[_int, _size] = 0, ceil_mode: bool = False, count_include_pad: bool = True, divisor_override: Optional[int] = None) -> Tensor: ...
12
+ def avg_pool3d(input: Tensor, kernel_size: Union[_int, _size], stride: Optional[Union[_int, _size]] = None, padding: Union[_int, _size] = 0, ceil_mode: bool = False, count_include_pad: bool = True, divisor_override: Optional[int] = None) -> Tensor: ...
13
+ def elu_(input: Tensor, alpha: float = ...) -> Tensor: ...
14
+ def fractional_max_pool2d(input: Tensor, kernel_size: Union[_int, _size], output_size: Union[_int, _size], _random_samples: Tensor) -> Tuple[Tensor, Tensor]: ...
15
+ def fractional_max_pool3d(input: Tensor, kernel_size: Union[_int, _size], output_size: Union[_int, _size], _random_samples: Tensor) -> Tuple[Tensor, Tensor]: ...
16
+ def gelu(input: Tensor, approximate: str = ...) -> Tensor: ...
17
+ def hardsigmoid(input: Tensor, *, out: Optional[Tensor] = None) -> Tensor: ...
18
+ def hardtanh(input: Tensor, min_val: float = ..., max_val: float = ..., *, out: Optional[Tensor] = None) -> Tensor: ...
19
+ def hardtanh_(input: Tensor, min_val: float = ..., max_val: float = ...) -> Tensor: ...
20
+ def leaky_relu(input: Tensor, negative_slope: float = ..., *, out: Optional[Tensor] = None) -> Tensor: ...
21
+ def leaky_relu_(input: Tensor, negative_slope: float = ...) -> Tensor: ...
22
+ def linear(input: Tensor, weight: Tensor, bias: Optional[Tensor] = None) -> Tensor: ...
23
+ def log_sigmoid(input: Tensor) -> Tensor: ...
24
+ def one_hot(tensor: Tensor, num_classes: int = ...) -> Tensor: ...
25
+ def pad(input: Tensor, pad: Sequence[int], mode: str = ..., value: Optional[float] = None) -> Tensor: ...
26
+ def scaled_dot_product_attention(query: Tensor, key: Tensor, value: Tensor, attn_mask: Optional[Tensor] = None, dropout_p: float = 0.0, is_causal: bool = False, scale: Optional[float] = None) -> Tensor: ...
27
+ def softplus(input: Tensor, beta: float = ..., threshold: float = ...) -> Tensor: ...
28
+ def softshrink(input: Tensor, lambd: float = ...) -> Tensor: ...
29
+
30
+ # Defined in aten/src/ATen/native/mkldnn/Linear.cpp
31
+ def mkldnn_linear(input: Tensor, weight: Tensor, bias: Optional[Tensor]) -> Tensor: ...
32
+
33
+ # Defined at aten/src/ATen/native/mkldnn/MKLDNNConversions.cpp
34
+ def mkldnn_reorder_conv2d_weight(
35
+ self: Tensor,
36
+ padding: List,
37
+ stride: List,
38
+ dilatation: List,
39
+ groups: int,
40
+ ) -> Tensor: ...
41
+ def mkldnn_reorder_conv3d_weight(
42
+ self: Tensor,
43
+ padding: List,
44
+ stride: List,
45
+ dilatation: List,
46
+ groups: int,
47
+ ) -> Tensor: ...
48
+
49
+ # Defined in aten/src/ATen/native/mkldnn/Prelu.cpp
50
+ def mkldnn_prelu(input: Tensor, weight: Tensor) -> Tensor: ...
51
+
52
+ # Defined at tools/autograd/templates/python_nn_functions.cpp
53
+ @overload
54
+ def _parse_to(
55
+ device: _device,
56
+ dtype: _dtype,
57
+ non_blocking: _bool,
58
+ copy: _bool,
59
+ *,
60
+ memory_format: memory_format,
61
+ ) -> Tuple[_device, _dtype, _bool, memory_format]: ...
62
+ @overload
63
+ def _parse_to(
64
+ dtype: _dtype,
65
+ non_blocking: _bool,
66
+ copy: _bool,
67
+ *,
68
+ memory_format: memory_format,
69
+ ) -> Tuple[_device, _dtype, _bool, memory_format]: ...
70
+ @overload
71
+ def _parse_to(
72
+ tensor: Tensor,
73
+ non_blocking: _bool,
74
+ copy: _bool,
75
+ *,
76
+ memory_format: memory_format,
77
+ ) -> Tuple[_device, _dtype, _bool, memory_format]: ...
78
+
79
+ # Defined in aten/src/ATen/native/PadSequence.cpp
80
+ def pad_sequence(
81
+ sequences: List[Tensor],
82
+ batch_first: bool = False,
83
+ padding_value: float = ...,
84
+ ) -> Tensor: ...
85
+ def flatten_dense_tensors(tensors: List[Tensor]) -> Tensor: ...
86
+ def unflatten_dense_tensors(flat: Tensor, tensors: List[Tensor]) -> List[Tensor]: ...
venv/lib/python3.10/site-packages/torch/_C/_nvtx.pyi ADDED
@@ -0,0 +1,6 @@
+ # Defined in torch/csrc/cuda/shared/nvtx.cpp
+ def rangePushA(message: str) -> int: ...
+ def rangePop() -> int: ...
+ def rangeStartA(message: str) -> int: ...
+ def rangeEnd(int) -> None: ...
+ def markA(message: str) -> None: ...
venv/lib/python3.10/site-packages/torch/_C/_onnx.pyi ADDED
@@ -0,0 +1,40 @@
+ # Defined in torch/csrc/onnx/init.cpp
+
+ from enum import Enum
+
+ _CAFFE2_ATEN_FALLBACK: bool
+ PRODUCER_VERSION: str
+
+ class TensorProtoDataType(Enum):
+     UNDEFINED = ...
+     FLOAT = ...
+     UINT8 = ...
+     INT8 = ...
+     UINT16 = ...
+     INT16 = ...
+     INT32 = ...
+     INT64 = ...
+     STRING = ...
+     BOOL = ...
+     FLOAT16 = ...
+     DOUBLE = ...
+     UINT32 = ...
+     UINT64 = ...
+     COMPLEX64 = ...
+     COMPLEX128 = ...
+     BFLOAT16 = ...
+     FLOAT8E5M2 = ...
+     FLOAT8E4M3FN = ...
+     FLOAT8E5M2FNUZ = ...
+     FLOAT8E4M3FNUZ = ...
+
+ class OperatorExportTypes(Enum):
+     ONNX = ...
+     ONNX_ATEN = ...
+     ONNX_ATEN_FALLBACK = ...
+     ONNX_FALLTHROUGH = ...
+
+ class TrainingMode(Enum):
+     EVAL = ...
+     PRESERVE = ...
+     TRAINING = ...
venv/lib/python3.10/site-packages/torch/_C/_profiler.pyi ADDED
@@ -0,0 +1,238 @@
1
+ from enum import Enum
2
+ from typing import Any, Dict, List, Literal, Optional, Tuple, Union
3
+
4
+ from torch._C import device, dtype, layout
5
+ from typing_extensions import TypeAlias
6
+
7
+ # defined in torch/csrc/profiler/python/init.cpp
8
+
9
+ class RecordScope(Enum):
10
+ FUNCTION = ...
11
+ BACKWARD_FUNCTION = ...
12
+ TORCHSCRIPT_FUNCTION = ...
13
+ KERNEL_FUNCTION_DTYPE = ...
14
+ CUSTOM_CLASS = ...
15
+ BUILD_FEATURE = ...
16
+ LITE_INTERPRETER = ...
17
+ USER_SCOPE = ...
18
+ STATIC_RUNTIME_OP = ...
19
+ STATIC_RUNTIME_MODEL = ...
20
+
21
+ class ProfilerState(Enum):
22
+ Disable = ...
23
+ CPU = ...
24
+ CUDA = ...
25
+ NVTX = ...
26
+ ITT = ...
27
+ KINETO = ...
28
+ KINETO_GPU_FALLBACK = ...
29
+ KINETO_PRIVATEUSE1_FALLBACK = ...
30
+ KINETO_PRIVATEUSE1 = ...
31
+
32
+ class ActiveProfilerType(Enum):
33
+ NONE = ...
34
+ LEGACY = ...
35
+ KINETO = ...
36
+ NVTX = ...
37
+ ITT = ...
38
+
39
+ class ProfilerActivity(Enum):
40
+ CPU = ...
41
+ CUDA = ...
42
+ MTIA = ...
43
+ PrivateUse1 = ...
44
+
45
+ class _EventType(Enum):
46
+ TorchOp = ...
47
+ Backend = ...
48
+ Allocation = ...
49
+ OutOfMemory = ...
50
+ PyCall = ...
51
+ PyCCall = ...
52
+ Kineto = ...
53
+
54
+ class _ExperimentalConfig:
55
+ def __init__(
56
+ self,
57
+ profiler_metrics: List[str] = ...,
58
+ profiler_measure_per_kernel: bool = ...,
59
+ verbose: bool = ...,
60
+ performance_events: List[str] = ...,
61
+ enable_cuda_sync_events: bool = ...,
62
+ ) -> None: ...
63
+
64
+ class ProfilerConfig:
65
+ def __init__(
66
+ self,
67
+ state: ProfilerState,
68
+ report_input_shapes: bool,
69
+ profile_memory: bool,
70
+ with_stack: bool,
71
+ with_flops: bool,
72
+ with_modules: bool,
73
+ experimental_config: _ExperimentalConfig,
74
+ ) -> None: ...
75
+
76
+ class _ProfilerEvent:
77
+ start_tid: int
78
+ start_time_ns: int
79
+ children: List[_ProfilerEvent]
80
+
81
+ # TODO(robieta): remove in favor of `self.typed`
82
+ extra_fields: Union[
83
+ _ExtraFields_TorchOp,
84
+ _ExtraFields_Backend,
85
+ _ExtraFields_Allocation,
86
+ _ExtraFields_OutOfMemory,
87
+ _ExtraFields_PyCall,
88
+ _ExtraFields_PyCCall,
89
+ _ExtraFields_Kineto,
90
+ ]
91
+
92
+ @property
93
+ def typed(
94
+ self,
95
+ ) -> Union[
96
+ Tuple[Literal[_EventType.TorchOp], _ExtraFields_TorchOp],
97
+ Tuple[Literal[_EventType.Backend], _ExtraFields_Backend],
98
+ Tuple[Literal[_EventType.Allocation], _ExtraFields_Allocation],
99
+ Tuple[Literal[_EventType.OutOfMemory], _ExtraFields_OutOfMemory],
100
+ Tuple[Literal[_EventType.PyCall], _ExtraFields_PyCall],
101
+ Tuple[Literal[_EventType.PyCCall], _ExtraFields_PyCCall],
102
+ Tuple[Literal[_EventType.Kineto], _ExtraFields_Kineto],
103
+ ]: ...
104
+ @property
105
+ def name(self) -> str: ...
106
+ @property
107
+ def tag(self) -> _EventType: ...
108
+ @property
109
+ def id(self) -> int: ...
110
+ @property
111
+ def parent(self) -> Optional[_ProfilerEvent]: ...
112
+ @property
113
+ def correlation_id(self) -> int: ...
114
+ @property
115
+ def end_time_ns(self) -> int: ...
116
+ @property
117
+ def duration_time_ns(self) -> int: ...
118
+
119
+ class _TensorMetadata:
120
+ impl_ptr: Optional[int]
121
+ storage_data_ptr: Optional[int]
122
+ id: Optional[int]
123
+
124
+ @property
125
+ def allocation_id(self) -> Optional[int]: ...
126
+ @property
127
+ def layout(self) -> layout: ...
128
+ @property
129
+ def device(self) -> device: ...
130
+ @property
131
+ def dtype(self) -> dtype: ...
132
+ @property
133
+ def sizes(self) -> List[int]: ...
134
+ @property
135
+ def strides(self) -> List[int]: ...
136
+
137
+ Scalar: TypeAlias = Union[int, float, bool, complex]
138
+ Input: TypeAlias = Optional[Union[_TensorMetadata, List[_TensorMetadata], Scalar]]
139
+
140
+ class _ExtraFields_TorchOp:
141
+ name: str
142
+ sequence_number: int
143
+ allow_tf32_cublas: bool
144
+
145
+ @property
146
+ def inputs(self) -> List[Input]: ...
147
+ @property
148
+ def scope(self) -> RecordScope: ...
149
+
150
+ class _ExtraFields_Backend: ...
151
+
152
+ class _ExtraFields_Allocation:
153
+ ptr: int
154
+ id: Optional[int]
155
+ alloc_size: int
156
+ total_allocated: int
157
+ total_reserved: int
158
+
159
+ @property
160
+ def allocation_id(self) -> Optional[int]: ...
161
+ @property
162
+ def device(self) -> device: ...
163
+
164
+ class _ExtraFields_OutOfMemory: ...
165
+
166
+ class _PyFrameState:
167
+ line_number: int
168
+ function_name: str
169
+
170
+ @property
171
+ def file_name(self) -> str: ...
172
+
173
+ class _NNModuleInfo:
174
+ @property
175
+ def self_ptr(self) -> int: ...
176
+ @property
177
+ def cls_ptr(self) -> int: ...
178
+ @property
179
+ def cls_name(self) -> str: ...
180
+ @property
181
+ def parameters(
182
+ self,
183
+ ) -> List[Tuple[str, _TensorMetadata, Optional[_TensorMetadata]]]: ...
184
+
185
+ class _OptimizerInfo:
186
+ @property
187
+ def parameters(
188
+ self,
189
+ ) -> List[
190
+ Tuple[
191
+ # Parameter
192
+ _TensorMetadata,
193
+ #
194
+ # Gradient (if present during optimizer.step())
195
+ Optional[_TensorMetadata],
196
+ #
197
+ # Optimizer state for Parameter as (name, tensor) pairs
198
+ List[Tuple[str, _TensorMetadata]],
199
+ ]
200
+ ]: ...
201
+
202
+ class _ExtraFields_PyCCall:
203
+ @property
204
+ def caller(self) -> _PyFrameState: ...
205
+
206
+ class _ExtraFields_PyCall:
207
+ @property
208
+ def callsite(self) -> _PyFrameState: ...
209
+ @property
210
+ def caller(self) -> _PyFrameState: ...
211
+ @property
212
+ def module(self) -> Optional[_NNModuleInfo]: ...
213
+ @property
214
+ def optimizer(self) -> Optional[_OptimizerInfo]: ...
215
+
216
+ class _ExtraFields_Kineto: ...
217
+
218
+ def _add_execution_trace_observer(output_file_path: str) -> bool: ...
219
+ def _remove_execution_trace_observer() -> None: ...
220
+ def _enable_execution_trace_observer() -> None: ...
221
+ def _disable_execution_trace_observer() -> None: ...
222
+ def _set_record_concrete_inputs_enabled_val(val: bool) -> None: ...
223
+ def _set_fwd_bwd_enabled_val(val: bool) -> None: ...
224
+ def _set_cuda_sync_enabled_val(val: bool) -> None: ...
225
+
226
+ class CapturedTraceback: ...
227
+
228
+ def gather_traceback(python: bool, script: bool, cpp: bool) -> CapturedTraceback: ...
229
+
230
+ # The Dict has name, filename, line
231
+ def symbolize_tracebacks(
232
+ to_symbolize: List[CapturedTraceback],
233
+ ) -> List[List[Dict[str, str]]]: ...
234
+
235
+ class _RecordFunctionFast:
236
+ def __init__(self, name: str) -> None: ...
237
+ def __enter__(self) -> None: ...
238
+ def __exit__(self, exc_type: Any, exc_value: Any, traceback: Any) -> None: ...
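A minimal usage sketch of how these bindings surface through the public torch.profiler API, assuming a build where torch.profiler.profile accepts experimental_config (as in recent releases); _RecordFunctionFast is used straight from torch._C._profiler since it has no public wrapper.

    import torch
    from torch.profiler import profile, ProfilerActivity
    from torch._C._profiler import _ExperimentalConfig, _RecordFunctionFast

    # Forwarded to the Kineto backend; only fields from the stub above are used.
    cfg = _ExperimentalConfig(verbose=True)

    with profile(activities=[ProfilerActivity.CPU], experimental_config=cfg) as prof:
        # _RecordFunctionFast labels a region with minimal overhead.
        with _RecordFunctionFast("matmul_region"):
            torch.randn(128, 128) @ torch.randn(128, 128)

    print(prof.key_averages().table(sort_by="cpu_time_total", row_limit=5))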
venv/lib/python3.10/site-packages/torch/_C/_verbose.pyi ADDED
@@ -0,0 +1,3 @@
1
+ # Defined in torch/csrc/utils/verbose.cpp
2
+ def mkl_set_verbose(enable: int) -> int: ...
3
+ def mkldnn_set_verbose(level: int) -> int: ...
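A minimal sketch of how these two hooks are normally reached, assuming the standard torch.backends.mkl / torch.backends.mkldnn verbose context managers (which wrap mkl_set_verbose / mkldnn_set_verbose) are present in this build.

    import torch

    x = torch.randn(64, 64)

    # Wraps mkldnn_set_verbose(); prints oneDNN primitive-level logs for ops in the block.
    with torch.backends.mkldnn.verbose(torch.backends.mkldnn.VERBOSE_ON):
        torch.mm(x, x)

    # Wraps mkl_set_verbose(); same idea for MKL-backed ops.
    with torch.backends.mkl.verbose(torch.backends.mkl.VERBOSE_ON):
        torch.mm(x, x)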
venv/lib/python3.10/site-packages/torch/contrib/__init__.py ADDED
File without changes
venv/lib/python3.10/site-packages/torch/contrib/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (181 Bytes). View file
 
venv/lib/python3.10/site-packages/torch/contrib/__pycache__/_tensorboard_vis.cpython-310.pyc ADDED
Binary file (5.31 kB). View file
 
venv/lib/python3.10/site-packages/torch/contrib/_tensorboard_vis.py ADDED
@@ -0,0 +1,142 @@
1
+ import time
2
+ from collections import defaultdict
3
+ from functools import partial
4
+ from typing import DefaultDict
5
+
6
+ import torch
7
+
8
+
9
+ # Unfortunately it doesn't seem as if there was any way to get TensorBoard to do
10
+ # anything without having TF installed, and so this file has a hard dependency on it
11
+ # as well. It really is a debugging tool, so it doesn't matter.
12
+ try:
13
+ from tensorflow.core.util import event_pb2
14
+ from tensorflow.core.framework import graph_pb2
15
+ from tensorflow.python.summary.writer.writer import FileWriter
16
+ except ImportError:
17
+ raise ImportError("TensorBoard visualization of GraphExecutors requires having "
18
+ "TensorFlow installed") from None
19
+
20
+
21
+ def dump_tensorboard_summary(graph_executor, logdir):
22
+ with FileWriter(logdir) as w:
23
+ pb_graph = visualize(graph_executor)
24
+ evt = event_pb2.Event(wall_time=time.time(), graph_def=pb_graph.SerializeToString())
25
+ w.add_event(evt)
26
+
27
+
28
+ def visualize(graph, name_prefix='', pb_graph=None, executors_it=None):
29
+ """Visualizes an independent graph, or a graph executor."""
30
+ value_map = {}
31
+ pb_graph = pb_graph or graph_pb2.GraphDef()
32
+
33
+ if isinstance(graph, torch._C.GraphExecutorState):
34
+ visualize_graph_executor(graph, name_prefix, pb_graph,
35
+ partial(visualize, pb_graph=pb_graph))
36
+ return pb_graph
37
+
38
+ # Set up an input node
39
+ input_node = pb_graph.node.add(op='input', name=name_prefix + 'input')
40
+ for i, value in enumerate(graph.param_node().outputs()):
41
+ value_map[value.unique()] = name_prefix + 'input:' + str(i)
42
+
43
+ visualize_rec(graph, value_map, name_prefix, pb_graph, executors_it)
44
+
45
+ # Gather all outputs
46
+ return_node = pb_graph.node.add(op='output', name=name_prefix + 'output')
47
+ for value in graph.return_node().inputs():
48
+ return_node.input.append(value_map[value.unique()])
49
+
50
+ return pb_graph
51
+
52
+
53
+ def visualize_graph_executor(state, name_prefix, pb_graph, inline_graph):
54
+ """Append the state of a given GraphExecutor to the graph protobuf.
55
+
56
+ Args:
57
+ state (GraphExecutor or GraphExecutorState): GraphExecutor to display.
58
+ name_prefix (str): Name prefix of the containing subgraph.
59
+ pb_graph (GraphDef): graph to append to.
60
+ inline_graph (Callable): a function that handles setting up a value_map,
61
+ so that some graphs in here can be inlined. This is necessary, because
62
+ this will simply be `visualize` for the top-level GraphExecutor,
63
+ or `inline_graph` for all nested ones.
64
+
65
+ The signature should look like (Graph, name_prefix) -> ().
66
+ It will be called exactly once.
67
+
68
+ The strategy is to embed all different configurations as independent subgraphs,
69
+ while inlining the original graph as the one that actually produces the values.
70
+ """
71
+ if state.autograd_fallback_graph is not None:
72
+ visualize(graph=state.autograd_fallback_graph,
73
+ name_prefix=name_prefix + 'autograd_fallback/',
74
+ pb_graph=pb_graph,
75
+ executors_it=iter(state.autograd_fallback.executors()))
76
+
77
+ for i, (arg_spec, plan) in enumerate(state.execution_plans.items()):
78
+ subgraph_name = name_prefix + f'plan{i}/'
79
+
80
+ # Create a disconnected node that will keep information regarding the input
81
+ # types of this trace. This is unfortunately a bit too verbose to be included
82
+ # in the subgraph name.
83
+ input_kinds = pb_graph.node.add(op='INPUT_KIND', name=subgraph_name)
84
+ input_kinds.attr['inputs'].s = repr(arg_spec).encode('ascii')
85
+
86
+ visualize(plan.graph, subgraph_name, pb_graph, iter(plan.code.executors()))
87
+
88
+ # Show gradient as an independent subgraph of this plan
89
+ if plan.grad_executor is not None:
90
+ grad_subgraph_name = subgraph_name + 'grad/'
91
+ visualize(plan.grad_executor, grad_subgraph_name, pb_graph)
92
+
93
+ return inline_graph(state.graph, name_prefix + 'original/')
94
+
95
+
96
+ def visualize_rec(graph, value_map, name_prefix, pb_graph, executors_it=None):
97
+ """Recursive part of visualize (basically skips setting up the input and output nodes)."""
98
+ def inline_graph(subgraph, name, node):
99
+ rec_value_map = {inp.unique(): value_map[val.unique()]
100
+ for inp, val in zip(subgraph.inputs(), node.inputs())}
101
+ visualize_rec(graph=subgraph,
102
+ value_map=rec_value_map,
103
+ name_prefix=name,
104
+ pb_graph=pb_graph)
105
+ for out, val in zip(subgraph.outputs(), node.outputs()):
106
+ value_map[val.unique()] = rec_value_map[out.unique()]
107
+
108
+ op_id_counter: DefaultDict[str, int] = defaultdict(int)
109
+
110
+ def name_for(node):
111
+ kind = node.kind()[node.kind().index('::') + 2:]
112
+ op_id_counter[kind] += 1
113
+ return kind, name_prefix + kind + '_' + str(op_id_counter[kind])
114
+
115
+ def add_fusion_group(node):
116
+ op, name = name_for(node)
117
+ inline_graph(node.g('Subgraph'), name + '/', node)
118
+
119
+ def add_graph_executor(node):
120
+ op, name = name_for(node)
121
+ if executors_it is None:
122
+ add_node(node)
123
+ else:
124
+ ge = next(executors_it)
125
+ visualize_graph_executor(ge, name + '/', pb_graph,
126
+ partial(inline_graph, node=node))
127
+
128
+ def add_node(node):
129
+ if node.kind() == 'prim::FusionGroup':
130
+ return add_fusion_group(node)
131
+ elif node.kind() == 'prim::GraphExecutor':
132
+ return add_graph_executor(node)
133
+ op, name = name_for(node)
134
+ pb_node = pb_graph.node.add(op=op, name=name)
135
+ for value in node.inputs():
136
+ pb_node.input.append(value_map[value.unique()])
137
+ # TODO: handle attrs
138
+ for i, value in enumerate(node.outputs()):
139
+ value_map[value.unique()] = name + ':' + str(i)
140
+
141
+ for node in graph.nodes():
142
+ add_node(node)
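A rough usage sketch for the helper above. Obtaining a GraphExecutorState via get_debug_state() is an assumption about the TorchScript API in this build, and the module's own TensorFlow import must succeed; treat this contrib tool as debug-only.

    import torch
    from torch.contrib._tensorboard_vis import dump_tensorboard_summary

    @torch.jit.script
    def f(x):
        return torch.tanh(x) + 1

    f(torch.randn(4, 4))  # run once so an execution plan exists

    # get_debug_state() returning a GraphExecutorState is assumed here.
    dump_tensorboard_summary(f.get_debug_state(), logdir='/tmp/ge_vis')
    # Inspect with: tensorboard --logdir /tmp/ge_vis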
venv/lib/python3.10/site-packages/torch/nested/__init__.py ADDED
@@ -0,0 +1,253 @@
1
+ from typing import List, Optional, Union, Sequence
2
+
3
+ import torch
4
+ from torch import SymInt, Tensor
5
+ from torch._C import _add_docstr, _nested # type: ignore[attr-defined]
6
+
7
+ from torch.types import _device as Device, _dtype as DType
8
+
9
+ __all__ = [
10
+ "to_padded_tensor",
11
+ "as_nested_tensor",
12
+ "nested_tensor",
13
+ "narrow",
14
+ ]
15
+
16
+ # Nested Tensor constructor functions
17
+
18
+
19
+ def as_nested_tensor(
20
+ tensor_list: Sequence[Tensor],
21
+ dtype: Optional[DType] = None,
22
+ device: Optional[Device] = None,
23
+ layout=None
24
+ ) -> Tensor:
25
+ r"""
26
+ Constructs a nested tensor, preserving autograd history, from :attr:`tensor_list`, a list of tensors.
27
+
28
+ .. note::
29
+ Tensors within the list are always copied by this function due to current nested tensor semantics.
30
+
31
+ Args:
32
+ tensor_list (List[Tensor]): a list of tensors with the same ndim
33
+
34
+ Keyword arguments:
35
+ dtype (:class:`torch.dtype`, optional): the desired type of returned nested tensor.
36
+ Default: if None, same :class:`torch.dtype` as leftmost tensor in the list.
37
+ device (:class:`torch.device`, optional): the desired device of returned nested tensor.
38
+ Default: if None, same :class:`torch.device` as leftmost tensor in the list
39
+ layout (:class:`torch.layout`, optional): the desired layout of returned nested tensor.
40
+ Only strided and jagged layouts are supported. Default: if None, the strided layout.
41
+
42
+ Example::
43
+
44
+ >>> a = torch.arange(3, dtype=torch.float, requires_grad=True)
45
+ >>> b = torch.arange(5, dtype=torch.float, requires_grad=True)
46
+ >>> nt = torch.nested.as_nested_tensor([a, b])
47
+ >>> nt.is_leaf
48
+ False
49
+ >>> fake_grad = torch.nested.nested_tensor([torch.ones_like(a), torch.zeros_like(b)])
50
+ >>> nt.backward(fake_grad)
51
+ >>> a.grad
52
+ tensor([1., 1., 1.])
53
+ >>> b.grad
54
+ tensor([0., 0., 0., 0., 0.])
55
+ """
56
+ if not isinstance(tensor_list, list) or any(
57
+ not isinstance(t, Tensor) for t in tensor_list
58
+ ):
59
+ raise TypeError(
60
+ "as_nested_tensor(): Expected first argument to be a list of tensors "
61
+ )
62
+
63
+ if layout is None:
64
+ layout = torch.strided
65
+ if layout == torch.strided:
66
+ return torch._nested_tensor_from_tensor_list(tensor_list, dtype, None, device, None)
67
+ elif layout == torch.jagged:
68
+ from torch.nested._internal.nested_tensor import jagged_from_list
69
+
70
+ nt, _ = jagged_from_list(tensor_list, offsets=None, device=device, dtype=dtype)
71
+ return nt
72
+ else:
73
+ raise RuntimeError(f"Specified layout is unsupported for nested tensors: {layout}")
74
+
75
+
76
+ # Note: This not only adds doc strings for the nested ops, but
77
+ # also connects the torch.nested Python namespace to the torch._C._nested builtins.
78
+
79
+ to_padded_tensor = _add_docstr(
80
+ _nested.nested_to_padded_tensor,
81
+ r"""
82
+ to_padded_tensor(input, padding, output_size=None, out=None) -> Tensor
83
+
84
+ Returns a new (non-nested) Tensor by padding the :attr:`input` nested tensor.
85
+ The leading entries will be filled with the nested data,
86
+ while the trailing entries will be padded.
87
+
88
+ .. warning::
89
+
90
+ :func:`to_padded_tensor` always copies the underlying data,
91
+ since the nested and the non-nested tensors differ in memory layout.
92
+
93
+ Args:
94
+ padding (float): The padding value for the trailing entries.
95
+
96
+ Keyword args:
97
+ output_size (Tuple[int]): The size of the output tensor.
98
+ If given, it must be large enough to contain all nested data;
99
+ else, will infer by taking the max size of each nested sub-tensor along each dimension.
100
+ out (Tensor, optional): the output tensor.
101
+
102
+ Example::
103
+
104
+ >>> nt = torch.nested.nested_tensor([torch.randn((2, 5)), torch.randn((3, 4))])
105
+ nested_tensor([
106
+ tensor([[ 1.6862, -1.1282, 1.1031, 0.0464, -1.3276],
107
+ [-1.9967, -1.0054, 1.8972, 0.9174, -1.4995]]),
108
+ tensor([[-1.8546, -0.7194, -0.2918, -0.1846],
109
+ [ 0.2773, 0.8793, -0.5183, -0.6447],
110
+ [ 1.8009, 1.8468, -0.9832, -1.5272]])
111
+ ])
112
+ >>> pt_infer = torch.nested.to_padded_tensor(nt, 0.0)
113
+ tensor([[[ 1.6862, -1.1282, 1.1031, 0.0464, -1.3276],
114
+ [-1.9967, -1.0054, 1.8972, 0.9174, -1.4995],
115
+ [ 0.0000, 0.0000, 0.0000, 0.0000, 0.0000]],
116
+ [[-1.8546, -0.7194, -0.2918, -0.1846, 0.0000],
117
+ [ 0.2773, 0.8793, -0.5183, -0.6447, 0.0000],
118
+ [ 1.8009, 1.8468, -0.9832, -1.5272, 0.0000]]])
119
+ >>> pt_large = torch.nested.to_padded_tensor(nt, 1.0, (2, 4, 6))
120
+ tensor([[[ 1.6862, -1.1282, 1.1031, 0.0464, -1.3276, 1.0000],
121
+ [-1.9967, -1.0054, 1.8972, 0.9174, -1.4995, 1.0000],
122
+ [ 1.0000, 1.0000, 1.0000, 1.0000, 1.0000, 1.0000],
123
+ [ 1.0000, 1.0000, 1.0000, 1.0000, 1.0000, 1.0000]],
124
+ [[-1.8546, -0.7194, -0.2918, -0.1846, 1.0000, 1.0000],
125
+ [ 0.2773, 0.8793, -0.5183, -0.6447, 1.0000, 1.0000],
126
+ [ 1.8009, 1.8468, -0.9832, -1.5272, 1.0000, 1.0000],
127
+ [ 1.0000, 1.0000, 1.0000, 1.0000, 1.0000, 1.0000]]])
128
+ >>> pt_small = torch.nested.to_padded_tensor(nt, 2.0, (2, 2, 2))
129
+ RuntimeError: Value in output_size is less than NestedTensor padded size. Truncation is not supported.
130
+
131
+ """,
132
+ )
133
+
134
+ def nested_tensor(tensor_list, *, dtype=None, layout=None, device=None, requires_grad=False, pin_memory=False) -> Tensor:
135
+ r"""
136
+ Constructs a nested tensor with no autograd history (also known as a “leaf tensor”, see
137
+ :ref:`Autograd mechanics <autograd-mechanics>`) from :attr:`tensor_list`, a list of tensors.
138
+
139
+ Args:
140
+ tensor_list (List[array_like]): a list of tensors, or anything that can be passed to torch.tensor,
141
+ where each element of the list has the same dimensionality.
142
+
143
+ Keyword arguments:
144
+ dtype (:class:`torch.dtype`, optional): the desired type of returned nested tensor.
145
+ Default: if None, same :class:`torch.dtype` as leftmost tensor in the list.
146
+ layout (:class:`torch.layout`, optional): the desired layout of returned nested tensor.
147
+ Only strided and jagged layouts are supported. Default: if None, the strided layout.
148
+ device (:class:`torch.device`, optional): the desired device of returned nested tensor.
149
+ Default: if None, same :class:`torch.device` as leftmost tensor in the list
150
+ requires_grad (bool, optional): If autograd should record operations on the
151
+ returned nested tensor. Default: ``False``.
152
+ pin_memory (bool, optional): If set, returned nested tensor would be allocated in
153
+ the pinned memory. Works only for CPU tensors. Default: ``False``.
154
+
155
+ Example::
156
+
157
+ >>> a = torch.arange(3, dtype=torch.float, requires_grad=True)
158
+ >>> b = torch.arange(5, dtype=torch.float, requires_grad=True)
159
+ >>> nt = torch.nested.nested_tensor([a, b], requires_grad=True)
160
+ >>> nt.is_leaf
161
+ True
162
+ """
163
+ if layout is None:
164
+ layout = torch.strided
165
+ if layout == torch.strided:
166
+ return _nested.nested_tensor(
167
+ tensor_list,
168
+ dtype=dtype,
169
+ device=device,
170
+ requires_grad=requires_grad,
171
+ pin_memory=pin_memory)
172
+ elif layout == torch.jagged:
173
+ # Need to wrap lists of scalars as tensors
174
+ list_of_tensors = [t if isinstance(t, Tensor) else torch.as_tensor(t) for t in tensor_list]
175
+
176
+ from torch.nested._internal.nested_tensor import jagged_from_list
177
+
178
+ with torch.no_grad():
179
+ nt, _ = jagged_from_list(list_of_tensors, offsets=None, device=device, dtype=dtype)
180
+
181
+ nt.requires_grad_(requires_grad)
182
+ if pin_memory:
183
+ nt = nt.pin_memory() # type: ignore[assignment]
184
+
185
+ return nt
186
+ else:
187
+ raise RuntimeError(f"Specified layout is unsupported for nested tensors: {layout}")
188
+
189
+
190
+ def narrow(tensor: Tensor, dim: int, start: Union[int, Tensor], length: Union[int, Tensor], layout=torch.strided) -> Tensor:
191
+ r"""
192
+ Constructs a nested tensor (which might be a view) from :attr:`tensor`, a strided tensor. This follows
193
+ similar semantics to torch.Tensor.narrow, where in the :attr:`dim`-th dimension the new nested tensor
194
+ shows only the elements in the interval `[start, start+length)`. As nested representations
195
+ allow for a different `start` and `length` at each 'row' of that dimension, :attr:`start` and :attr:`length`
196
+ can also be tensors of shape `tensor.shape[0]`.
197
+
198
+ There are some differences depending on the layout you use for the nested tensor. With the strided layout,
199
+ torch.narrow will copy the narrowed data into a contiguous NT with strided layout, while
200
+ jagged layout narrow() will create a non-contiguous view of your original strided tensor. This particular
201
+ representation is really useful for representing kv-caches in Transformer models, as specialized
202
+ SDPA kernels can deal with this format easily, resulting in performance improvements.
203
+
204
+
205
+ Args:
206
+ tensor (:class:`torch.Tensor`): a strided tensor, which will be used as the underlying data
207
+ for the nested tensor if using the jagged layout or will be copied for the strided layout.
208
+ dim (int): the dimension where narrow will be applied. Only `dim=1` is supported for the
209
+ jagged layout, while the strided layout supports all dims
210
+ start (Union[int, :class:`torch.Tensor`]): starting element for the narrow operation
211
+ length (Union[int, :class:`torch.Tensor`]): number of elements taken during the narrow op
212
+
213
+ Keyword arguments:
214
+ layout (:class:`torch.layout`, optional): the desired layout of returned nested tensor.
215
+ Only strided and jagged layouts are supported. Default: if None, the strided layout.
216
+
217
+ Example::
218
+
219
+ >>> starts = torch.tensor([0, 1, 2, 3, 4], dtype=torch.int64)
220
+ >>> lengths = torch.tensor([3, 2, 2, 1, 5], dtype=torch.int64)
221
+ >>> narrow_base = torch.randn(5, 10, 20)
222
+ >>> nt_narrowed = torch.nested.narrow(narrow_base, 1, starts, lengths, layout=torch.jagged)
223
+ >>> nt_narrowed.is_contiguous()
224
+ False
225
+ """
226
+ if not isinstance(start, (int, SymInt, Tensor)):
227
+ raise RuntimeError("start must be an integer or a tensor")
228
+
229
+ if not isinstance(length, (int, SymInt, Tensor)):
230
+ raise RuntimeError("length must be an integer or a tensor")
231
+
232
+ if layout == torch.strided:
233
+ if isinstance(start, Tensor) or isinstance(length, Tensor):
234
+ raise RuntimeError("start and length must be integers for the strided layout NT impl")
235
+ # TODO: switch to as_nested_tensor(tensor) when it is available
236
+ nt = as_nested_tensor(torch.unbind(tensor), layout=torch.strided).narrow(dim, start, length)
237
+ elif layout == torch.jagged:
238
+ if dim != 1:
239
+ raise RuntimeError("jagged layout only supports dim=1")
240
+
241
+ from torch.nested._internal.nested_tensor import jagged_from_tensor_and_lengths
242
+
243
+ if isinstance(start, (int, SymInt)):
244
+ start = torch.tensor([start], device=tensor.device, dtype=torch.int64)
245
+
246
+ if isinstance(length, (int, SymInt)):
247
+ length = torch.tensor([length], device=tensor.device, dtype=torch.int64)
248
+
249
+ nt, _, _ = jagged_from_tensor_and_lengths(tensor, start, length)
250
+ else:
251
+ raise RuntimeError(f"Specified layout is unsupported for nested narrow: {layout}")
252
+
253
+ return nt
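A short end-to-end sketch tying the constructors above together; it only uses the public torch.nested calls documented in this file.

    import torch

    a, b = torch.randn(2, 8), torch.randn(5, 8)

    # Strided layout keeps one buffer per component; pad out to a dense (2, 5, 8) tensor.
    nt_strided = torch.nested.nested_tensor([a, b])
    padded = torch.nested.to_padded_tensor(nt_strided, 0.0)

    # Jagged layout packs components into one (sum(lengths), 8) values buffer plus offsets.
    nt_jagged = torch.nested.nested_tensor([a, b], layout=torch.jagged)

    # narrow() with per-row starts/lengths builds a non-contiguous jagged view over a
    # dense tensor (the kv-cache pattern mentioned in the narrow() docstring).
    base = torch.randn(3, 10, 8)
    starts = torch.tensor([0, 2, 4])
    lengths = torch.tensor([5, 3, 6])
    kv = torch.nested.narrow(base, 1, starts, lengths, layout=torch.jagged)
    assert not kv.is_contiguous()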
venv/lib/python3.10/site-packages/torch/nested/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (10.4 kB). View file
 
venv/lib/python3.10/site-packages/torch/nested/_internal/nested_tensor.py ADDED
@@ -0,0 +1,431 @@
1
+ from typing import Tuple
2
+
3
+ import torch
4
+ from torch._C import DispatchKey, DispatchKeySet
5
+ from torch._prims_common import is_expandable_to
6
+ from torch.fx.experimental.symbolic_shapes import has_free_symbols
7
+ from torch.utils.weak import WeakTensorKeyDictionary
8
+ from typing import * # noqa: F403
9
+
10
+ _tensor_id_counter = 0
11
+ _tensor_symint_registry = WeakTensorKeyDictionary()
12
+
13
+
14
+ def get_tensor_symint(tensor, *, coeff=1):
15
+ global _tensor_id_counter
16
+ tensor_symint = _tensor_symint_registry.get(tensor)
17
+ if tensor_symint is None:
18
+ tensor_symint = torch._C._get_nested_int(_tensor_id_counter, coeff)
19
+ _tensor_id_counter += 1
20
+ _tensor_symint_registry[tensor] = tensor_symint
21
+ return tensor_symint
22
+
23
+
24
+ # SDPA metadata; max / min seqlens are needed for e.g. flash
25
+ def _get_sdpa_extreme_seqlen(func, tensor):
26
+ return int(func(tensor).item())
27
+
28
+
29
+ class NestedTensor(torch.Tensor):
30
+ _values: torch.Tensor # type: ignore[assignment]
31
+ _offsets: torch.Tensor
32
+ _lengths: Optional[torch.Tensor]
33
+ # NOTE [ Nested ints for ragged sizes and strides ]
34
+ #
35
+ # Jagged layout tensors are tensors that represent a n-dim tensor with a
36
+ # ragged dimension, but are backed by an (n-1)-dim tensor underneath, e.g.,
37
+ # a jagged tensor with outer shape [B, x, D] is represented internally by a
38
+ # tensor with shape [sum(x), D] where we introduce what we call a nested int
39
+ # denoted as "x" here (but sometimes denoted with "*" to
40
+ # represent the ragged dimension, and sum(x) represents the dim of the inner
41
+ # tensor or equivalently the sum of all the sizes of the constituent
42
+ # tensors' varying lengths.
43
+ #
44
+ # We also use nested ints to represent the strides of this tensor.
45
+ # For example, a jagged tensor with shape [B, x, D] can be strided in two
46
+ # ways: [xD, D, 1] and [x, 1, sum(x)], where xD represents x multiplied by D
47
+ _size: Tuple[int, ...]
48
+ _stride: Tuple[int, ...]
49
+ # Indicates that the nth dimension is ragged
50
+ _ragged_idx: int
51
+ _metadata_cache: Dict[str, Any]
52
+
53
+ @staticmethod
54
+ def __new__(
55
+ cls,
56
+ values,
57
+ offsets,
58
+ *,
59
+ lengths=None,
60
+ **kwargs,
61
+ ):
62
+ ks = DispatchKeySet(DispatchKey.NestedTensor)
63
+ ks = ks.add(DispatchKey.AutogradNestedTensor)
64
+ r = torch.Tensor._make_wrapper_subclass( # type: ignore[attr-defined]
65
+ cls,
66
+ (0,),
67
+ (0,),
68
+ 0,
69
+ torch.contiguous_format,
70
+ values.dtype,
71
+ torch.jagged,
72
+ values.device,
73
+ False,
74
+ kwargs.get("requires_grad", False),
75
+ "sizes",
76
+ False,
77
+ True, # dispatch_layout
78
+ ks,
79
+ )
80
+ return r
81
+
82
+ def __init__(self, values, offsets, *, lengths=None, **kwargs):
83
+ super().__init__()
84
+ # Only support jagged for now.
85
+ assert offsets is not None
86
+ assert offsets.ndim == 1
87
+ assert not isinstance(values, NestedTensor)
88
+
89
+ # Query cache for the symint associated with offsets or lengths
90
+ # (create a new one if needed).
91
+ ragged_source = offsets if lengths is None else lengths
92
+ ragged_size = get_tensor_symint(ragged_source, coeff=1)
93
+ self._ragged_idx = kwargs.get("_ragged_idx", 1)
94
+ B = offsets.shape[0] - 1
95
+ if lengths is not None:
96
+ assert B == lengths.shape[0]
97
+
98
+ # subtract 1 to convert to values dim space
99
+ r = self._ragged_idx - 1
100
+ self._size = (B, *values.shape[:r], ragged_size, *values.shape[r + 1 :])
101
+ stride = values.stride()
102
+ self._strides = (ragged_size * stride[r], *stride)
103
+
104
+ self._values = values
105
+ self._offsets = offsets
106
+ self._lengths = lengths
107
+
108
+ # holds properties that are computed lazily
109
+ self._metadata_cache = kwargs.get("_metadata_cache") or {}
110
+
111
+ # collapsed ragged dim must always be dynamic
112
+ torch._dynamo.mark_dynamic(self, self._ragged_idx)
113
+ torch._dynamo.mark_dynamic(self._values, self._ragged_idx - 1)
114
+
115
+ def values(self):
116
+ # dispatch to get proper view relationship
117
+ return torch._nested_get_values(self) # type: ignore[return-value]
118
+
119
+ def offsets(self):
120
+ return self._offsets
121
+
122
+ def lengths(self):
123
+ return self._lengths
124
+
125
+ @property
126
+ def _max_seqlen(self):
127
+ if "max_seqlen" not in self._metadata_cache:
128
+ # compute & cache
129
+ self._metadata_cache["max_seqlen"] = _get_sdpa_extreme_seqlen(
130
+ torch.max,
131
+ self._offsets.diff() if self._lengths is None else self._lengths,
132
+ )
133
+ return self._metadata_cache["max_seqlen"]
134
+
135
+ @property
136
+ def _min_seqlen(self):
137
+ if "min_seqlen" not in self._metadata_cache:
138
+ # compute & cache
139
+ self._metadata_cache["min_seqlen"] = _get_sdpa_extreme_seqlen(
140
+ torch.min,
141
+ self._offsets.diff() if self._lengths is None else self._lengths,
142
+ )
143
+ return self._metadata_cache["min_seqlen"]
144
+
145
+ def __repr__(self):
146
+ # We should implement this in torch/_tensor_str.py instead
147
+ grad_fn_str = (
148
+ f", requires_grad={self.requires_grad}" if self.requires_grad else ""
149
+ )
150
+ if self.grad_fn:
151
+ grad_fn_str = f", grad_fn={self.grad_fn}"
152
+ return f"NestedTensor(size={self._size}, offsets={self._offsets}{grad_fn_str}, contiguous={self._lengths is None})"
153
+
154
+ def __reduce_ex__(self, proto):
155
+ state = torch._utils._get_obj_state(self)
156
+
157
+ # SymNodes are not serializable
158
+ assert "_size" in state and "_strides" in state
159
+ state = dict(state)
160
+ del state["_size"]
161
+ del state["_strides"]
162
+
163
+ func = NestedTensor
164
+ args = (self._values, self._offsets)
165
+ return (torch._tensor._rebuild_from_type_v2, (func, type(self), args, state))
166
+
167
+ def __tensor_flatten__(self):
168
+ ctx = {
169
+ "requires_grad": self.requires_grad,
170
+ # TODO: Don't guard on this!
171
+ "metadata_cache": self._metadata_cache,
172
+ "ragged_idx": self._ragged_idx,
173
+ }
174
+ inner_tensors = ["_values", "_offsets"]
175
+ if self._lengths is not None:
176
+ inner_tensors.append("_lengths")
177
+ return inner_tensors, ctx
178
+
179
+ @staticmethod
180
+ def __tensor_unflatten__(inner_tensors: Dict, meta, outer_size, outer_stride):
181
+ # inner tensors: _values, _offsets, [_lengths]
182
+ assert len(inner_tensors) >= 2 and len(inner_tensors) <= 3
183
+ values = inner_tensors["_values"]
184
+ offsets = inner_tensors["_offsets"]
185
+ lengths = inner_tensors.get("_lengths", None)
186
+ ragged_idx = meta["ragged_idx"]
187
+
188
+ # Note that we cannot simply check if is_fake(values) because
189
+ # during aot autograd, FunctionalTensors are not fake but hold
190
+ # symbolic sizes.
191
+ ragged_source = offsets if lengths is None else lengths
192
+ if has_free_symbols(ragged_source) or has_free_symbols(values):
193
+ # Associate offsets or lengths (possibly fake, possibly functionalized)
194
+ # with the ragged_size.
195
+ ragged_size = outer_size[ragged_idx]
196
+ _tensor_symint_registry[ragged_source] = ragged_size
197
+
198
+ return NestedTensor(
199
+ values,
200
+ offsets=offsets,
201
+ lengths=lengths,
202
+ requires_grad=meta["requires_grad"],
203
+ _ragged_idx=ragged_idx,
204
+ _metadata_cache=meta["metadata_cache"],
205
+ )
206
+
207
+ @classmethod
208
+ def __torch_dispatch__(cls, func, types, args=(), kwargs=None):
209
+ kwargs = {} if kwargs is None else kwargs
210
+
211
+ # Lazy import to avoid circular dependency
212
+ from .ops import lookup_jagged
213
+
214
+ fn = lookup_jagged(func, *args, **kwargs)
215
+ if fn is not None:
216
+ return fn(*args, **kwargs)
217
+
218
+ raise NotImplementedError(func)
219
+
220
+ @classmethod
221
+ def __torch_function__(cls, func, types, args=(), kwargs=None):
222
+ if kwargs is None:
223
+ kwargs = {}
224
+
225
+ from .ops import jagged_torch_function
226
+
227
+ try:
228
+ return jagged_torch_function(func, *args, **kwargs)
229
+ except NotImplementedError:
230
+ pass
231
+ with torch._C.DisableTorchFunctionSubclass():
232
+ return func(*args, **kwargs)
233
+
234
+
235
+ # NB: These fake view autograd.Functions are superseded by real view ops. Don't use them!
236
+ # TODO: Remove ViewBufferFromNested, ViewNestedFromBuffer, and buffer_from_jagged once the
237
+ # internal BC period has passed.
238
+
239
+
240
+ # Not actually a view!
241
+ class ViewBufferFromNested(torch.autograd.Function):
242
+ @staticmethod
243
+ def forward(ctx, x: NestedTensor): # type: ignore[override]
244
+ ctx.save_for_backward(x.offsets())
245
+ ctx.metadata_cache = x._metadata_cache
246
+ ctx.ragged_idx = x._ragged_idx
247
+ return x._values
248
+
249
+ @staticmethod
250
+ def backward(ctx, gO: torch.Tensor): # type: ignore[override]
251
+ (offsets,) = ctx.saved_tensors
252
+ return NestedTensor(
253
+ gO,
254
+ offsets=offsets,
255
+ _metadata_cache=ctx.metadata_cache,
256
+ _ragged_idx=ctx.ragged_idx,
257
+ )
258
+
259
+
260
+ # Not actually a view!
261
+ class ViewNestedFromBuffer(torch.autograd.Function):
262
+ @staticmethod
263
+ def forward(
264
+ ctx,
265
+ values: torch.Tensor,
266
+ offsets: torch.Tensor,
267
+ metadata_cache: Optional[Dict[str, Any]] = None,
268
+ ): # type: ignore[override]
269
+ return NestedTensor(
270
+ values.detach(),
271
+ offsets=offsets,
272
+ _metadata_cache=metadata_cache,
273
+ )
274
+
275
+ @staticmethod
276
+ def backward(ctx, gO: NestedTensor): # type: ignore[override]
277
+ return gO._values, None, None
278
+
279
+
280
+ def buffer_from_jagged(jagged):
281
+ return ViewBufferFromNested.apply(jagged)
282
+
283
+
284
+ # Need to make it obvious that users should be passing in offsets
285
+ def jagged_from_list(
286
+ tensors: List[torch.Tensor],
287
+ offsets: Optional[torch.Tensor],
288
+ dtype=None,
289
+ device=None,
290
+ ) -> Tuple[NestedTensor, torch.Tensor]:
291
+ """Constructs a NestedTensor backed by jagged layout from a list of tensors"""
292
+
293
+ if not len(set(t.dtype for t in tensors)) == 1: # noqa: C401
294
+ raise RuntimeError(
295
+ "When constructing a nested tensor, all tensors in list must have the same dtype"
296
+ )
297
+ if not len(set(t.device for t in tensors)) == 1: # noqa: C401
298
+ raise RuntimeError(
299
+ "When constructing a nested tensor, all tensors in list must be on the same device"
300
+ )
301
+
302
+ # Check that the NT is representable by the jagged layout.
303
+ # Jagged layout represents (B, *, D_0, D_1, ..., D_N), where the only
304
+ # raggedness allowed is for the single dim immediately adjacent to the batch dim.
305
+ sizes = [t.shape for t in tensors]
306
+ non_first_sizes = [s[1:] for s in sizes]
307
+ at_most_first_ragged = all(s == non_first_sizes[0] for s in non_first_sizes)
308
+ if not at_most_first_ragged:
309
+ raise RuntimeError(
310
+ "Cannot represent given tensor list as a nested tensor with the jagged layout. "
311
+ "Note that the jagged layout only represents shapes of the form "
312
+ "(B, *, D_0, D_1, ..., D_N), with only * allowed to be ragged."
313
+ )
314
+
315
+ # Set properties appropriately.
316
+ values = torch.cat(tensors, dim=0)
317
+ to_kwargs = {}
318
+ if device is not None:
319
+ to_kwargs["device"] = device
320
+ if dtype is not None:
321
+ to_kwargs["dtype"] = dtype
322
+ values = values.to(**to_kwargs)
323
+
324
+ # Calculate jagged offsets if not provided.
325
+ if offsets is None:
326
+ # Jagged layout specifies that offsets are stored as int64 on the same device as values.
327
+ # TODO: An alternative way to construct offsets is to use F.pad. This avoids creating
328
+ # an extra leaf tensor during the forward, potentially resolving compatibility issues.
329
+ offsets = torch.cat(
330
+ [
331
+ torch.zeros(1, dtype=torch.int64, device=values.device),
332
+ torch.tensor([s[0] for s in sizes], device=values.device).cumsum(dim=0),
333
+ ]
334
+ )
335
+
336
+ ret_nt = nested_view_from_values_offsets(values, offsets)
337
+ ret_nt._metadata_cache = {
338
+ # compute this now since it's easy
339
+ "max_seqlen": max([t.shape[0] for t in tensors]),
340
+ "min_seqlen": min([t.shape[0] for t in tensors]),
341
+ }
342
+ return (ret_nt, offsets) # type: ignore[return-value]
343
+
344
+
345
+ def jagged_from_tensor_and_lengths(
346
+ tensor: torch.Tensor, starts: torch.Tensor, lengths: torch.Tensor
347
+ ) -> Tuple[NestedTensor, torch.Tensor, Optional[torch.Tensor]]:
348
+ """Constructs a NestedTensor backed by jagged layout from a tensor, starts of sequences, and sequence lengths"""
349
+ batch_size = tensor.shape[0]
350
+ if is_expandable_to(starts.shape, (batch_size,)) and is_expandable_to(
351
+ lengths.shape, (batch_size,)
352
+ ):
353
+ start_list = starts.expand(batch_size)
354
+ length_list = lengths.expand(batch_size)
355
+ else:
356
+ raise RuntimeError(
357
+ "When constructing a jagged nested tensor using narrow(), "
358
+ "your start and length must be Tensors that broadcast to input.shape[0]"
359
+ )
360
+
361
+ # Calculate jagged offsets
362
+ assert (
363
+ len(tensor.shape) >= 2
364
+ ), "tensor must at least be 2D for the nested narrow op to work"
365
+ max_seq_len = tensor.shape[1]
366
+ offset_lengths = max_seq_len * torch.arange(
367
+ 0, batch_size, dtype=torch.int64, device=tensor.device
368
+ )
369
+ # Jagged layout specifies that offsets are stored as int64 on the same device as values.
370
+ offsets = torch.cat(
371
+ [
372
+ start_list + offset_lengths,
373
+ (start_list[-1] + offset_lengths[-1] + length_list[-1]).unsqueeze(0),
374
+ ]
375
+ )
376
+
377
+ # Reshape buffer to flatten the 1st and 2nd dimension (view used to enforce non-copy)
378
+ if len(tensor.shape) > 2:
379
+ values = tensor.view(-1, *tensor.shape[2:])
380
+ else:
381
+ values = tensor.view(-1)
382
+
383
+ # Check if offsets and lengths make it possibly contiguous and return a regular NT
384
+ is_contiguous = True
385
+ orig_dim = tensor.shape[1]
386
+ if torch.any(length_list[1:-1].ne(orig_dim)):
387
+ is_contiguous = False
388
+ if torch.any(offsets[1:-2].diff().ne(orig_dim)):
389
+ is_contiguous = False
390
+ if offsets[0] + length_list[0] != orig_dim:
391
+ is_contiguous = False
392
+
393
+ actual_max_seqlen = int(torch.max(lengths).item())
394
+ min_seqlen = int(torch.min(lengths).item())
395
+
396
+ if is_contiguous:
397
+ ret_nt = nested_view_from_values_offsets(
398
+ values[offsets[0] : offsets[-1]], offsets - offsets[0]
399
+ )
400
+ else:
401
+ ret_nt = nested_view_from_values_offsets_lengths(values, offsets, length_list)
402
+
403
+ # populate metadata cache with computed seqlen extremes
404
+ ret_nt._metadata_cache = {
405
+ "max_seqlen": actual_max_seqlen,
406
+ "min_seqlen": min_seqlen,
407
+ }
408
+
409
+ return (ret_nt, offsets, None if is_contiguous else length_list)
410
+
411
+
412
+ # NB: A dummy arg is required so that NestedTensor.__torch_dispatch__() is invoked
413
+ # for _nested_view_from_values_offsets(). Sizes don't matter much, but they shouldn't be
414
+ # 0/1 because the dummy can be fake-ified and we want to avoid specializing.
415
+ # This arg is otherwise unused.
416
+ _nt_view_dummy = NestedTensor(
417
+ values=torch.randn(3, 3, device="meta"),
418
+ offsets=torch.randint(3, (2,), device="meta", dtype=torch.int64),
419
+ ).detach()
420
+
421
+
422
+ def nested_view_from_values_offsets(values, offsets, ragged_idx=1):
423
+ return torch._nested_view_from_jagged(
424
+ values, offsets, _nt_view_dummy, None, ragged_idx
425
+ ) # type: ignore[return-value]
426
+
427
+
428
+ def nested_view_from_values_offsets_lengths(values, offsets, lengths, ragged_idx=1):
429
+ return torch._nested_view_from_jagged(
430
+ values, offsets, _nt_view_dummy, lengths, ragged_idx
431
+ ) # type: ignore[return-value]
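A small sketch of how the pieces in this file fit together; jagged_from_list and the underscored attributes are internal and subject to change, used here only to illustrate the values/offsets representation.

    import torch
    from torch.nested._internal.nested_tensor import jagged_from_list

    ts = [torch.randn(2, 4), torch.randn(3, 4), torch.randn(1, 4)]
    nt, offsets = jagged_from_list(ts, offsets=None)

    # Components are packed along dim 0 of a single values buffer...
    print(nt._values.shape)   # torch.Size([6, 4])  (2 + 3 + 1 rows)
    # ...with component boundaries recorded as cumulative offsets.
    print(offsets)            # tensor([0, 2, 5, 6])
    # SDPA metadata cached eagerly by jagged_from_list:
    print(nt._max_seqlen, nt._min_seqlen)  # 3 1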
venv/lib/python3.10/site-packages/torch/nested/_internal/ops.py ADDED
@@ -0,0 +1,1120 @@
1
+ import functools
2
+ import math
3
+ import operator
4
+
5
+ import torch
6
+ from torch.nested._internal.sdpa import jagged_scaled_dot_product_attention
7
+
8
+ from .nested_tensor import NestedTensor
9
+ from typing import * # noqa: F403
10
+ import torch.nn.functional as F
11
+ from torch.fx.operator_schemas import normalize_function
12
+
13
+ __all__: List[Any] = []
14
+
15
+ JAGGED_OPS_TABLE: Dict[Any, Any] = {}
16
+
17
+
18
+ # Simplifying assumption: we assume that the batch dim is always the left-most
19
+ # dim, and the ragged dim is always the second dim.
20
+ def _outer_to_inner_dim(ndim, dim):
21
+ assert dim >= 0 and dim < ndim
22
+ return 0 if dim < 2 else dim - 1
23
+
24
+
25
+ def _wrap_jagged_dim(
26
+ ndim, dim, op_name, convert_to_inner_dim=True, allow_batch_dim=False
27
+ ):
28
+ from torch._prims_common import canonicalize_dims
29
+
30
+ wrapped = canonicalize_dims(ndim, dim)
31
+ if wrapped == 1:
32
+ raise RuntimeError(f"{op_name}(): not supported for NestedTensor on dim=1")
33
+ elif wrapped == 0 and not allow_batch_dim:
34
+ raise RuntimeError(f"{op_name}(): not supported for NestedTensor on dim=0")
35
+ return _outer_to_inner_dim(ndim, wrapped) if convert_to_inner_dim else wrapped
36
+
37
+
38
+ def _wrap_jagged_dims(ndim, dims, op_name):
39
+ # ex: (2, 3, 4) -> (1, 2, 3)
40
+ # ex: (0, 1, 4) -> (0, 3)
41
+ from torch._prims_common import canonicalize_dims
42
+
43
+ wrapped_dims = [canonicalize_dims(ndim, d) for d in dims]
44
+ # This logic needs to be done after we canonicalize dims but before we
45
+ # map to inner dims so we can print a nicer error message.
46
+ zero_in_dims = 0 in wrapped_dims
47
+ one_in_dims = 1 in wrapped_dims
48
+ if zero_in_dims ^ one_in_dims:
49
+ apply, not_apply = ("batch", "ragged") if zero_in_dims else ("ragged", "batch")
50
+ raise RuntimeError(
51
+ f"{op_name}(): applying over the {apply} dimension, but not the {not_apply}"
52
+ " dimension is not supported for NestedTensor"
53
+ )
54
+ return (
55
+ tuple(_outer_to_inner_dim(ndim, d) for d in dims if d != 0),
56
+ zero_in_dims,
57
+ )
58
+
59
+
60
+ def check_schema(schema_str: str, func, *args, **kwargs) -> None:
61
+ named_arg_types = schema_str.split(", ")
62
+ num_optional_args = sum([x.endswith("?") for x in named_arg_types])
63
+ min_args = len(named_arg_types) - num_optional_args
64
+
65
+ # special case: ellipses allows for any number of unchecked args at the end
66
+ if named_arg_types[-1] == "...":
67
+ named_arg_types = named_arg_types[:-1]
68
+ else:
69
+ if not (len(args) >= min_args and len(args) <= len(named_arg_types)):
70
+ raise ValueError(
71
+ f"NestedTensor {func.__name__}({schema_str}): expected at least {min_args} "
72
+ f"arguments and at most {len(named_arg_types)} arguments, but got: "
73
+ f"{len(args)} arguments"
74
+ )
75
+
76
+ arg_type_check_fns = {
77
+ "t": lambda x: isinstance(x, torch.Tensor) and not isinstance(x, NestedTensor),
78
+ "jt": lambda x: isinstance(x, NestedTensor)
79
+ and x._lengths is None
80
+ and x._ragged_idx == 1, # ops with "jt" require contiguous JT only
81
+ "jt_all": lambda x: isinstance(
82
+ x, NestedTensor
83
+ ), # ops with "jt_all" can accept all kinds of JT
84
+ "any": lambda x: True,
85
+ }
86
+ for i, named_arg_type in enumerate(named_arg_types):
87
+ name, arg_type = named_arg_type.split(": ")
88
+ is_optional = arg_type.endswith("?")
89
+ normalized_arg_type = arg_type[:-1] if is_optional else arg_type
90
+ if normalized_arg_type not in arg_type_check_fns.keys():
91
+ raise AssertionError(f"Unknown arg type: {normalized_arg_type}")
92
+
93
+ if i >= len(args):
94
+ if not is_optional:
95
+ raise ValueError(
96
+ f"NestedTensor {func.__name__}({schema_str}) "
97
+ f"missing required argument: {name}"
98
+ )
99
+ continue
100
+
101
+ _check_fn = arg_type_check_fns[normalized_arg_type]
102
+
103
+ def check_fn(x, is_optional=is_optional):
104
+ if is_optional:
105
+ return x is None or _check_fn(x)
106
+ else:
107
+ return _check_fn(x)
108
+
109
+ if not check_fn(args[i]):
110
+ type_to_desc = {
111
+ "t": "tensor",
112
+ "t?": "optional tensor",
113
+ "jt": "contiguous jagged layout NestedTensor",
114
+ "jt_all": "jagged layout NestedTensor",
115
+ "any": "<any type>",
116
+ }
117
+
118
+ raise ValueError(
119
+ f"NestedTensor {func.__name__}({schema_str}): expected {name} to be a "
120
+ f"{type_to_desc[arg_type]}"
121
+ )
122
+
123
+
124
+ def check_ragged_dim_same(
125
+ func, a: NestedTensor, a_name: str, b: NestedTensor, b_name: str
126
+ ) -> None:
127
+ # Calling into .shape here
128
+ if a._size[a._ragged_idx] != b._size[b._ragged_idx]:
129
+ raise RuntimeError(
130
+ f"NestedTensor {func.__name__}: expected {a_name} and {b_name} to have the "
131
+ "same exact offsets tensor."
132
+ )
133
+
134
+
135
+ # returns True if the raggedness-relevant portions of the NT shape
136
+ # match those of the specified size
137
+ def raggedness_matches(nt, size):
138
+ end = nt._ragged_idx + 1
139
+ nt_ragged = nt._size[:end]
140
+ size_ragged = size[:end]
141
+ return len(nt_ragged) == len(size_ragged) and (
142
+ all(ns == s or s == -1 for ns, s in zip(nt_ragged, size_ragged))
143
+ )
144
+
145
+
146
+ def squeeze_leading_ones(t):
147
+ # Note: [ Squeezing leading ones ]
148
+ #
149
+ # Squeeze leading ones from t.
150
+ #
151
+ # We want:
152
+ # (B, j0, ?, ?) + (1, 1, ?, ?) -> (B, j0, ?, ?)
153
+ # (B, j0, ?, ?) + (1, 1, 1, ?, ?) -> (1, B, j0, ?, ?) (not yet supported)
154
+ #
155
+ # 1) Squeeze extra ones and grab values from NT
156
+ # (1, 1, ?, ?) -> (?, ?) and (sum(*), ?, ?) -> (B, j0, ?, ?)
157
+ # 2) Do dense broadcasting:
158
+ # (sum(*), ?, ?) + (?, ?) -> (sum(*), ?, ?)
159
+ # 3) Construct nested tensor
160
+ # (sum(*), ?, ?) -> (B, j0, ?, ?)
161
+ #
162
+ # If unsqueezing on the 0th dim becomes supported, we would unsqueeze
163
+ # at step (4) and we would need to update this function to record how
164
+ # many ones we unsqueezed.
165
+ while t.shape[0] == 1:
166
+ t = t.squeeze(0)
167
+ return t
168
+
169
+
170
+ def register_func(tables, aten_ops, schema_str):
171
+ if not isinstance(aten_ops, list):
172
+ aten_ops = [aten_ops]
173
+ if not isinstance(tables, list):
174
+ tables = [tables]
175
+
176
+ def wrapper(func):
177
+ for aten_op in aten_ops:
178
+
179
+ def get_inner(aten_op):
180
+ def inner(*args, **kwargs):
181
+ check_schema(schema_str, func, *args, **kwargs)
182
+ return func(aten_op, *args, **kwargs)
183
+
184
+ return inner
185
+
186
+ for table in tables:
187
+ table[aten_op] = get_inner(aten_op)
188
+ return func
189
+
190
+ return wrapper
191
+
192
+
193
+ register_jagged_func = functools.partial(register_func, JAGGED_OPS_TABLE)
194
+
195
+
196
+ def lookup_jagged(func, *args, **kwargs) -> Optional[Callable]:
197
+ dispatch_func = JAGGED_OPS_TABLE.get(func, None)
198
+ if dispatch_func is not None:
199
+ return dispatch_func
200
+
201
+ # Handle pointwise fallbacks
202
+ if torch.Tag.pointwise in func.tags:
203
+ # Assume there aren't additional tensors that aren't the "unary/binary" args
204
+ num_tensor_args = sum([isinstance(x, torch.Tensor) for x in args])
205
+ if num_tensor_args == 1:
206
+ check_schema("self: jt_all, ...", func, *args, **kwargs)
207
+ return functools.partial(jagged_unary_pointwise, func)
208
+ elif num_tensor_args == 2:
209
+ check_schema("lhs: any, rhs: any, ...", func, *args, **kwargs)
210
+ return functools.partial(jagged_binary_pointwise, func)
211
+
212
+ return None
213
+
214
+
215
+ def extract_kwargs(arg):
216
+ kwargs = {
217
+ "offsets": arg.offsets(),
218
+ "_metadata_cache": arg._metadata_cache,
219
+ "_ragged_idx": arg._ragged_idx,
220
+ }
221
+ return kwargs
222
+
223
+
224
+ def jagged_unary_pointwise(func, *args, **kwargs):
225
+ return NestedTensor(
226
+ func(args[0]._values, *args[1:], **kwargs), **extract_kwargs(args[0])
227
+ )
228
+
229
+
230
+ def jagged_binary_pointwise(func, *args, **kwargs):
231
+ a, b = args[0], args[1]
232
+ assert isinstance(a, NestedTensor) or isinstance(b, NestedTensor)
233
+
234
+ mismatch_error_msg = (
235
+ "cannot call binary pointwise function {} with inputs of shapes {} and {}"
236
+ )
237
+ # a is NT, b is NT
238
+ if isinstance(a, NestedTensor) and isinstance(b, NestedTensor):
239
+ # ex: (B, j0, D) + (B, j0, D)
240
+ # ex: (B, j0, D) + (B, j0, 1)
241
+ if raggedness_matches(a, b._size):
242
+ return NestedTensor(
243
+ func(a._values, b._values, *args[2:], **kwargs), **extract_kwargs(a)
244
+ )
245
+ raise RuntimeError(mismatch_error_msg.format(func.__name__, a._size, b._size))
246
+ # either a is NT or b is NT at this point
247
+ a_is_nt = isinstance(a, NestedTensor)
248
+ extracted_kwargs = extract_kwargs(a) if a_is_nt else extract_kwargs(b)
249
+
250
+ # === Handle broadcasting across the batch / ragged dims ===
251
+
252
+ # Easy case: take advantage of pre-existing broadcasting logic
253
+ # ex: (B, j0, ?, ?) + (?) -> (B, j0, ?, ?)
254
+ # ex: (B, j0, ?, ?) + (?, ?) -> (B, j0, ?, ?)
255
+ # ex: (B, j0, ?, ?) + (1, 1, ?, ?) -> (B, j0, ?, ?)
256
+ nt, t = (a, b) if a_is_nt else (b, a)
257
+ # See Note: [ Squeezing leading ones ]
258
+ if t.dim() > nt.dim():
259
+ raise NotImplementedError("NYI: broadcasting NT with T with larger dim")
260
+ t_squeezed = squeeze_leading_ones(t)
261
+ if nt.dim() >= t_squeezed.dim() + 2:
262
+ lhs, rhs = (nt._values, t_squeezed) if a_is_nt else (t_squeezed, nt._values)
263
+ return NestedTensor(func(lhs, rhs, *args[2:], **kwargs), **extracted_kwargs)
264
+
265
+ # Harder case: do manual broadcasting over unbound components
266
+ # when NT dim == non-NT dim
267
+ # ex: (B, j0, D_0, D_1) + (B, 1, D_0, D_1) -> (B, j0, D_0, D_1)
268
+ if a.dim() == b.dim():
269
+ # ex: (B, j0, D_0, D_1) + (1, 1, D_0, D_1) -> should
270
+ # be (B, j0, D_0, D_1) but not yet supported
271
+ if a.shape[0] != b.shape[0]:
272
+ raise RuntimeError(
273
+ mismatch_error_msg.format(func.__name__, a.shape, b.shape)
274
+ )
275
+
276
+ # need to use offsets to broadcast across ragged dim properly
277
+ # NB: inefficient fallback here; Triton codegen can help this
278
+ # TODO: Make this work with autograd
279
+ outputs = []
280
+ for a_comp, b_comp in zip(a.unbind(), b.unbind()):
281
+ outputs.append(func(a_comp, b_comp, *args[2:], **kwargs))
282
+ new_values = torch.cat(outputs, dim=0)
283
+ return NestedTensor(new_values, **extracted_kwargs)
284
+
285
+ # ex: (B, j0, D_0, D_1) + (A, B, 1, D_0, D_1) -> error because this breaks the invariant
286
+ # that ragged dim is wrt left-most batch dim
287
+ raise RuntimeError(mismatch_error_msg.format(func.__name__, a.shape, b.shape))
288
+
289
+
290
+ def jagged_torch_function(func, *args, **kwargs):
291
+ # SDPA has special kernels that handle nested tensors.
292
+ # Dispatch to the correct implementation here
293
+ if func is torch._C._nn.scaled_dot_product_attention:
294
+ return jagged_scaled_dot_product_attention(*args, **kwargs)
295
+
296
+ # Handle flatten() here because it's CompositeImplicit.
297
+ if func.__name__ == "flatten":
298
+
299
+ def _flatten_sig(input, start_dim=0, end_dim=-1):
300
+ pass
301
+
302
+ _, new_kwargs = normalize_function(
303
+ _flatten_sig, args=args, kwargs=kwargs, normalize_to_only_use_kwargs=True
304
+ )
305
+
306
+ inp = new_kwargs.pop("input")
307
+
308
+ # NB: stay in outer dim space because we're going to redispatch on a NT input
309
+ start_dim = _wrap_jagged_dim(
310
+ inp.dim(), new_kwargs["start_dim"], "flatten", convert_to_inner_dim=False
311
+ )
312
+ end_dim = _wrap_jagged_dim(
313
+ inp.dim(), new_kwargs["end_dim"], "flatten", convert_to_inner_dim=False
314
+ )
315
+
316
+ if start_dim == end_dim:
317
+ return inp
318
+
319
+ product = functools.reduce(operator.mul, inp.shape[start_dim : end_dim + 1])
320
+ new_shape = (*inp.shape[:start_dim], product, *inp.shape[end_dim + 1 :])
321
+
322
+ return inp.reshape(*new_shape)
323
+
324
+ raise NotImplementedError(func)
325
+
326
+
327
+ @register_jagged_func(
328
+ [
329
+ torch.ops.aten.is_non_overlapping_and_dense.default,
330
+ torch.ops.aten.sym_size.default,
331
+ torch.ops.aten.dim.default,
332
+ torch.ops.aten.sym_numel.default,
333
+ torch.ops.aten.sym_stride.default,
334
+ torch.ops.aten.sym_storage_offset.default,
335
+ ],
336
+ "self: jt_all",
337
+ )
338
+ def tensor_attr_supported_getter(func, *args, **kwargs):
339
+ if func == torch.ops.aten.is_non_overlapping_and_dense.default:
340
+ return False
341
+
342
+ if func == torch.ops.aten.sym_size.default:
343
+ return args[0]._size
344
+
345
+ if func == torch.ops.aten.dim.default:
346
+ return len(args[0]._size)
347
+
348
+ if func == torch.ops.aten.sym_numel.default:
349
+ if args[0]._lengths is not None:
350
+ return int(sum(args[0]._lengths) * math.prod(args[0]._size[2:]))
351
+ return args[0]._values.numel()
352
+
353
+ if func == torch.ops.aten.sym_stride.default:
354
+ return args[0]._strides
355
+
356
+ if func == torch.ops.aten.sym_storage_offset.default:
357
+ return args[0]._values.storage_offset()
358
+
359
+
360
+ @register_jagged_func(torch.ops.prim.layout.default, "self: jt_all")
361
+ def prim_layout_default(func, *args, **kwargs):
362
+ return torch.jagged
363
+
364
+
365
+ @register_jagged_func(
366
+ [torch.ops.aten.size.default],
367
+ "self: jt_all",
368
+ )
369
+ def tensor_attr_unsupported_getter(func, *args, **kwargs):
370
+ if func == torch.ops.aten.size.default:
371
+ raise RuntimeError(
372
+ "NestedTensors does not support directly calling torch.ops.aten.size "
373
+ "please use `nested_tensor.size()` instead."
374
+ )
375
+
376
+
377
+ @register_jagged_func(torch.ops.aten.is_contiguous.default, "self: jt_all")
378
+ def is_contiguous_general(func, *args, **kwargs):
379
+ from torch._prims_common import is_contiguous_for_memory_format
380
+
381
+ _, new_kwargs = normalize_function(
382
+ func, args=args, kwargs=kwargs, normalize_to_only_use_kwargs=True
383
+ )
384
+ inp = new_kwargs.pop("input")
385
+
386
+ # If created from narrow() check for lengths
387
+ if inp.lengths() is not None:
388
+ return False
389
+
390
+ new_kwargs["memory_format"] = new_kwargs.get(
391
+ "memory_format", torch.contiguous_format
392
+ )
393
+ if new_kwargs["memory_format"] == torch.preserve_format:
394
+ return True
395
+ return is_contiguous_for_memory_format(inp._values, **new_kwargs)
396
+
397
+
398
+ register_jagged_func(
399
+ torch.ops.aten.is_contiguous.memory_format, "self: jt_all, memory_format: any?"
400
+ )(is_contiguous_general)
401
+
402
+
403
+ @register_jagged_func(torch.ops.aten.linear.default, "input: jt, weight: t, bias: t?")
404
+ def linear_default(func, *args, **kwargs):
405
+ _, new_kwargs = normalize_function(
406
+ func, args=args, kwargs=kwargs, normalize_to_only_use_kwargs=True
407
+ )
408
+
409
+ inp = new_kwargs.pop("input")
410
+
411
+ return NestedTensor(func(inp._values, **new_kwargs), **extract_kwargs(inp))
412
+
413
+
414
+ @register_jagged_func(
415
+ torch.ops.aten.linear_backward.default,
416
+ "self: jt, grad_output: jt, weight: t, output_mask: any",
417
+ )
418
+ def linear_backward_default(func, *args, **kwargs):
419
+ _, new_kwargs = normalize_function(
420
+ func, args=args, kwargs=kwargs, normalize_to_only_use_kwargs=True
421
+ )
422
+
423
+ inp = new_kwargs.pop("input")
424
+ grad_output = new_kwargs.pop("grad_output")
425
+ weight = new_kwargs.pop("weight")
426
+
427
+ check_ragged_dim_same(func, inp, "self", grad_output, "grad_output")
428
+ ds = NestedTensor(
429
+ torch.mm(grad_output._values, weight), **extract_kwargs(grad_output)
430
+ )
431
+ dw = torch.mm(grad_output._values.T, inp._values)
432
+ db = None # NYI: gradient for bias, need to reduce over ragged dim
433
+ return (ds, dw, db)
434
+
435
+
436
+ @register_jagged_func(torch.ops.aten._to_copy.default, "self: jt_all")
437
+ def to_copy_default(func, *args, **kwargs):
438
+ _, new_kwargs = normalize_function(
439
+ func, args=args, kwargs=kwargs, normalize_to_only_use_kwargs=True
440
+ )
441
+
442
+ inp = new_kwargs.pop("input")
443
+ # don't change layout
444
+ new_kwargs.pop("layout")
445
+
446
+ new_values = func(inp._values, **new_kwargs)
447
+ # NB: Purposefully keep offsets on the old device.
448
+ return NestedTensor(new_values, **extract_kwargs(inp))
449
+
450
+
451
+ register_jagged_func(
452
+ [
453
+ torch.ops.aten.empty_like.default,
454
+ torch.ops.aten.ones_like.default,
455
+ torch.ops.aten.zeros_like.default,
456
+ torch.ops.aten.randn_like.default,
457
+ torch.ops.aten.detach.default,
458
+ ],
459
+ "self: jt_all",
460
+ )(jagged_unary_pointwise)
461
+
462
+
463
+ register_jagged_func(
464
+ torch.ops.aten._softmax.default, "self: jt, dim: any, half_to_float: any"
465
+ )(jagged_unary_pointwise)
466
+
467
+
468
+ @register_jagged_func(
469
+ torch.ops.aten.native_dropout.default, "self: jt, float: any, train: any?"
470
+ )
471
+ def native_dropout_default(func, *args, **kwargs):
472
+ _, new_kwargs = normalize_function(
473
+ func, args=args, kwargs=kwargs, normalize_to_only_use_kwargs=True
474
+ )
475
+
476
+ inp = new_kwargs.pop("input")
477
+ out1, out2 = func(inp._values, **new_kwargs)
478
+ return (
479
+ NestedTensor(out1, **extract_kwargs(inp)),
480
+ NestedTensor(out2, **extract_kwargs(inp)),
481
+ )
482
+
483
+
484
+ @register_jagged_func(
485
+ torch.ops.aten.native_dropout_backward.default,
486
+ "grad_output: jt, mask: jt, scale: any",
487
+ )
488
+ def native_dropout_backward_default(func, *args, **kwargs):
489
+ _, new_kwargs = normalize_function(
490
+ func, args=args, kwargs=kwargs, normalize_to_only_use_kwargs=True
491
+ )
492
+ grad_output = new_kwargs.pop("grad_output")
493
+ mask = new_kwargs.pop("mask")
494
+ return NestedTensor(
495
+ func(grad_output._values, mask._values, **new_kwargs),
496
+ **extract_kwargs(grad_output),
497
+ )
498
+
499
+
500
+ @register_jagged_func(torch.ops.aten.prod.dim_int, "self: jt, dim: any, keepdim: any?")
501
+ def prod_dim_int(func, *args, **kwargs):
502
+ _, new_kwargs = normalize_function(
503
+ func, args=args, kwargs=kwargs, normalize_to_only_use_kwargs=True
504
+ )
505
+
506
+ inp = new_kwargs.pop("input")
507
+ # TODO: Figure out how to handle this better
508
+ # keepdim=True is required to keep the output in jagged format
509
+ if not new_kwargs["keepdim"]:
510
+ raise RuntimeError("prod(): keepdim=True must be set for NestedTensor")
511
+ dim = new_kwargs["dim"]
512
+ new_kwargs["dim"] = _wrap_jagged_dim(len(inp._size), dim, "prod")
513
+
514
+ return NestedTensor(func(inp._values, **new_kwargs), **extract_kwargs(args[0]))
515
+
516
+
517
+ @register_jagged_func(
518
+ torch.ops.aten.split.Tensor, "self: jt, split_size: any, dim: any"
519
+ )
520
+ def split_tensor(func, *args, **kwargs):
521
+ _, new_kwargs = normalize_function(
522
+ func, args=args, kwargs=kwargs, normalize_to_only_use_kwargs=True
523
+ )
524
+
525
+ inp = new_kwargs.pop("input")
526
+
527
+ new_kwargs["dim"] = _wrap_jagged_dim(inp.dim(), new_kwargs["dim"], "split")
528
+
529
+ return tuple(
530
+ NestedTensor(values=x, **extract_kwargs(inp))
531
+ for x in func(inp._values, **new_kwargs)
532
+ )
533
+
534
+
535
+ @register_jagged_func(
536
+ torch.ops.aten.split_with_sizes.default, "self: jt, split_sizes: any, dim: any"
537
+ )
538
+ def split_with_sizes_default(func, *args, **kwargs):
539
+ _, new_kwargs = normalize_function(
540
+ func, args=args, kwargs=kwargs, normalize_to_only_use_kwargs=True
541
+ )
542
+
543
+ inp = new_kwargs.pop("input")
544
+
545
+ new_kwargs["dim"] = _wrap_jagged_dim(
546
+ inp.dim(), new_kwargs["dim"], "split_with_sizes"
547
+ )
548
+
549
+ return [
550
+ NestedTensor(values=x, **extract_kwargs(inp))
551
+ for x in func(inp._values, **new_kwargs)
552
+ ]
553
+
554
+
555
+ @register_jagged_func(torch.ops.aten.chunk.default, "self: jt, chunks: any, dim: any?")
556
+ def chunk_default(func, *args, **kwargs):
557
+ _, new_kwargs = normalize_function(
558
+ func, args=args, kwargs=kwargs, normalize_to_only_use_kwargs=True
559
+ )
560
+
561
+ inp = new_kwargs.pop("input")
562
+
563
+ new_kwargs["dim"] = _wrap_jagged_dim(
564
+ inp.dim(), new_kwargs["dim"], "chunk", allow_batch_dim=True
565
+ )
566
+
567
+ if new_kwargs["dim"] == 0:
568
+ chunks = new_kwargs["chunks"]
569
+ dim0_size = inp._size[0]
570
+ chunk_size = math.ceil(dim0_size / chunks)
571
+
572
+ # get _offsets of the chunks
573
+ lengths = inp._offsets.diff()
574
+ chunked_lengths = lengths.chunk(chunks)
575
+ chunked_offsets = [torch.cumsum(x, dim=0) for x in chunked_lengths]
576
+ chunked_offsets = [F.pad(x, (1, 0), value=0) for x in chunked_offsets]
577
+ nested_kwargs = [
578
+ {"offsets": per_offsets, "_ragged_idx": inp._ragged_idx}
579
+ for per_offsets in chunked_offsets
580
+ ]
581
+
582
+ # get _values of the chunks
583
+ split_sizes = [x.sum().item() for x in chunked_lengths]
584
+ chunk_values = inp._values.split(split_sizes)
585
+
586
+ return [
587
+ NestedTensor(values=chunk_values[i], **(nested_kwargs[i]))
588
+ for i in range(len(chunk_values))
589
+ ]
590
+ else:
591
+ return [
592
+ NestedTensor(values=x, **extract_kwargs(inp))
593
+ for x in func(inp._values, **new_kwargs)
594
+ ]
595
+
596
+
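Not part of the upstream file: to make the dim=0 branch above concrete, this is a small standalone sketch of how per-chunk offsets are rebuilt from the original offsets. The offsets values and chunk count are made up for illustration.

```python
import torch
import torch.nn.functional as F

# Original offsets for 5 sequences packed into one values buffer.
offsets = torch.tensor([0, 2, 5, 9, 10, 13])
chunks = 2

lengths = offsets.diff()                 # tensor([2, 3, 4, 1, 3])
chunked_lengths = lengths.chunk(chunks)  # (tensor([2, 3, 4]), tensor([1, 3]))

# Each chunk gets fresh offsets starting at 0: cumsum of its lengths, left-padded with 0.
chunked_offsets = [
    F.pad(torch.cumsum(x, dim=0), (1, 0), value=0) for x in chunked_lengths
]
print(chunked_offsets)  # [tensor([0, 2, 5, 9]), tensor([0, 1, 4])]

# The packed values buffer is then split by the total length of each chunk.
split_sizes = [int(x.sum()) for x in chunked_lengths]  # [9, 4]
```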
597
+ @register_jagged_func(torch.ops.aten.unbind.int, "self: jt_all, dim: any?")
598
+ def unbind_int(func, *args, **kwargs):
599
+ # Note that this specializes on the length of the offsets
600
+ _, new_kwargs = normalize_function(
601
+ func, args=args, kwargs=kwargs, normalize_to_only_use_kwargs=True
602
+ )
603
+
604
+ dim = new_kwargs["dim"]
605
+ if dim != 0:
606
+ raise RuntimeError("unbind(): only supported for NestedTensor on dim=0")
607
+
608
+ inp = new_kwargs.pop("input")
609
+ values = inp.values()
610
+ offsets = inp.offsets()
611
+ lengths = inp.lengths()
612
+
613
+ if inp._ragged_idx != 1:
614
+ raise RuntimeError(
615
+ "unbind(): only supported for NestedTensor when jagged dimension is 1"
616
+ )
617
+
618
+ if lengths is None:
619
+ return torch.split(values, offsets.diff().tolist())
620
+ return [
621
+ values[offsets[i] : (offsets[i] + lengths[i])] for i in range(lengths.shape[0])
622
+ ]
623
+
624
+
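Not part of the upstream file: a small sketch of the non-hole path above. With no `lengths`, unbind reduces to splitting the packed values by consecutive offset differences; shapes here are made up for illustration.

```python
import torch

values = torch.randn(6, 3)            # packed [total_len, D] buffer
offsets = torch.tensor([0, 2, 3, 6])  # 3 components with lengths 2, 1, 3

components = torch.split(values, offsets.diff().tolist())
print([c.shape for c in components])
# [torch.Size([2, 3]), torch.Size([1, 3]), torch.Size([3, 3])]
```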
625
+ @register_jagged_func(torch.ops.aten.squeeze.dim, "self: jt, dim: any")
626
+ def squeeze_dim(func, *args, **kwargs):
627
+ _, new_kwargs = normalize_function(
628
+ func, args=args, kwargs=kwargs, normalize_to_only_use_kwargs=True
629
+ )
630
+
631
+ inp = new_kwargs.pop("input")
632
+ values = inp._values
633
+
634
+ new_kwargs["dim"] = _wrap_jagged_dim(len(inp._size), new_kwargs["dim"], "squeeze")
635
+ return NestedTensor(func(values, **new_kwargs), **extract_kwargs(inp))
636
+
637
+
638
+ @register_jagged_func(torch.ops.aten.unsqueeze.default, "self: jt, dim: any")
639
+ def unsqueeze_default(func, *args, **kwargs):
640
+ _, new_kwargs = normalize_function(
641
+ func, args=args, kwargs=kwargs, normalize_to_only_use_kwargs=True
642
+ )
643
+
644
+ inp = new_kwargs.pop("input")
645
+ values = inp._values
646
+
647
+ # Account for collapsed jagged dim
648
+ dim = new_kwargs["dim"]
649
+ new_kwargs["dim"] = _wrap_jagged_dim(len(inp._size) + 1, dim, "unsqueeze")
650
+ return NestedTensor(func(values, **new_kwargs), **extract_kwargs(inp))
651
+
652
+
653
+ @register_jagged_func(torch.ops.aten.cat.default, "tensors: any, dim: any")
654
+ def cat_default(func, *args, **kwargs):
655
+ _, new_kwargs = normalize_function(
656
+ func, args=args, kwargs=kwargs, normalize_to_only_use_kwargs=True
657
+ )
658
+
659
+ tensors = new_kwargs.pop("tensors")
660
+
661
+ # Convert any non-nested to nested
662
+ nested = [t for t in tensors if t.is_nested]
663
+ assert len(nested) > 0
664
+ first = nested[0]
665
+ tensors = [t if t.is_nested else t.expand_as(first) for t in tensors]
666
+
667
+ # Account for collapsed jagged dim
668
+ dim = new_kwargs["dim"]
669
+ new_kwargs["dim"] = _wrap_jagged_dim(len(first.shape), dim, "cat")
670
+
671
+ return NestedTensor(
672
+ func([t._values for t in tensors], **new_kwargs), **extract_kwargs(tensors[0])
673
+ )
674
+
675
+
676
+ @register_jagged_func(torch.ops.aten.matmul.default, "self: jt, other: any")
677
+ def matmul_default(func, *args, **kwargs):
678
+ _, new_kwargs = normalize_function(
679
+ func, args=args, kwargs=kwargs, normalize_to_only_use_kwargs=True
680
+ )
681
+
682
+ inp = new_kwargs.pop("input")
683
+ other = new_kwargs.pop("other")
684
+
685
+ if inp.is_nested and not other.is_nested:
686
+ return NestedTensor(
687
+ func(inp._values, other, **new_kwargs), **extract_kwargs(inp)
688
+ )
689
+ elif inp.is_nested and other.is_nested:
690
+ # BMM with equivalent ragged dims between the two inputs
691
+ if inp.dim() > 3 and other.dim() > 3 and raggedness_matches(inp, other._size):
692
+ return NestedTensor(func(inp._values, other._values), **extract_kwargs(inp))
693
+
694
+ raise RuntimeError(
695
+ f"matmul(): not supported between inputs of shapes {inp._size} and {other.shape}"
696
+ )
697
+
698
+
699
+ @register_jagged_func(
700
+ torch.ops.aten.expand.default, "self: jt, size: any, implicit: any?"
701
+ )
702
+ def expand_default(func, *args, **kwargs):
703
+ _, new_kwargs = normalize_function(
704
+ func, args=args, kwargs=kwargs, normalize_to_only_use_kwargs=True
705
+ )
706
+
707
+ inp = new_kwargs.pop("input")
708
+ size = new_kwargs["size"]
709
+
710
+ assert ("implicit" not in new_kwargs) or (not new_kwargs.pop("implicit"))
711
+ if not raggedness_matches(inp, size):
712
+ raise RuntimeError(f"expand(): cannot expand shape {inp._size} -> {size}")
713
+
714
+ expand_arg = [-1, *size[2:]]
715
+ return NestedTensor(func(inp._values, expand_arg), **extract_kwargs(inp))
716
+
717
+
718
+ @register_jagged_func(torch.ops.aten.expand_as.default, "self: t, other: jt")
719
+ def expand_as_default(func, *args, **kwargs):
720
+ _, new_kwargs = normalize_function(
721
+ func, args=args, kwargs=kwargs, normalize_to_only_use_kwargs=True
722
+ )
723
+
724
+ inp = new_kwargs.pop("input")
725
+ other = new_kwargs.pop("other")
726
+
727
+ return NestedTensor(func(inp, other._values), **extract_kwargs(other))
728
+
729
+
730
+ @register_jagged_func(torch.ops.aten.where.self, "condition: jt, self: jt, other: jt")
731
+ def where_self(func, *args, **kwargs):
732
+ _, new_kwargs = normalize_function(
733
+ func, args=args, kwargs=kwargs, normalize_to_only_use_kwargs=True
734
+ )
735
+
736
+ condition = new_kwargs.pop("condition")
737
+ inp = new_kwargs.pop("input")
738
+ other = new_kwargs.pop("other")
739
+
740
+ assert condition._size == other._size == inp._size
741
+
742
+ return NestedTensor(
743
+ func(condition._values, inp._values, other._values, **new_kwargs),
744
+ **extract_kwargs(condition),
745
+ )
746
+
747
+
748
+ @register_jagged_func(torch.ops.aten._pin_memory.default, "self: jt, device: any?")
749
+ def _pin_memory_default(func, *args, **kwargs):
750
+ _, new_kwargs = normalize_function(
751
+ func, args=args, kwargs=kwargs, normalize_to_only_use_kwargs=True
752
+ )
753
+
754
+ inp = new_kwargs.pop("input")
755
+
756
+ return NestedTensor(func(inp._values, **new_kwargs), **extract_kwargs(inp))
757
+
758
+
759
+ @register_jagged_func(torch.ops.aten.is_pinned.default, "self: jt, device: any?")
760
+ def is_pinned_default(func, *args, **kwargs):
761
+ _, new_kwargs = normalize_function(
762
+ func, args=args, kwargs=kwargs, normalize_to_only_use_kwargs=True
763
+ )
764
+
765
+ inp = new_kwargs.pop("input")
766
+
767
+ return func(inp._values, **new_kwargs)
768
+
769
+
770
+ @register_jagged_func(
771
+ torch.ops.aten.is_same_size.default, "self: jt_all, other: jt_all"
772
+ )
773
+ def is_same_size_default(func, *args, **kwargs):
774
+ return args[0]._size == args[1]._size
775
+
776
+
777
+ @register_jagged_func(
778
+ torch.ops.aten.sum.dim_IntList, "self: jt, dim: any?, keepdim: any?, dtype: any?"
779
+ )
780
+ def sum_dim_IntList(func, *args, **kwargs):
781
+ # sum_dim_IntList can produce a NT or a T depending on whether the ragged dims
782
+ # are reduced away.
783
+ _, new_kwargs = normalize_function(
784
+ func, args=args, kwargs=kwargs, normalize_to_only_use_kwargs=True
785
+ )
786
+ inp = new_kwargs.pop("input")
787
+ assert inp._ragged_idx == 1
788
+ new_kwargs["dim"], ragged_reduced_away = _wrap_jagged_dims(
789
+ inp.dim(), new_kwargs["dim"], "sum"
790
+ )
791
+
792
+ if not ragged_reduced_away:
793
+ return NestedTensor(func(inp._values, **new_kwargs), **extract_kwargs(inp))
794
+ else:
795
+ # Don't wrap because we reduced away the raggedness
796
+ out = func(inp._values, **new_kwargs)
797
+ if new_kwargs["keepdim"]:
798
+ out = out.unsqueeze(0)
799
+ return out
800
+
801
+
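Not part of the upstream file: a small illustration of the two branches above, mirroring what the code does to the packed values buffer. When the ragged dim is among the reduced dims, the reduction runs directly on the packed buffer (so batch and ragged entries are folded together) and the result stays dense; otherwise only the inner dims are reduced and the result is re-wrapped as a jagged NT. Shapes are illustrative.

```python
import torch

values = torch.randn(5, 4)  # packed values of an NT with outer shape [2, j1, 4]

# Ragged dim reduced away: operate on the packed buffer directly.
reduced = values.sum(dim=0)              # shape [4], returned as a plain tensor
reduced_keepdim = reduced.unsqueeze(0)   # shape [1, 4] when keepdim=True

# Ragged dim kept: reduce only the trailing dim; the result is re-wrapped
# with the original offsets into a jagged NT.
per_token = values.sum(dim=1)            # shape [5]
print(reduced.shape, reduced_keepdim.shape, per_token.shape)
```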
802
+ @register_jagged_func(
803
+ torch.ops.aten.transpose.int, "self: jt_all, dim0: any, dim1: any"
804
+ )
805
+ def transpose_int(func, *args, **kwargs):
806
+ _, new_kwargs = normalize_function(
807
+ func, args=args, kwargs=kwargs, normalize_to_only_use_kwargs=True
808
+ )
809
+
810
+ from torch._prims_common import canonicalize_dims
811
+
812
+ inp = new_kwargs.pop("input")
813
+ dim0, dim1 = canonicalize_dims(inp.dim(), (new_kwargs["dim0"], new_kwargs["dim1"]))
814
+
815
+ if inp._lengths is not None:
816
+ raise ValueError(
817
+ "transpose(): not supported on jagged layout nested tensor with holes"
818
+ )
819
+
820
+ # To support the SDPA API, inputs need to have the ragged idx transposed to dim 2
821
+ # instead of 1, although the internal Flash and memory-efficient implementations will
822
+ # use the inputs with raggedness in dim 1.
823
+ if dim0 == inp._ragged_idx or dim1 == inp._ragged_idx:
824
+ if dim0 == 0 or dim1 == 0:
825
+ raise ValueError(
826
+ "Transpose is not supported on the batch dimension for jagged NT"
827
+ )
828
+ if dim0 == inp._ragged_idx:
829
+ to_dim = dim1
830
+ else:
831
+ to_dim = dim0
832
+ inp_kwargs = extract_kwargs(inp)
833
+ inp_kwargs["_ragged_idx"] = to_dim
834
+ return NestedTensor(
835
+ inp.values().transpose(
836
+ _outer_to_inner_dim(len(inp._size), dim0),
837
+ _outer_to_inner_dim(len(inp._size), dim1),
838
+ ),
839
+ **inp_kwargs,
840
+ )
841
+
842
+ new_kwargs["dim0"] = _wrap_jagged_dim(inp.dim(), new_kwargs["dim0"], "transpose")
843
+ new_kwargs["dim1"] = _wrap_jagged_dim(inp.dim(), new_kwargs["dim1"], "transpose")
844
+
845
+ return NestedTensor(func(inp._values, **new_kwargs), **extract_kwargs(inp))
846
+
847
+
848
+ @register_jagged_func(
849
+ [torch.ops.aten.view.default, torch.ops.aten._unsafe_view.default],
850
+ "self: jt_all, size: any",
851
+ )
852
+ def view_default(func, *args, **kwargs):
853
+ _, new_kwargs = normalize_function(
854
+ func, args=args, kwargs=kwargs, normalize_to_only_use_kwargs=True
855
+ )
856
+
857
+ inp = new_kwargs.pop("input")
858
+ size = new_kwargs.pop("size")
859
+
860
+ if inp._ragged_idx != 1 and tuple(inp._size) != tuple(size):
861
+ raise RuntimeError(
862
+ f"view(): does not support ragged_idx != 1 except when inp._size == size. "
863
+ f"inp._size is ({inp._size}) and size is ({size})."
864
+ )
865
+
866
+ # Ensure specified size still includes batch and ragged dims
867
+ if len(size) < 3 or not raggedness_matches(inp, size):
868
+ raise RuntimeError(f"view(): cannot view shape {inp._size} as {size}")
869
+
870
+ # outer size: the size of the NT, e.g. [3, j0, 10]
871
+ # inner size: the size of the values, e.g. [8, 10] (e.g. for offsets = [0, 3, 5, 8])
872
+ # this function gets inner_size[inner_idx] for a given inner_idx.
873
+ #
874
+ # example: for outer size [a, b, c, j0, d, e, f]
875
+ # assume that j0 is ragged and the others are concrete integers
876
+ # and ragged_idx=3
877
+ # inner size will be [b, c, inp._values.size(ragged_idx - 1), d, e, f]
878
+ # therefore:
879
+ # inner_size[0] = outer_size[1]
880
+ # inner_size[1] = outer_size[2]
881
+ # inner_size[2] = inp._values.size(ragged_idx - 1)
882
+ # inner_size[3] = outer_size[4]
883
+ # inner_size[4] = outer_size[5]
884
+ def get_inner_size(inner_idx):
885
+ nonlocal inp, size
886
+ if inner_idx == inp._ragged_idx - 1:
887
+ return inp._values.size(inner_idx)
888
+ else:
889
+ return size[inner_idx + 1]
890
+
891
+ inner_size = [get_inner_size(i) for i in range(len(size) - 1)]
892
+
893
+ return NestedTensor(func(inp._values, inner_size), **extract_kwargs(inp))
894
+
895
+
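Not part of the upstream file: a tiny standalone sketch that mimics the outer/inner size mapping described in the comment above. All sizes are made up, and `None` stands in for the symbolic ragged extent j0.

```python
import torch

# Outer NT size [a, b, c, j0, d] with ragged_idx = 3 (j0 is ragged).
outer_size = [2, 3, 5, None, 7]
ragged_idx = 3
# Hypothetical packed values buffer: batch and ragged dims are collapsed away,
# so it has one fewer dim than the outer size.
values = torch.randn(3, 5, 11, 7)

def get_inner_size(inner_idx):
    # Inner dim i corresponds to outer dim i + 1; the collapsed ragged dim
    # takes its extent from the values buffer instead of the requested size.
    if inner_idx == ragged_idx - 1:
        return values.size(inner_idx)
    return outer_size[inner_idx + 1]

inner_size = [get_inner_size(i) for i in range(len(outer_size) - 1)]
print(inner_size)  # [3, 5, 11, 7]
```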
896
+ @register_jagged_func(
897
+ torch.ops.aten.native_layer_norm.default,
898
+ "input: jt, normalized_shape: any, weight: any?, bias: any?, eps: any",
899
+ )
900
+ def native_layer_norm_default(func, *args, **kwargs):
901
+ _, new_kwargs = normalize_function(
902
+ func, args=args, kwargs=kwargs, normalize_to_only_use_kwargs=True
903
+ )
904
+
905
+ inp = new_kwargs.pop("input")
906
+ normalized_shape = new_kwargs["normalized_shape"]
907
+
908
+ # Ensure we're not trying to normalize over the ragged dim
909
+ if inp.dim() < 3 or (inp.dim() - len(normalized_shape)) < 2:
910
+ raise RuntimeError(
911
+ "layer_norm(): normalizing over ragged dim not supported for nested tensors"
912
+ )
913
+
914
+ output, mean, std = func(inp._values, **new_kwargs)
915
+ return (NestedTensor(output, **extract_kwargs(inp)), mean, std)
916
+
917
+
918
+ @register_jagged_func(
919
+ torch.ops.aten.native_layer_norm_backward.default,
920
+ "grad_out: jt, input: jt, normalized_shape: any, mean: any, rstd: any, weight: any?, bias: any?, output_mask: any",
921
+ )
922
+ def native_layer_norm_backward_default(func, *args, **kwargs):
923
+ _, new_kwargs = normalize_function(
924
+ func, args=args, kwargs=kwargs, normalize_to_only_use_kwargs=True
925
+ )
926
+ grad_out = new_kwargs.pop("grad_out")
927
+ inp = new_kwargs.pop("input")
928
+ d_input, d_gamma, d_beta = func(grad_out._values, inp._values, **new_kwargs)
929
+ if d_input is None:
930
+ return (None, d_gamma, d_beta)
931
+
932
+ return (NestedTensor(d_input, **extract_kwargs(inp)), d_gamma, d_beta)
933
+
934
+
935
+ @register_jagged_func(torch.ops.aten.select.int, "self: jt, dim: any, index: any")
936
+ def select_int(func, *args, **kwargs):
937
+ _, new_kwargs = normalize_function(
938
+ func, args=args, kwargs=kwargs, normalize_to_only_use_kwargs=True
939
+ )
940
+
941
+ inp = new_kwargs.pop("input")
942
+ new_kwargs["dim"] = _wrap_jagged_dim(inp.dim(), new_kwargs["dim"], "select")
943
+
944
+ return NestedTensor(func(inp._values, **new_kwargs), **extract_kwargs(inp))
945
+
946
+
947
+ @register_jagged_func(
948
+ torch.ops.aten.slice.Tensor,
949
+ "self: jt, dim: any?, start: any?, end: any?, step: any?",
950
+ )
951
+ def slice_tensor(func, *args, **kwargs):
952
+ _, new_kwargs = normalize_function(
953
+ func, args=args, kwargs=kwargs, normalize_to_only_use_kwargs=True
954
+ )
955
+
956
+ inp = new_kwargs.pop("input")
957
+ new_kwargs["dim"] = _wrap_jagged_dim(inp.dim(), new_kwargs["dim"], "slice")
958
+
959
+ return NestedTensor(func(inp._values, **new_kwargs), **extract_kwargs(inp))
960
+
961
+
962
+ @register_jagged_func(
963
+ torch.ops.aten.convolution.default,
964
+ "input: jt, weight: t, bias: t?, stride: any, padding: any, "
965
+ "dilation: any, transposed: any, output_padding: any, groups: any",
966
+ )
967
+ def convolution_default(func, *args, **kwargs):
968
+ _, new_kwargs = normalize_function(
969
+ func, args=args, kwargs=kwargs, normalize_to_only_use_kwargs=True
970
+ )
971
+
972
+ inp = new_kwargs.pop("input")
973
+
974
+ return NestedTensor(func(inp._values, **new_kwargs), **extract_kwargs(inp))
975
+
976
+
977
+ @register_jagged_func(
978
+ torch.ops.aten.mean.dim, "self: jt, dim: any?, keepdim: any, dtype: any?"
979
+ )
980
+ def mean_dim(func, *args, **kwargs):
981
+ _, new_kwargs = normalize_function(
982
+ func, args=args, kwargs=kwargs, normalize_to_only_use_kwargs=True
983
+ )
984
+
985
+ inp = new_kwargs.pop("input")
986
+ # NB: mean expects dim as a single item list of ints for some reason
987
+ new_kwargs["dim"] = [_wrap_jagged_dim(inp.dim(), new_kwargs["dim"][0], "mean")]
988
+
989
+ return NestedTensor(func(inp._values, **new_kwargs), **extract_kwargs(inp))
990
+
991
+
992
+ @register_jagged_func(torch.ops.aten.stack.default, "tensors: any, dim: any")
993
+ def stack_default(func, *args, **kwargs):
994
+ _, new_kwargs = normalize_function(
995
+ func, args=args, kwargs=kwargs, normalize_to_only_use_kwargs=True
996
+ )
997
+
998
+ # guaranteed this is non-empty if we got here
999
+ tensors = new_kwargs.pop("tensors")
1000
+ for t in tensors:
1001
+ if not isinstance(t, NestedTensor):
1002
+ raise RuntimeError("stack(): expected all nested tensors inputs")
1003
+
1004
+ if t.dim() != tensors[0].dim():
1005
+ raise RuntimeError(
1006
+ "stack(): expected all nested tensors to have the same dim"
1007
+ )
1008
+
1009
+ if not raggedness_matches(t, tensors[0].shape):
1010
+ raise RuntimeError(
1011
+ "stack(): expected all nested tensors to have the same nested structure"
1012
+ )
1013
+
1014
+ new_kwargs["dim"] = _wrap_jagged_dim(
1015
+ tensors[0].dim() + 1, new_kwargs["dim"], "stack"
1016
+ )
1017
+
1018
+ return NestedTensor(
1019
+ func([t._values for t in tensors], **new_kwargs), **extract_kwargs(tensors[0])
1020
+ )
1021
+
1022
+
1023
+ @register_jagged_func(
1024
+ torch.ops.aten.embedding.default,
1025
+ "weight: t, indices: jt, padding_idx: any?, scale_grad_by_freq: any?, sparse: any?",
1026
+ )
1027
+ def embedding_default(func, *args, **kwargs):
1028
+ _, new_kwargs = normalize_function(
1029
+ func, args=args, kwargs=kwargs, normalize_to_only_use_kwargs=True
1030
+ )
1031
+
1032
+ # guaranteed this is non-empty if we got here
1033
+ indices = new_kwargs.pop("indices")
1034
+ weight = new_kwargs.pop("weight")
1035
+
1036
+ return NestedTensor(
1037
+ func(weight, indices._values, **new_kwargs), **extract_kwargs(indices)
1038
+ )
1039
+
1040
+
1041
+ @register_jagged_func(
1042
+ [
1043
+ torch.ops.aten.values.default,
1044
+ torch.ops.aten._nested_get_values.default,
1045
+ ],
1046
+ "self: jt_all",
1047
+ )
1048
+ def values_default(func, *args, **kwargs):
1049
+ _, new_kwargs = normalize_function(
1050
+ func, args=args, kwargs=kwargs, normalize_to_only_use_kwargs=True
1051
+ )
1052
+
1053
+ inp = new_kwargs.pop("input")
1054
+
1055
+ # TODO: Handle inference mode properly.
1056
+ # See https://github.com/pytorch/pytorch/issues/112024#issuecomment-1779554292
1057
+ return inp._values.detach()
1058
+
1059
+
1060
+ @register_jagged_func(
1061
+ torch.ops.aten._nested_view_from_jagged.default,
1062
+ "values: t, offsets: t, dummy: jt_all, lengths: t?, ragged_idx: any?",
1063
+ )
1064
+ def _nested_view_from_jagged_default(func, *args, **kwargs):
1065
+ _, new_kwargs = normalize_function(
1066
+ func, args=args, kwargs=kwargs, normalize_to_only_use_kwargs=True
1067
+ )
1068
+
1069
+ values, offsets, lengths = (
1070
+ new_kwargs["input"],
1071
+ new_kwargs["offsets"],
1072
+ new_kwargs["lengths"],
1073
+ )
1074
+ ragged_idx = new_kwargs["ragged_idx"]
1075
+
1076
+ return NestedTensor(values, offsets, lengths=lengths, _ragged_idx=ragged_idx)
1077
+
1078
+
1079
+ @register_jagged_func(torch.ops.aten._nested_get_offsets.default, "self: jt_all")
1080
+ def _nested_get_offsets(func, *args, **kwargs):
1081
+ _, new_kwargs = normalize_function(
1082
+ func, args=args, kwargs=kwargs, normalize_to_only_use_kwargs=True
1083
+ )
1084
+
1085
+ inp = new_kwargs.pop("input")
1086
+ return inp._offsets
1087
+
1088
+
1089
+ @register_jagged_func(torch.ops.aten._nested_get_lengths.default, "self: jt_all")
1090
+ def _nested_get_lengths(func, *args, **kwargs):
1091
+ _, new_kwargs = normalize_function(
1092
+ func, args=args, kwargs=kwargs, normalize_to_only_use_kwargs=True
1093
+ )
1094
+
1095
+ inp = new_kwargs.pop("input")
1096
+ return inp._lengths
1097
+
1098
+
1099
+ @register_jagged_func(torch.ops.aten._nested_get_ragged_idx.default, "self: jt_all")
1100
+ def _nested_get_ragged_idx(func, *args, **kwargs):
1101
+ _, new_kwargs = normalize_function(
1102
+ func, args=args, kwargs=kwargs, normalize_to_only_use_kwargs=True
1103
+ )
1104
+
1105
+ inp = new_kwargs.pop("input")
1106
+ return inp._ragged_idx
1107
+
1108
+
1109
+ # Make the dummy available on the C++ side.
1110
+ @register_jagged_func(torch.ops.aten._nested_get_jagged_dummy.default, "self: any")
1111
+ def _nested_get_jagged_dummy(func, *args, **kwargs):
1112
+ from torch.nested._internal.nested_tensor import _nt_view_dummy
1113
+
1114
+ return _nt_view_dummy
1115
+
1116
+
1117
+ with torch.library._scoped_library("aten", "IMPL") as aten:
1118
+ aten.impl("_nested_get_jagged_dummy", _nested_get_jagged_dummy, "CPU")
1119
+ aten.impl("_nested_get_jagged_dummy", _nested_get_jagged_dummy, "CUDA")
1120
+ aten.impl("_nested_get_jagged_dummy", _nested_get_jagged_dummy, "Meta")
venv/lib/python3.10/site-packages/torch/nested/_internal/sdpa.py ADDED
@@ -0,0 +1,780 @@
1
+ import logging
2
+ from typing import Optional, Tuple
3
+
4
+ import torch
5
+ import torch.nn
6
+ import torch.nn.functional as F
7
+ from torch.backends.cuda import (
8
+ can_use_efficient_attention,
9
+ can_use_flash_attention,
10
+ flash_sdp_enabled,
11
+ math_sdp_enabled,
12
+ mem_efficient_sdp_enabled,
13
+ SDPAParams,
14
+ )
15
+
16
+ from torch.nn.attention import SDPBackend
17
+ from .nested_tensor import NestedTensor
18
+
19
+ log = logging.getLogger(__name__)
20
+
21
+
22
+ def _validate_sdpa_input(
23
+ query: torch.Tensor,
24
+ key: torch.Tensor,
25
+ value: torch.Tensor,
26
+ attn_mask: Optional[torch.Tensor] = None,
27
+ dropout_p=0.0,
28
+ is_causal=False,
29
+ scale=None,
30
+ ):
31
+ if (
32
+ not isinstance(query, NestedTensor)
33
+ or not isinstance(key, NestedTensor)
34
+ or not isinstance(value, NestedTensor)
35
+ ):
36
+ raise ValueError(
37
+ f"Expected query, key, and value to be nested tensors, "
38
+ f"but got query.is_nested: {query.is_nested}, key.is_nested: {key.is_nested}, "
39
+ f"and value.is_nested: {value.is_nested} instead."
40
+ )
41
+ if query.dtype != key.dtype or query.dtype != value.dtype:
42
+ raise ValueError(
43
+ f"Expected query, key, and value to have the same dtype, "
44
+ f"but got query.dtype: {query.dtype}, key.dtype: {key.dtype}, "
45
+ f"and value.dtype: {value.dtype} instead."
46
+ )
47
+ if query.device != key.device or query.device != value.device:
48
+ raise ValueError(
49
+ f"Expected query, key, and value to have the same device type, "
50
+ f"but got query.device: {query.device}, key.device: {key.device}, "
51
+ f"and value.device: {value.device} instead."
52
+ )
53
+ if query.dim() < 2 or key.dim() < 2 or value.dim() < 2:
54
+ raise ValueError(
55
+ f"Expected query, key, and value to all be at least 2 dimensional, but got query.dim: "
56
+ f"{query.dim()}, key.dim: {key.dim()} and value.dim: {value.dim()} instead."
57
+ )
58
+ if query._ragged_idx != key._ragged_idx or query._ragged_idx != value._ragged_idx:
59
+ raise ValueError(
60
+ f"Expected query, key, and value to all be ragged on the same dimension, but got ragged "
61
+ f"dims {query._ragged_idx}, {key._ragged_idx}, and {value._ragged_idx}, respectively."
62
+ )
63
+ if attn_mask is not None:
64
+ # TODO: Figure out whether masks are actually supported for this layout or not
65
+ raise ValueError("Masks are not yet supported!")
66
+ if attn_mask.dtype != torch.bool and attn_mask.dtype != query.dtype:
67
+ raise ValueError(
68
+ f"Expected attn_mask dtype to be bool or to match query dtype, but got attn_mask.dtype: "
69
+ f"{attn_mask.dtype}, and query.dtype: {query.dtype} instead."
70
+ )
71
+
72
+
73
+ def _check_batch_size_nested(params: SDPAParams, debug=False) -> bool:
74
+ # This is expected to be called after check_tensor_shapes ensuring that the
75
+ # size() calls won't error since the inputs are all 4 dimensional
76
+ q_batch_size = params.query.size(0)
77
+ k_batch_size = params.key.size(0)
78
+ v_batch_size = params.value.size(0)
79
+
80
+ # num_heads logic for nested input is checked in
81
+ # check_for_seq_len_0_nested_tensor as there is handling there to make sure
82
+ # num_heads is not ragged
83
+ return q_batch_size == k_batch_size and q_batch_size == v_batch_size
84
+
85
+
86
+ def _check_head_dim_size_flash_nested(params: SDPAParams, debug=False) -> bool:
87
+ max_size = 256
88
+ query_size_last = params.query.size(-1)
89
+ key_size_last = params.key.size(-1)
90
+ value_size_last = params.value.size(-1)
91
+ same_head_dim_size = (
92
+ query_size_last == key_size_last and query_size_last == value_size_last
93
+ )
94
+ if not (
95
+ same_head_dim_size
96
+ and (query_size_last % 8 == 0)
97
+ and (query_size_last <= max_size)
98
+ ):
99
+ if debug:
100
+ log.warning(
101
+ "For NestedTensor inputs, Flash attention requires q,k,v to have the same "
102
+ "last dimension and to be a multiple of 8 and less than or equal to 256. "
103
+ "Got Query.size(-1): %d, Key.size(-1): %d, Value.size(-1): %d instead.",
104
+ query_size_last,
105
+ key_size_last,
106
+ value_size_last,
107
+ )
108
+ return False
109
+ return True
110
+
111
+
112
+ def _check_for_seq_len_0_and_consistent_head_dim_nested_helper(
113
+ param: torch.Tensor, param_name: str, debug=False
114
+ ) -> bool:
115
+ assert isinstance(param, NestedTensor), "param should be a jagged NT"
116
+
117
+ if param._ragged_idx == 1:
118
+ # num_head_dims is ragged
119
+ if debug:
120
+ log.warning(
121
+ "Fused kernels do not support ragged num_head_dims, %s has a ragged num_heads.",
122
+ param_name,
123
+ )
124
+ return False
125
+
126
+ # This is being called inside sdp with shape [batch, heads, {seq_len}, dim]
127
+ if param._min_seqlen == 0:
128
+ if debug:
129
+ log.warning(
130
+ "Fused kernels do not support seq_len == 0, %s has a seq len of 0.",
131
+ param_name,
132
+ )
133
+ return False
134
+
135
+ return True
136
+
137
+
138
+ def _try_broadcast_param_size(q_size, k_size, v_size, param_name, debug=False) -> bool:
139
+ max_size = max(q_size, k_size, v_size)
140
+ if (
141
+ (q_size != max_size and q_size != 1)
142
+ or (k_size != max_size and k_size != 1)
143
+ or (v_size != max_size and v_size != 1)
144
+ ):
145
+ if debug:
146
+ log.warning(
147
+ "Both fused kernels require query, key and value to have broadcastable %s, "
148
+ "got Query %s %d, Key %s %d, Value %s %d instead.",
149
+ param_name,
150
+ param_name,
151
+ q_size,
152
+ param_name,
153
+ k_size,
154
+ param_name,
155
+ v_size,
156
+ )
157
+ return False
158
+ return True
159
+
160
+
161
+ def _check_for_seq_len_0_nested(params: SDPAParams, debug=False) -> bool:
162
+ # When this function is called we are assured that the nt is dim==4
163
+ q_is_safe = (
164
+ _check_for_seq_len_0_and_consistent_head_dim_nested_helper(
165
+ params.query, "query", debug
166
+ )
167
+ if params.query.is_nested
168
+ else True
169
+ )
170
+ # short circuit if any is unsafe
171
+ if not q_is_safe:
172
+ return False
173
+
174
+ k_is_safe = (
175
+ _check_for_seq_len_0_and_consistent_head_dim_nested_helper(
176
+ params.key, "key", debug
177
+ )
178
+ if params.key.is_nested
179
+ else True
180
+ )
181
+ # short circuit if any is unsafe
182
+ if not k_is_safe:
183
+ return False
184
+
185
+ v_is_safe = (
186
+ _check_for_seq_len_0_and_consistent_head_dim_nested_helper(
187
+ params.value, "value", debug
188
+ )
189
+ if params.value.is_nested
190
+ else True
191
+ )
192
+ # short circuit if any is unsafe
193
+ if not v_is_safe:
194
+ return False
195
+
196
+ # We now know none of the inputs have ragged num_heads, so we can safely
197
+ # access .size(1)
198
+ q_num_heads = params.query.size(1)
199
+ k_num_heads = params.key.size(1)
200
+ v_num_heads = params.value.size(1)
201
+ same_num_heads = q_num_heads == k_num_heads and q_num_heads == v_num_heads
202
+
203
+ if not same_num_heads:
204
+ if (
205
+ params.query.requires_grad
206
+ or params.key.requires_grad
207
+ or params.value.requires_grad
208
+ ):
209
+ if debug:
210
+ log.warning(
211
+ "Both fused kernels do not support training with broadcasted NT inputs."
212
+ )
213
+ return False
214
+ return _try_broadcast_param_size(
215
+ q_num_heads, k_num_heads, v_num_heads, "num heads", debug
216
+ )
217
+ return True
218
+
219
+
220
+ def _can_use_flash_sdpa_jagged(params: SDPAParams, debug=False) -> bool:
221
+ constraints = (
222
+ _check_batch_size_nested,
223
+ _check_head_dim_size_flash_nested,
224
+ _check_for_seq_len_0_nested,
225
+ )
226
+ for constraint in constraints:
227
+ if not constraint(params, debug):
228
+ return False
229
+ return True
230
+
231
+
232
+ def _can_use_efficient_sdpa_jagged(params: SDPAParams, debug=False) -> bool:
233
+ constraints = (
234
+ _check_batch_size_nested,
235
+ _check_for_seq_len_0_nested,
236
+ )
237
+ for constraint in constraints:
238
+ if not constraint(params, debug):
239
+ return False
240
+ return True
241
+
242
+
243
+ def _can_use_math_sdpa_jagged(params: SDPAParams, debug=False) -> bool:
244
+ if (
245
+ not params.query.transpose(1, 2).is_contiguous()
246
+ or not params.key.transpose(1, 2).is_contiguous()
247
+ or not params.value.transpose(1, 2).is_contiguous()
248
+ ):
249
+ if debug:
250
+ log.warning(
251
+ "If inputs are nested tensors they must be contiguous after transposing."
252
+ )
253
+ return False
254
+ if params.is_causal:
255
+ if debug:
256
+ log.warning(
257
+ "Nested tensors for query / key are not supported when is_causal=True."
258
+ )
259
+ return False
260
+ return True
261
+
262
+
263
+ def _select_sdp_backend(query, key, value, attn_mask, dropout, is_causal):
264
+ if (
265
+ not flash_sdp_enabled()
266
+ and not mem_efficient_sdp_enabled()
267
+ and not math_sdp_enabled()
268
+ ):
269
+ return SDPBackend.ERROR
270
+
271
+ ordering = (
272
+ SDPBackend.FLASH_ATTENTION,
273
+ SDPBackend.EFFICIENT_ATTENTION,
274
+ SDPBackend.MATH,
275
+ )
276
+
277
+ params = SDPAParams(query, key, value, attn_mask, dropout, is_causal)
278
+
279
+ for backend in ordering:
280
+ if backend == SDPBackend.FLASH_ATTENTION:
281
+ if can_use_flash_attention(params) and _can_use_flash_sdpa_jagged(params):
282
+ return SDPBackend.FLASH_ATTENTION
283
+ if backend == SDPBackend.EFFICIENT_ATTENTION:
284
+ if can_use_efficient_attention(params) and _can_use_efficient_sdpa_jagged(
285
+ params
286
+ ):
287
+ return SDPBackend.EFFICIENT_ATTENTION
288
+ if backend == SDPBackend.MATH:
289
+ if math_sdp_enabled() and _can_use_math_sdpa_jagged(params):
290
+ return SDPBackend.MATH
291
+
292
+ log.warning("Memory efficient kernel not used because:")
293
+ can_use_efficient_attention(params, debug=True)
294
+ _can_use_efficient_sdpa_jagged(params, debug=True)
295
+ log.warning("Flash attention kernel not used because:")
296
+ can_use_flash_attention(params, debug=True)
297
+ _can_use_flash_sdpa_jagged(params, debug=True)
298
+ log.warning("Math attention kernel not used because:")
299
+ _can_use_math_sdpa_jagged(params, debug=True)
300
+ return SDPBackend.ERROR
301
+
302
+
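Not part of the upstream file: the selector above walks a fixed priority order (flash, then memory-efficient, then math) and returns the first backend whose global toggle and per-backend checks both pass. As a hedged usage sketch, callers can narrow the candidates from the outside with the `torch.nn.attention.sdpa_kernel` context manager, assuming a PyTorch build where that context manager is available.

```python
import torch
import torch.nn.functional as F
from torch.nn.attention import SDPBackend, sdpa_kernel

q = torch.randn(2, 4, 8, 16)
k = torch.randn(2, 4, 8, 16)
v = torch.randn(2, 4, 8, 16)

# Restrict SDPA to the math backend only; inside the context the flash and
# memory-efficient toggles are disabled, so selection falls through to MATH.
with sdpa_kernel([SDPBackend.MATH]):
    out = F.scaled_dot_product_attention(q, k, v)
print(out.shape)  # torch.Size([2, 4, 8, 16])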
303
+ def _cumulative_and_max_seq_len_nnz(qkv: torch.Tensor) -> Tuple[torch.Tensor, int, int]:
304
+ # This function is used to calculate two pieces of metadata that are needed
305
+ # for use with flash-attention and efficient_attention kernels. They are the
306
+ # cumulative sequence_length over a batch of sequences and the maximum
307
+ # sequence length.
308
+
309
+ # It returns the cumulative sequence lengths, the maximum sequence
310
+ # length, and the total number of packed elements (the last entry of the cumulative sequence lengths).
311
+ if not isinstance(qkv, NestedTensor):
312
+ raise ValueError("QKV must be nested for flash cumulative_seq_len calculation.")
313
+
314
+ if qkv.lengths() is None:
315
+ # TODO: Explore performance impact of copying
316
+ cumulative_seqlen = qkv.offsets().to(dtype=torch.int32, device=qkv.device)
317
+ max_seqlen = qkv._max_seqlen
318
+ n_elem = qkv.values().shape[0]
319
+ else:
320
+ # TODO: Explore performance impact of copying
321
+ cumulative_seqlen = (
322
+ qkv.lengths().cumsum(0).to(dtype=torch.int32, device=qkv.device)
323
+ )
324
+ batch_size = qkv.size(0)
325
+ max_seqlen = qkv._max_seqlen
326
+ # TODO: Explore performance impact when compiling
327
+ n_elem = int(cumulative_seqlen[-1].item())
328
+ return cumulative_seqlen, max_seqlen, n_elem
329
+
330
+
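Not part of the upstream file: a small numeric illustration of the metadata computed above, using made-up offsets. Without holes, the cumulative sequence lengths are simply the int32 offsets; the max sequence length and total element count follow from them.

```python
import torch

# Jagged batch of 3 sequences with lengths 2, 5, and 3.
offsets = torch.tensor([0, 2, 7, 10])

cumulative_seqlen = offsets.to(torch.int32)  # passed directly to the fused kernels
lengths = offsets.diff()
max_seqlen = int(lengths.max())              # 5
n_elem = int(cumulative_seqlen[-1])          # 10 packed rows in total
print(cumulative_seqlen, max_seqlen, n_elem)
```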
331
+ def _is_safe_to_get_storage_as_tensor(tensor: torch.Tensor):
332
+ # This function checks if a nested tensor is valid for
333
+ # use with the flash-attention and efficient_attention kernels without
334
+ # needing to call contiguous on the nested tensor input.
335
+ # It checks that the storage offsets' adjacent_differences are a constant
336
+ # multiple of the previous tensor in the nested tensor and that the strides
337
+ # are monotonically decreasing. This check is done after calling transpose on
338
+ # the nested tensor resulting in a Nt of shape [bsz, {seq_len}, num_heads, dim]
339
+
340
+ # Returns a boolean indicating if contiguous needs to be called for input
341
+ assert isinstance(tensor, NestedTensor)
342
+ offsets = tensor.offsets()
343
+ strides = tensor._strides
344
+
345
+ n_tensors = offsets.size(0) - 1
346
+ if n_tensors <= 1:
347
+ return True
348
+
349
+ # Check initially that the tensor strides are in strictly descending order
350
+ prev_stride = strides[1]
351
+ for stride in strides[2:]:
352
+ if prev_stride <= stride:
353
+ # This would mean that the last stride is greater than the seq_len
354
+ # stride
355
+ return False
356
+ prev_stride = stride
357
+
358
+ # Congrats you made it!
359
+ return True
360
+
361
+
362
+ def _view_as_dense(
363
+ tensor: torch.Tensor, Nnz: int, num_heads: int, head_dim: int
364
+ ) -> torch.Tensor:
365
+ if tensor.is_nested:
366
+ return tensor.values()
367
+ return tensor.view(Nnz, num_heads, head_dim)
368
+
369
+
370
+ # TODO: Next iteration should add test cases and check it works
371
+ # def _sdpa_nested_preprocessing_with_broadcast(query, key, value):
372
+ # # Query (Batch x Num_heads x {Q_seq_len} x Dim_per_head)
373
+ # # Key (Batch x Num_heads x {KV_seq_len} x Dim_per_head)
374
+ # # Value (Batch x Num_heads x {KV_seq_len} x Dim_per_head)
375
+ # q_batch_size = query.size(0)
376
+ # k_batch_size = key.size(0)
377
+ # v_batch_size = value.size(0)
378
+
379
+ # output_batch_size = max(q_batch_size, k_batch_size, v_batch_size)
380
+
381
+ # q_num_heads = query.size(1)
382
+ # k_num_heads = key.size(1)
383
+ # v_num_heads = value.size(1)
384
+
385
+ # output_num_heads = max(q_num_heads, k_num_heads, v_num_heads)
386
+
387
+ # head_dim_qk = query.size(3)
388
+ # head_dim_v = value.size(3)
389
+
390
+ # q_t = query.transpose(1, 2)
391
+ # k_t = key.transpose(1, 2)
392
+ # v_t = value.transpose(1, 2)
393
+
394
+ # # Checks in sdp_utils ensure that if {*}_batch_size/{*}_num_heads !=
395
+ # # output_batch_size/num_heads then they are 1
396
+ # q_batch_size_needs_broadcast = q_batch_size != output_batch_size
397
+ # k_batch_size_needs_broadcast = k_batch_size != output_batch_size
398
+ # v_batch_size_needs_broadcast = v_batch_size != output_batch_size
399
+
400
+ # # If {*}_batch_size_needs_broadcast, then
401
+ # # (1) max_seqlen_batch_{*} is given by {*}_t.size(1)
402
+ # # this is because needs_broadcast indicates that the batch_size is 1
403
+ # # and hence there is only 1 value for seq_len
404
+ # # (2) The cum_seq_lens are given by [0, {*}_t.size(1), 2 * {*}_t.size(1),
405
+ # # ..., output_batch_size * {*}_t.size(1)]
406
+ # # (3) Nnz_{*} is given by output_batch_size * {*}_t.size(1)
407
+
408
+ # if q_batch_size_needs_broadcast or not q_t.is_nested:
409
+ # max_seqlen_batch_q = q_t.size(1)
410
+ # cumulative_sequence_length_q = torch.arange(
411
+ # 0,
412
+ # (output_batch_size + 1) * max_seqlen_batch_q,
413
+ # max_seqlen_batch_q,
414
+ # device=q_t.device,
415
+ # dtype=torch.int32,
416
+ # )
417
+ # Nnz_q = output_batch_size * max_seqlen_batch_q
418
+ # else:
419
+ # (
420
+ # cumulative_sequence_length_q,
421
+ # max_seqlen_batch_q,
422
+ # Nnz_q,
423
+ # ) = _cumulative_and_max_seq_len_nnz(q_t)
424
+
425
+ # if k_batch_size_needs_broadcast and v_batch_size_needs_broadcast:
426
+ # assert k_t.size(1) == v_t.size(1)
427
+ # max_seqlen_batch_kv = k_t.size(1)
428
+ # cumulative_sequence_length_kv = torch.arange(
429
+ # 0,
430
+ # (output_batch_size + 1) * max_seqlen_batch_kv,
431
+ # max_seqlen_batch_kv,
432
+ # device=k_t.device,
433
+ # dtype=torch.int32,
434
+ # )
435
+ # Nnz_kv = output_batch_size * max_seqlen_batch_kv
436
+ # else:
437
+ # cumulative_sequence_length_kv, max_seqlen_batch_kv, Nnz_kv = (
438
+ # _cumulative_and_max_seq_len_nnz(v_t)
439
+ # if k_batch_size_needs_broadcast
440
+ # else _cumulative_and_max_seq_len_nnz(k_t)
441
+ # )
442
+
443
+ # q_num_heads_needs_broadcast = q_num_heads != output_num_heads
444
+ # k_num_heads_needs_broadcast = k_num_heads != output_num_heads
445
+ # v_num_heads_needs_broadcast = v_num_heads != output_num_heads
446
+
447
+ # if not q_t.is_nested:
448
+ # query_buffer_reshaped = q_t.expand(
449
+ # output_batch_size, q_t.size(1), output_num_heads, head_dim_qk
450
+ # )
451
+ # query_buffer_reshaped = query_buffer_reshaped.reshape(
452
+ # Nnz_q, output_num_heads, head_dim_qk
453
+ # )
454
+ # else:
455
+ # if not q_t.is_contiguous() and not _is_safe_to_get_storage_as_tensor(q_t):
456
+ # q_t = q_t.contiguous()
457
+ # # If we are broadcasting then Nnz_q will be the output_batch_size since
458
+ # # seq_len is 1
459
+ # effective_batch_size_q = (
460
+ # output_batch_size if q_batch_size_needs_broadcast else Nnz_q
461
+ # )
462
+ # query_buffer_reshaped = _view_as_dense(
463
+ # q_t, effective_batch_size_q, output_num_heads, head_dim_qk
464
+ # )
465
+
466
+ # # If the physical layout of the NestedTensor's storage
467
+ # # is not: batch, {seq_len}, num_heads, head_dim then we need
468
+ # # to call contiguous
469
+ # if not k_t.is_contiguous() and not _is_safe_to_get_storage_as_tensor(k_t):
470
+ # k_t = k_t.contiguous()
471
+ # if not v_t.is_contiguous() and not _is_safe_to_get_storage_as_tensor(v_t):
472
+ # v_t = v_t.contiguous()
473
+
474
+ # effective_batch_size_k = (
475
+ # output_batch_size if k_batch_size_needs_broadcast else Nnz_kv
476
+ # )
477
+ # key_buffer_reshaped = _view_as_dense(
478
+ # k_t, effective_batch_size_k, output_num_heads, head_dim_qk
479
+ # )
480
+
481
+ # effective_batch_size_v = (
482
+ # output_batch_size if v_batch_size_needs_broadcast else Nnz_kv
483
+ # )
484
+ # value_buffer_reshaped = _view_as_dense(
485
+ # v_t, effective_batch_size_v, output_num_heads, head_dim_v
486
+ # )
487
+
488
+ # if not q_batch_size_needs_broadcast:
489
+ # output_shape = q_t._size
490
+ # if head_dim_v != head_dim_qk:
491
+ # output_shape[-1] = head_dim_v
492
+ # if q_num_heads_needs_broadcast:
493
+ # output_shape[1] = output_num_heads
494
+ # else:
495
+ # output_shape = torch.empty(3, dtype=torch.int64, device=torch.device("cpu"))
496
+ # output_shape[0] = q_t.size(1)
497
+ # output_shape[1] = output_num_heads
498
+ # output_shape[2] = head_dim_v
499
+
500
+ # return (
501
+ # query_buffer_reshaped,
502
+ # key_buffer_reshaped,
503
+ # value_buffer_reshaped,
504
+ # cumulative_sequence_length_q,
505
+ # cumulative_sequence_length_kv,
506
+ # max_seqlen_batch_q,
507
+ # max_seqlen_batch_kv,
508
+ # output_shape,
509
+ # )
510
+
511
+
512
+ def _sdpa_nested_preprocessing(query, key, value):
513
+ # Query (Batch x Num_heads x {Q_seq_len} x Dim_per_head)
514
+ # Key (Batch x Num_heads x {KV_seq_len} x Dim_per_head)
515
+ # Value (Batch x Num_heads x {KV_seq_len} x Dim_per_head)
516
+ q_batch_size = query.size(0)
517
+ k_batch_size = key.size(0)
518
+ v_batch_size = value.size(0)
519
+
520
+ q_num_heads = query.size(1)
521
+ k_num_heads = key.size(1)
522
+ v_num_heads = value.size(1)
523
+
524
+ if not (q_batch_size == k_batch_size and q_batch_size == v_batch_size) or not (
525
+ q_num_heads == k_num_heads and k_num_heads == v_num_heads
526
+ ):
527
+ raise RuntimeError(
528
+ "This path is currently not implemented for jagged layout NT."
529
+ )
530
+ # return _sdpa_nested_preprocessing_with_broadcast(query, key, value)
531
+
532
+ num_heads = query.size(1)
533
+ head_dim_qk = query.size(3)
534
+ head_dim_v = value.size(3)
535
+ q_t = query.transpose(1, 2)
536
+ k_t = key.transpose(1, 2)
537
+ v_t = value.transpose(1, 2)
538
+
539
+ (
540
+ cumulative_sequence_length_q,
541
+ max_seqlen_batch_q,
542
+ Nnz_q,
543
+ ) = _cumulative_and_max_seq_len_nnz(q_t)
544
+ (
545
+ cumulative_sequence_length_kv,
546
+ max_seqlen_batch_kv,
547
+ Nnz_kv,
548
+ ) = _cumulative_and_max_seq_len_nnz(k_t)
549
+
550
+ # [TODO] K and V have to have the same Nnz, should probably torch_check
551
+ # assume in order to not iterate over v
552
+
553
+ # If the physical layout of the NestedTensor's storage
554
+ # is not: batch, {seq_len}, num_heads, head_dim then we need
555
+ # to call contiguous
556
+ if not q_t.is_contiguous() and not _is_safe_to_get_storage_as_tensor(q_t):
557
+ q_t = q_t.contiguous()
558
+ if not k_t.is_contiguous() and not _is_safe_to_get_storage_as_tensor(k_t):
559
+ k_t = k_t.contiguous()
560
+ if not v_t.is_contiguous() and not _is_safe_to_get_storage_as_tensor(v_t):
561
+ v_t = v_t.contiguous()
562
+
563
+ query_buffer_reshaped = _view_as_dense(q_t, Nnz_q, num_heads, head_dim_qk)
564
+ key_buffer_reshaped = _view_as_dense(k_t, Nnz_kv, num_heads, head_dim_qk)
565
+ value_buffer_reshaped = _view_as_dense(v_t, Nnz_kv, num_heads, head_dim_v)
566
+
567
+ output_nt_info = {
568
+ "offsets": q_t.offsets(),
569
+ "_max_seqlen": q_t._max_seqlen,
570
+ "_min_seqlen": q_t._min_seqlen,
571
+ }
572
+
573
+ return (
574
+ query_buffer_reshaped,
575
+ key_buffer_reshaped,
576
+ value_buffer_reshaped,
577
+ cumulative_sequence_length_q,
578
+ cumulative_sequence_length_kv,
579
+ max_seqlen_batch_q,
580
+ max_seqlen_batch_kv,
581
+ output_nt_info,
582
+ )
583
+
584
+
585
+ def _pad_last_dim(
586
+ tensor: torch.Tensor, alignment_size: int, slice: bool
587
+ ) -> torch.Tensor:
588
+ # FlashAttentionV2 requires that head dimension be a multiple of 8
589
+ # This was previously done within the kernel, however
590
+ # This causes the kernel to maybe alias query, key, value
591
+ # So instead we pad the head_dimensions to be a multiple of 8
592
+ # in the composite region
593
+ last_dim_size = tensor.size(-1)
594
+ if last_dim_size % alignment_size == 0:
595
+ return tensor
596
+ pad_count = alignment_size - (last_dim_size % alignment_size)
597
+ tensor = torch.nn.functional.pad(tensor, [0, pad_count])
598
+ if slice:
599
+ return tensor[..., 0:last_dim_size]
600
+ return tensor
601
+
602
+
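Not part of the upstream file: a quick numeric sketch of the padding above, with made-up shapes. A head dim of 13 is padded up to the next multiple of 8, and the `slice=True` path trims the result back to the original width.

```python
import torch
import torch.nn.functional as F

t = torch.randn(2, 4, 13)           # head dim 13 is not a multiple of 8
pad_count = 8 - (t.size(-1) % 8)    # 3
padded = F.pad(t, [0, pad_count])   # last dim becomes 16
trimmed = padded[..., 0:13]         # slice=True path: back to the original width
print(padded.shape, trimmed.shape)
```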
603
+ # TODO: coalesce with torch/nn/utils/attention.py
604
+ def _calculate_scale(query, scale):
605
+ # TODO: Investigate why math.sqrt() isn't properly handled by Dynamo?
606
+ softmax_scale = scale if scale is not None else torch.sym_sqrt(1.0 / query.size(-1))
607
+ return softmax_scale
608
+
609
+
610
+ def _post_process_flash_output(out: torch.Tensor, og_size):
611
+ if not out.is_nested and out.size(-1) != og_size:
612
+ out = out[..., 0:og_size]
613
+ return out
614
+
615
+
616
+ def jagged_scaled_dot_product_attention(
617
+ query: torch.Tensor,
618
+ key: torch.Tensor,
619
+ value: torch.Tensor,
620
+ attn_mask: Optional[torch.Tensor] = None,
621
+ dropout_p=0.0,
622
+ is_causal=False,
623
+ scale=None,
624
+ ):
625
+ _validate_sdpa_input(query, key, value, attn_mask, dropout_p, is_causal, scale)
626
+ # for mypy, ugh
627
+ assert (
628
+ isinstance(query, NestedTensor)
629
+ and isinstance(key, NestedTensor)
630
+ and isinstance(value, NestedTensor)
631
+ )
632
+
633
+ # Special path for non-ragged sequence length (e.g. for SAM where we have a ragged
634
+ # second batch dim instead). For this case, we can just send the dense buffers through
635
+ # vanilla SDPA.
636
+ if query.dim() > 3 and key.dim() > 3 and value.dim() > 3 and query._ragged_idx == 1:
637
+ from torch.nested._internal.ops import extract_kwargs
638
+
639
+ output = F.scaled_dot_product_attention(
640
+ query._values,
641
+ key._values,
642
+ value._values,
643
+ attn_mask=(
644
+ attn_mask._values if isinstance(attn_mask, NestedTensor) else attn_mask
645
+ ),
646
+ dropout_p=dropout_p,
647
+ is_causal=is_causal,
648
+ scale=scale,
649
+ )
650
+
651
+ return NestedTensor(output, **extract_kwargs(query))
652
+
653
+ compute_logsumexp = query.requires_grad or key.requires_grad or value.requires_grad
654
+
655
+ backend_choice = _select_sdp_backend(
656
+ query, key, value, attn_mask, dropout_p, is_causal
657
+ )
658
+
659
+ if backend_choice == SDPBackend.FLASH_ATTENTION:
660
+ og_size = query.size(-1)
661
+ query_padded = _pad_last_dim(query, 8, False)
662
+ key_padded = _pad_last_dim(key, 8, False)
663
+ value_padded = _pad_last_dim(value, 8, False)
664
+ # We need to calculate the scale based off the OG head dim size
665
+ og_scale = _calculate_scale(query, scale)
666
+ (
667
+ query_buffer_reshaped,
668
+ key_buffer_reshaped,
669
+ value_buffer_reshaped,
670
+ cumulative_sequence_length_q,
671
+ cumulative_sequence_length_kv,
672
+ max_seqlen_batch_q,
673
+ max_seqlen_batch_kv,
674
+ output_nt_info,
675
+ ) = _sdpa_nested_preprocessing(query_padded, key_padded, value_padded)
676
+
677
+ (
678
+ attention,
679
+ logsumexp,
680
+ philox_seed,
681
+ philox_offset,
682
+ debug_attn_mask,
683
+ ) = torch.ops.aten._flash_attention_forward(
684
+ query_buffer_reshaped,
685
+ key_buffer_reshaped,
686
+ value_buffer_reshaped,
687
+ cumulative_sequence_length_q,
688
+ cumulative_sequence_length_kv,
689
+ max_seqlen_batch_q,
690
+ max_seqlen_batch_kv,
691
+ dropout_p,
692
+ is_causal,
693
+ False,
694
+ scale=og_scale,
695
+ )
696
+ # Reshape output to convert nnz to batch_size and seq_len
697
+ from torch.nested._internal.nested_tensor import nested_view_from_values_offsets
698
+
699
+ attention = nested_view_from_values_offsets(
700
+ attention.squeeze(0), output_nt_info["offsets"]
701
+ ).transpose(1, 2)
702
+ return _post_process_flash_output(attention, og_size)
703
+ elif backend_choice == SDPBackend.EFFICIENT_ATTENTION:
704
+ (
705
+ query_reshaped,
706
+ key_reshaped,
707
+ value_reshaped,
708
+ cumulative_sequence_length_q,
709
+ cumulative_sequence_length_kv,
710
+ max_seqlen_batch_q,
711
+ max_seqlen_batch_kv,
712
+ output_nt_info,
713
+ ) = _sdpa_nested_preprocessing(query, key, value)
714
+ (
715
+ attention,
716
+ log_sumexp,
717
+ seed,
718
+ offset,
719
+ max_seqlen_q,
720
+ max_seqlen_batch_kv,
721
+ ) = torch.ops.aten._efficient_attention_forward(
722
+ query_reshaped.unsqueeze(0),
723
+ key_reshaped.unsqueeze(0),
724
+ value_reshaped.unsqueeze(0),
725
+ None,
726
+ cumulative_sequence_length_q,
727
+ cumulative_sequence_length_kv,
728
+ max_seqlen_batch_q,
729
+ max_seqlen_batch_kv,
730
+ dropout_p,
731
+ int(is_causal),
732
+ compute_logsumexp,
733
+ scale=scale,
734
+ )
735
+
736
+ # Reshape output to convert nnz to batch_size and seq_len
737
+ from torch.nested._internal.nested_tensor import nested_view_from_values_offsets
738
+
739
+ return nested_view_from_values_offsets(
740
+ attention.squeeze(0), output_nt_info["offsets"]
741
+ ).transpose(1, 2)
742
+ elif backend_choice == SDPBackend.MATH:
743
+ # save the offsets and shape of the inputs, so we can reshape the final output
744
+ # query @ key = attn: [B, D1, j0, D'] @ [B, D1, D', j1] = [B, D1, j0, j1]
745
+ # attn @ value = out: [B, D1, j0, j1] @ [B, D1, j1, D2] = [B, D1, j0, D2]
746
+ offsets = query.offsets()
747
+ d1 = query._size[1]
748
+ d2 = value._size[-1]
749
+
750
+ # convert jagged layout Nested Tensor to strided layout Nested Tensor
751
+ # which supports the math implementation of SDPA
752
+ def get_strided_layout_nested_tensor(jagged_layout_nt):
753
+ lengths = jagged_layout_nt._offsets[1:] - jagged_layout_nt._offsets[:-1]
754
+ transpose = torch.transpose(jagged_layout_nt, 1, 2)
755
+ tensor_list = transpose.values().split(list(lengths), dim=0)
756
+ strided_nt = torch.nested.as_nested_tensor(list(tensor_list))
757
+ strided_nt = strided_nt.transpose(1, 2).contiguous()
758
+ return strided_nt
759
+
760
+ query = get_strided_layout_nested_tensor(query)
761
+ key = get_strided_layout_nested_tensor(key)
762
+ value = get_strided_layout_nested_tensor(value)
763
+
764
+ attn_out = torch._scaled_dot_product_attention_math(
765
+ query, key, value, attn_mask, dropout_p, is_causal, scale=scale
766
+ )[0]
767
+
768
+ from torch.nested._internal.nested_tensor import nested_view_from_values_offsets
769
+
770
+ # convert strided layout Nested Tensor back to jagged layout Nested Tensor
771
+ attn_out = attn_out.transpose(1, 2).contiguous().values()
772
+ attn_out = attn_out.view(-1, d1, d2)
773
+ attn_out = nested_view_from_values_offsets(attn_out, offsets)
774
+ attn_out = attn_out.transpose(1, 2)
775
+
776
+ return attn_out
777
+ else:
778
+ raise RuntimeError(
779
+ "No viable backend for scaled_dot_product_attention was found."
780
+ )
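Not part of the upstream file: a hedged end-to-end usage sketch of the jagged SDPA path above. It builds jagged-layout nested tensors, transposes them so the ragged dim sits where SDPA expects the sequence dim, and calls the public `scaled_dot_product_attention` entry point, which dispatches to the function above for jagged inputs. Shapes are illustrative, and the example assumes a build/device where at least the math fallback for jagged inputs is available.

```python
import torch
import torch.nn.functional as F

# Two sequences of lengths 3 and 5, with 4 heads and head_dim 16:
# the jagged layout gives an NT of shape [B=2, j1, H=4, D=16].
seqs = [torch.randn(3, 4, 16), torch.randn(5, 4, 16)]
q = torch.nested.nested_tensor(seqs, layout=torch.jagged)
k = torch.nested.nested_tensor(seqs, layout=torch.jagged)
v = torch.nested.nested_tensor(seqs, layout=torch.jagged)

# SDPA expects [B, H, seq, D], so move the ragged dim to position 2.
q, k, v = (t.transpose(1, 2) for t in (q, k, v))

out = F.scaled_dot_product_attention(q, k, v)  # jagged NT of shape [2, 4, j1, 16]
print(out.is_nested)
```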
venv/lib/python3.10/site-packages/torch/onnx/_internal/fx/__init__.py ADDED
@@ -0,0 +1,8 @@
1
+ from .patcher import ONNXTorchPatcher
2
+ from .serialization import save_model_with_external_data
3
+
4
+
5
+ __all__ = [
6
+ "save_model_with_external_data",
7
+ "ONNXTorchPatcher",
8
+ ]
venv/lib/python3.10/site-packages/torch/onnx/_internal/fx/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (339 Bytes). View file
 
venv/lib/python3.10/site-packages/torch/onnx/_internal/fx/__pycache__/_pass.cpython-310.pyc ADDED
Binary file (12.4 kB). View file
 
venv/lib/python3.10/site-packages/torch/onnx/_internal/fx/__pycache__/decomposition_skip.cpython-310.pyc ADDED
Binary file (6.28 kB). View file
 
venv/lib/python3.10/site-packages/torch/onnx/_internal/fx/__pycache__/decomposition_table.cpython-310.pyc ADDED
Binary file (3.01 kB). View file
 
venv/lib/python3.10/site-packages/torch/onnx/_internal/fx/__pycache__/diagnostics.cpython-310.pyc ADDED
Binary file (7.88 kB). View file
 
venv/lib/python3.10/site-packages/torch/onnx/_internal/fx/__pycache__/dynamo_graph_extractor.cpython-310.pyc ADDED
Binary file (8.19 kB). View file
 
venv/lib/python3.10/site-packages/torch/onnx/_internal/fx/__pycache__/fx_onnx_interpreter.cpython-310.pyc ADDED
Binary file (17.8 kB). View file
 
venv/lib/python3.10/site-packages/torch/onnx/_internal/fx/__pycache__/fx_symbolic_graph_extractor.cpython-310.pyc ADDED
Binary file (7.6 kB). View file
 
venv/lib/python3.10/site-packages/torch/onnx/_internal/fx/__pycache__/onnxfunction_dispatcher.cpython-310.pyc ADDED
Binary file (24.5 kB). View file
 
venv/lib/python3.10/site-packages/torch/onnx/_internal/fx/__pycache__/op_validation.cpython-310.pyc ADDED
Binary file (10.9 kB). View file
 
venv/lib/python3.10/site-packages/torch/onnx/_internal/fx/__pycache__/patcher.cpython-310.pyc ADDED
Binary file (4.02 kB). View file
 
venv/lib/python3.10/site-packages/torch/onnx/_internal/fx/__pycache__/registration.cpython-310.pyc ADDED
Binary file (3.39 kB). View file
 
venv/lib/python3.10/site-packages/torch/onnx/_internal/fx/__pycache__/serialization.cpython-310.pyc ADDED
Binary file (6.43 kB). View file
 
venv/lib/python3.10/site-packages/torch/onnx/_internal/fx/__pycache__/torch_export_graph_extractor.cpython-310.pyc ADDED
Binary file (3.05 kB). View file
 
venv/lib/python3.10/site-packages/torch/onnx/_internal/fx/__pycache__/type_utils.cpython-310.pyc ADDED
Binary file (6.55 kB). View file
 
venv/lib/python3.10/site-packages/torch/onnx/_internal/fx/_pass.py ADDED
@@ -0,0 +1,325 @@
+ from __future__ import annotations
+
+ import abc
+
+ import contextlib
+ import dataclasses
+ import difflib
+
+ import io
+ import logging
+ import sys
+
+ from typing import Any, Callable, Optional, Tuple
+
+ import torch
+ import torch.fx
+ from torch._subclasses import fake_tensor
+ from torch.fx.experimental.proxy_tensor import maybe_disable_fake_tensor_mode
+ from torch.onnx._internal import _beartype
+ from torch.onnx._internal.fx import diagnostics, onnxfunction_dispatcher
+
+
+ @dataclasses.dataclass
+ class PackageInfo:
+     package_name: str
+     version: Optional[str]
+     commit_hash: Optional[str]
+
+     def to_onnx_domain_string(self) -> str:
+         return ".".join(
+             filter(None, ("pkg", self.package_name, self.version, self.commit_hash))
+         )
+
+     @classmethod
+     def from_python_class(cls, python_class: type) -> PackageInfo:
+         package_name = python_class.__module__.split(".")[0]
+         package = __import__(package_name)
+         version = getattr(package, "__version__", None)
+         # TODO: Figure out how to retrieve commit hash.
+         commit_hash = None
+         return cls(package_name, version, commit_hash)
+
+
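# A minimal, hypothetical sketch (not part of the upstream module) of what
# `PackageInfo.to_onnx_domain_string` produces; the version segment depends on the
# installed package build, and the helper name is illustrative only.
def _example_package_domains() -> Tuple[str, str]:
    torch_domain = PackageInfo.from_python_class(torch.nn.Linear).to_onnx_domain_string()
    # e.g. "pkg.torch.2.2.0"; commit_hash is None and is therefore filtered out.
    bare_domain = PackageInfo("somepkg", None, None).to_onnx_domain_string()
    # -> "pkg.somepkg"
    return torch_domain, bare_domain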
+ @dataclasses.dataclass
+ class GraphModuleOnnxMeta:
+     package_info: PackageInfo
+
+
+ @contextlib.contextmanager
+ def _patch_difflib_sequence_matcher_init():
+     """Context patching `difflib.SequenceMatcher` for fx readable graph.
+
+     Under this context, the `autojunk` argument of `difflib.SequenceMatcher` will always
+     be considered as `False`. This is to prevent `difflib.SequenceMatcher` recognizing
+     stacktrace messages in fx readable graph as junk, as these messages tend to be long (>200)
+     and repeat multiple times, which falls under the junk filter criteria.
+
+     `difflib.SequenceMatcher` is used underneath by all sorts of diffing functions
+     in `difflib`, including `difflib.unified_diff`, `difflib.ndiff`, `difflib.context_diff`.
+     Unfortunately, there is no way to pass `autojunk` argument to these functions, and
+     they all default to `True`. This context patching will affect all of them.
+
+     `Reference: Automatic junk heuristic <https://docs.python.org/3/library/difflib.html>`_
+     """
+     original_init = difflib.SequenceMatcher.__init__
+
+     def patched_init(self, isjunk=None, a="", b="", autojunk=True):
+         original_init(self, isjunk, a, b, autojunk=False)
+
+     difflib.SequenceMatcher.__init__ = patched_init  # type: ignore[assignment]
+     try:
+         yield
+     finally:
+         difflib.SequenceMatcher.__init__ = original_init  # type: ignore[assignment]
+
+
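# A hypothetical helper (not part of the upstream module) illustrating the context
# manager above: every `difflib.SequenceMatcher` constructed inside it runs with
# `autojunk=False`, so long, frequently repeated lines are not discarded as junk.
def _example_diff_without_autojunk(old: str, new: str) -> str:
    with _patch_difflib_sequence_matcher_init():
        return "".join(
            difflib.unified_diff(
                old.splitlines(keepends=True), new.splitlines(keepends=True)
            )
        )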
+ def _unified_diff(a: str, b: str) -> str:
+     """Return a string containing the unified diff of two strings.
+
+     This function calls a patched version of `difflib.unified_diff` with `autojunk` set
+     to `False` for `difflib.SequenceMatcher` class. More details can be found in
+     `_patch_difflib_sequence_matcher_init` function.
+
+     Args:
+         a: The first string.
+         b: The second string.
+
+     Returns:
+         The unified diff of the two strings. If there is no diff, return "<no diff>".
+
+     Example::
+
+         >>> a = '''class GraphModule(torch.nn.Module):
+         ...     def forward(self, input_ids : torch.Tensor, attention_mask : torch.Tensor):
+         ...         # File: /modeling.py:770, code: input_ids = input_ids.view(-1, input_shape[-1])
+         ...         view = input_ids.view(-1, 3); input_ids = None
+         ... '''
+         >>> b = '''class <lambda>(torch.nn.Module):
+         ...     def forward(self, input_ids: i64[1, 3], attention_mask: i64[1, 3]):
+         ...         # File: /modeling.py:770, code: input_ids = input_ids.view(-1, input_shape[-1])
+         ...         view: i64[1, 3] = torch.ops.aten.view.default(input_ids, [-1, 3]); input_ids = None
+         ... '''
+         >>> print(_unified_diff(a, b))
+         ---
+         +++
+         @@ -1,4 +1,4 @@
+         -class GraphModule(torch.nn.Module):
+         -    def forward(self, input_ids : torch.Tensor, attention_mask : torch.Tensor):
+         +class <lambda>(torch.nn.Module):
+         +    def forward(self, input_ids: i64[1, 3], attention_mask: i64[1, 3]):
+              # File: /modeling.py:770, code: input_ids = input_ids.view(-1, input_shape[-1])
+         -        view = input_ids.view(-1, 3); input_ids = None
+         +        view: i64[1, 3] = torch.ops.aten.view.default(input_ids, [-1, 3]); input_ids = None
+     """
+
+     a_list = a.splitlines(keepends=True)
+     b_list = b.splitlines(keepends=True)
+
+     with _patch_difflib_sequence_matcher_init():
+         # Set `n` to `sys.maxsize` to show entire graph when there is a diff.
+         diff = "".join(difflib.unified_diff(a_list, b_list, n=sys.maxsize))
+
+     if not diff:
+         return "<no diff>"
+     return diff
+
+
+ @_beartype.beartype
+ def _transform_diagnose_call_message_formatter(
+     run: Callable,
+     self: Transform,
+     *args: Any,
+     **kwargs: Any,
+ ) -> str:
+     return f"Running {self.__class__.__name__} pass. "
+
+
+ def maybe_fx_graph_tabular(graph: torch.fx.Graph) -> Optional[str]:
+     """Return the Graph nodes in tabular format. Equivalent to stdout of `graph.print_tabular()`.
+     If `tabulate` is not installed, return `None`.
+
+     Args:
+         graph: The Graph to print.
+
+     Returns:
+         The Graph printed in a tabular format. None if `tabulate` is not installed.
+     """
+     f = io.StringIO()
+     with contextlib.redirect_stdout(f):
+         try:
+             graph.print_tabular()
+         except ImportError:
+             return None
+     return f.getvalue()
+
+
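# A hypothetical helper (not part of the upstream module) showing how the optional
# tabular view above is typically consumed: fall back to the readable graph when
# `tabulate` is unavailable. `gm` stands for any `torch.fx.GraphModule`.
def _example_describe_graph(gm: torch.fx.GraphModule) -> str:
    tabular = maybe_fx_graph_tabular(gm.graph)
    if tabular is None:
        return gm.print_readable(print_output=False)
    return tabular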
+ class Transform(abc.ABC):
+     """Base class for FX graph transformations to be used by FX-ONNX exporter.
+
+     Similar to `FX Interpreter <https://pytorch.org/docs/stable/fx.html#torch.fx.Interpreter>`_,
+     specializations of this class execute the FX graph Node-by-Node.
+     Methods in the `Transform` class can be overridden to customize the behavior of the model.
+     This pattern can be useful for many things, including writing code transformations as well as analysis passes.
+
+     The following methods can be overridden::
+
+         _run()
+             +-- run_node()
+                 +-- placeholder()
+                 +-- get_attr()
+                 +-- call_function()
+                 +-- call_method()
+                 +-- call_module()
+                 +-- output()
+
+     One important aspect to note is that if the transformation modifies the model input and/or output signature,
+     (e.g. additional inputs/outputs are added to the model), :class:`InputAdaptStep` and/or :class:`OutputAdaptStep`
+     are needed to reconcile :attr:`ONNXProgram.model_signature` and :attr:`ONNXProgram.model_proto`.
+     That is, the model signature and the model representation must match.
+
+     As an additional feature, this class provides builtin support for transformation recording using the diagnostics.
+     The granularity of overriding is up to the user. And it affects the granularity of
+     the diagnostics information. For example, if `_run()` is overridden, the
+     diagnostics information will only contain graph level transformation. Instead,
+     if `call_function()` is overridden, the diagnostics information will additionally
+     contain the node level information of `call_function()`.
+
+     TODO(bowbao): Add more overridable methods in call hierarchy
+     TODO(bowbao): Create an example once more overridable methods are added.
+     """
+
+     diagnostic_context: diagnostics.DiagnosticContext
+     """The diagnostic context for recording diagnostics."""
+
+     module: torch.fx.GraphModule
+     """The module to be transformed."""
+
+     fake_mode: Optional[fake_tensor.FakeTensorMode]
+     """The existing fake mode detected from `self.module`."""
+
+     def __init__(
+         self,
+         diagnostic_context: diagnostics.DiagnosticContext,
+         module: torch.fx.GraphModule,
+     ):
+         """Initialize the transform.
+
+         Args:
+             diagnostic_context: The diagnostic context for recording diagnostics.
+             module: The module to be transformed.
+         """
+         self.diagnostic_context = diagnostic_context
+         self.module = module
+         self.fake_mode = self._detect_fake_mode()
+
+     def _detect_fake_mode(self) -> Optional[fake_tensor.FakeTensorMode]:
+         """Detect fake mode from the graph.
+
+         Scan through all nodes in graph and their meta['val'] to detect fake mode.
+         """
+         fake_tensors = [node.meta.get("val") for node in self.module.graph.nodes]
+         with maybe_disable_fake_tensor_mode():
+             return torch._dynamo.utils.detect_fake_mode(fake_tensors)
+
+     def _maybe_fakefy_args(
+         self, fake_mode: Optional[fake_tensor.FakeTensorMode], *args: Any
+     ) -> Tuple[Any, ...]:
+         if fake_mode is None:
+             return args
+         # NB: This should hit the cache if tensors were fakefied before.
+         # E.g., when the fx graph is produced by Dynamo.
+         return tuple(
+             fake_mode.from_tensor(t) if isinstance(t, torch.Tensor) else t for t in args
+         )
+
+     @abc.abstractmethod
+     def _run(self, *args, **kwargs) -> torch.fx.GraphModule:
+         ...
+
+     @diagnostics.diagnose_call(
+         diagnostics.rules.fx_pass,
+         diagnostic_message_formatter=_transform_diagnose_call_message_formatter,
+     )
+     def run(self, *args, **kwargs) -> torch.fx.GraphModule:
+         """Run the transform on `self.module`.
+
+         Note that this method may or may not mutate `self.module`, and the returned
+         `GraphModule` could be either `self.module` or a new `GraphModule`.
+
+         Args:
+             *args: Positional arguments for `self.module` to run.
+             **kwargs: Keyword arguments for `self.module` to run.
+         """
+         diagnostic = self.diagnostic_context.inflight_diagnostic(
+             rule=diagnostics.rules.fx_pass
+         )
+         diagnostic.info(
+             "For detailed logging of graph modifications by this pass, either set "
+             "`DiagnosticOptions.verbosity_level` to `logging.DEBUG` or use the environment variable "
+             "`TORCH_LOGS='onnx_diagnostics'`."
+         )
+
+         # Gather graph information before transform.
+         graph_diff_log_level = logging.DEBUG
+         if diagnostic.logger.isEnabledFor(graph_diff_log_level):
+             # Cannot use LazyString because the graph may have been mutated at evaluation time.
+             old_readable_graph = self.module.print_readable(print_output=False)
+             old_tabular = maybe_fx_graph_tabular(self.module.graph)
+         else:
+             # Set to empty string to avoid unbound warning. This value should never be
+             # used since the log level is not enabled.
+             old_readable_graph = ""
+             old_tabular = ""
+
+         module = self._run(*args, **kwargs)
+
+         # Gather graph information after transform.
+         if diagnostic.logger.isEnabledFor(graph_diff_log_level):
+             new_readable_graph = module.print_readable(print_output=False)
+             new_tabular = maybe_fx_graph_tabular(module.graph)
+
+             with diagnostic.log_section(graph_diff_log_level, "Graph diff:"):
+                 diagnostic.log(
+                     graph_diff_log_level,
+                     "```\n%s\n```",
+                     diagnostics.LazyString(
+                         _unified_diff, old_readable_graph, new_readable_graph
+                     ),
+                 )
+
+             with diagnostic.log_section(graph_diff_log_level, "Tabular diff:"):
+                 if old_tabular is None or new_tabular is None:
+                     diagnostic.log(
+                         graph_diff_log_level,
+                         "Tabular diff is not available because `tabulate` is not installed.",
+                     )
+                 else:
+                     diagnostic.log(
+                         graph_diff_log_level,
+                         "```\n%s\n```",
+                         diagnostics.LazyString(_unified_diff, old_tabular, new_tabular),
+                     )
+
+         return module
+
+
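# A minimal, hypothetical `Transform` subclass (not part of the upstream module),
# sketching the override pattern described in the class docstring: only `_run()` is
# overridden, so diagnostics are recorded at graph granularity. The class name is
# illustrative only.
class _ExampleDeadCodeElimination(Transform):
    """Illustrative pass: drop unused nodes via FX's built-in dead-code elimination."""

    def _run(self, *args, **kwargs) -> torch.fx.GraphModule:
        self.module.graph.eliminate_dead_code()
        self.module.recompile()
        return self.module

# Typical call site (sketch): `_ExampleDeadCodeElimination(ctx, gm).run(*fake_args)`,
# where `ctx` is the exporter's `diagnostics.DiagnosticContext` and `gm` the FX module.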
+ class AnalysisResult(abc.ABC):  # noqa: B024
+     ...
+
+
+ class Analysis(abc.ABC):
+     @_beartype.beartype
+     def __init__(
+         self,
+         diagnostic_context: diagnostics.DiagnosticContext,
+         module: torch.fx.GraphModule,
+         onnxfunction_dispatcher: onnxfunction_dispatcher.OnnxFunctionDispatcher,
+     ):
+         self.diagnostic_context = diagnostic_context
+         self.module = module
+         self.onnxfunction_dispatcher = onnxfunction_dispatcher
+
+     @abc.abstractmethod
+     def analyze(self, diagnostic_level: diagnostics.infra.Level) -> AnalysisResult:
+         ...
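# A hypothetical `Analysis` subclass (not part of the upstream module), showing the
# intended shape of the API: `analyze()` inspects `self.module` and returns an
# `AnalysisResult` without mutating the graph. Both class names are illustrative only.
@dataclasses.dataclass
class _ExampleNodeCountResult(AnalysisResult):
    node_count: int

class _ExampleNodeCountAnalysis(Analysis):
    def analyze(self, diagnostic_level: diagnostics.infra.Level) -> AnalysisResult:
        return _ExampleNodeCountResult(node_count=len(list(self.module.graph.nodes)))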