applied-ai-018 committed
Commit b81136f (verified) · 1 Parent(s): 256625a

Add files using upload-large-folder tool

This view is limited to 50 files because it contains too many changes.

Files changed (50)
  1. llmeval-env/lib/python3.10/site-packages/torch/_C/_autograd.pyi +123 -0
  2. llmeval-env/lib/python3.10/site-packages/torch/_C/_cpu.pyi +5 -0
  3. llmeval-env/lib/python3.10/site-packages/torch/_C/_cudnn.pyi +17 -0
  4. llmeval-env/lib/python3.10/site-packages/torch/_C/_distributed_autograd.pyi +26 -0
  5. llmeval-env/lib/python3.10/site-packages/torch/_C/_distributed_c10d.pyi +590 -0
  6. llmeval-env/lib/python3.10/site-packages/torch/_C/_distributed_rpc.pyi +188 -0
  7. llmeval-env/lib/python3.10/site-packages/torch/_C/_distributed_rpc_testing.pyi +35 -0
  8. llmeval-env/lib/python3.10/site-packages/torch/_C/_functorch.pyi +77 -0
  9. llmeval-env/lib/python3.10/site-packages/torch/_C/_nn.pyi +86 -0
  10. llmeval-env/lib/python3.10/site-packages/torch/_C/_onnx.pyi +40 -0
  11. llmeval-env/lib/python3.10/site-packages/torch/distributed/_composable/__init__.py +4 -0
  12. llmeval-env/lib/python3.10/site-packages/torch/distributed/_composable/fsdp/__pycache__/__init__.cpython-310.pyc +0 -0
  13. llmeval-env/lib/python3.10/site-packages/torch/distributed/_composable/fsdp/__pycache__/_fsdp_api.cpython-310.pyc +0 -0
  14. llmeval-env/lib/python3.10/site-packages/torch/distributed/_composable/fsdp/__pycache__/_fsdp_collectives.cpython-310.pyc +0 -0
  15. llmeval-env/lib/python3.10/site-packages/torch/distributed/_composable/fsdp/__pycache__/_fsdp_common.cpython-310.pyc +0 -0
  16. llmeval-env/lib/python3.10/site-packages/torch/distributed/_composable/fsdp/__pycache__/_fsdp_init.cpython-310.pyc +0 -0
  17. llmeval-env/lib/python3.10/site-packages/torch/distributed/_composable/fsdp/__pycache__/_fsdp_param.cpython-310.pyc +0 -0
  18. llmeval-env/lib/python3.10/site-packages/torch/distributed/_composable/fsdp/__pycache__/_fsdp_state.cpython-310.pyc +0 -0
  19. llmeval-env/lib/python3.10/site-packages/torch/distributed/_composable/fsdp/__pycache__/fully_shard.cpython-310.pyc +0 -0
  20. llmeval-env/lib/python3.10/site-packages/torch/distributed/_composable/replicate.py +154 -0
  21. llmeval-env/lib/python3.10/site-packages/torch/distributed/_tensor/ops/pointwise_ops.py +629 -0
  22. llmeval-env/lib/python3.10/site-packages/torch/distributed/_tensor/ops/tensor_ops.py +826 -0
  23. llmeval-env/lib/python3.10/site-packages/torch/distributed/_tensor/ops/utils.py +226 -0
  24. llmeval-env/lib/python3.10/site-packages/torch/distributed/elastic/multiprocessing/errors/__init__.py +375 -0
  25. llmeval-env/lib/python3.10/site-packages/torch/distributed/elastic/multiprocessing/errors/__pycache__/__init__.cpython-310.pyc +0 -0
  26. llmeval-env/lib/python3.10/site-packages/torch/distributed/elastic/multiprocessing/errors/__pycache__/error_handler.cpython-310.pyc +0 -0
  27. llmeval-env/lib/python3.10/site-packages/torch/distributed/elastic/multiprocessing/errors/__pycache__/handlers.cpython-310.pyc +0 -0
  28. llmeval-env/lib/python3.10/site-packages/torch/distributed/elastic/multiprocessing/errors/error_handler.py +159 -0
  29. llmeval-env/lib/python3.10/site-packages/torch/distributed/elastic/multiprocessing/errors/handlers.py +16 -0
  30. llmeval-env/lib/python3.10/site-packages/torch/distributed/elastic/multiprocessing/redirects.py +102 -0
  31. llmeval-env/lib/python3.10/site-packages/torch/distributed/elastic/multiprocessing/subprocess_handler/__init__.py +15 -0
  32. llmeval-env/lib/python3.10/site-packages/torch/distributed/elastic/multiprocessing/subprocess_handler/handlers.py +32 -0
  33. llmeval-env/lib/python3.10/site-packages/torch/distributed/elastic/multiprocessing/subprocess_handler/subprocess_handler.py +78 -0
  34. llmeval-env/lib/python3.10/site-packages/torch/distributed/launcher/__init__.py +14 -0
  35. llmeval-env/lib/python3.10/site-packages/torch/distributed/launcher/__pycache__/__init__.cpython-310.pyc +0 -0
  36. llmeval-env/lib/python3.10/site-packages/torch/distributed/launcher/__pycache__/api.cpython-310.pyc +0 -0
  37. llmeval-env/lib/python3.10/site-packages/torch/distributed/launcher/api.py +283 -0
  38. llmeval-env/lib/python3.10/site-packages/torch/distributed/optim/__init__.py +34 -0
  39. llmeval-env/lib/python3.10/site-packages/torch/distributed/optim/__pycache__/__init__.cpython-310.pyc +0 -0
  40. llmeval-env/lib/python3.10/site-packages/torch/distributed/optim/__pycache__/functional_adadelta.cpython-310.pyc +0 -0
  41. llmeval-env/lib/python3.10/site-packages/torch/distributed/optim/__pycache__/functional_adagrad.cpython-310.pyc +0 -0
  42. llmeval-env/lib/python3.10/site-packages/torch/distributed/optim/__pycache__/functional_adam.cpython-310.pyc +0 -0
  43. llmeval-env/lib/python3.10/site-packages/torch/distributed/optim/__pycache__/functional_adamw.cpython-310.pyc +0 -0
  44. llmeval-env/lib/python3.10/site-packages/torch/distributed/optim/__pycache__/functional_rprop.cpython-310.pyc +0 -0
  45. llmeval-env/lib/python3.10/site-packages/torch/distributed/optim/__pycache__/named_optimizer.cpython-310.pyc +0 -0
  46. llmeval-env/lib/python3.10/site-packages/torch/distributed/optim/__pycache__/post_localSGD_optimizer.cpython-310.pyc +0 -0
  47. llmeval-env/lib/python3.10/site-packages/torch/distributed/optim/__pycache__/utils.cpython-310.pyc +0 -0
  48. llmeval-env/lib/python3.10/site-packages/torch/distributed/optim/apply_optimizer_in_backward.py +118 -0
  49. llmeval-env/lib/python3.10/site-packages/torch/distributed/optim/functional_adadelta.py +102 -0
  50. llmeval-env/lib/python3.10/site-packages/torch/distributed/optim/functional_adagrad.py +104 -0
llmeval-env/lib/python3.10/site-packages/torch/_C/_autograd.pyi ADDED
@@ -0,0 +1,123 @@
+ from enum import Enum
+ from typing import Any, Callable, List, Optional, Set
+
+ import torch
+
+ from ._profiler import (
+     _ProfilerEvent,
+     ActiveProfilerType,
+     ProfilerActivity,
+     ProfilerConfig,
+ )
+
+ # Defined in tools/autograd/init.cpp
+
+ class DeviceType(Enum):
+     CPU = ...
+     CUDA = ...
+     MKLDNN = ...
+     OPENGL = ...
+     OPENCL = ...
+     IDEEP = ...
+     HIP = ...
+     FPGA = ...
+     ORT = ...
+     XLA = ...
+     MPS = ...
+     HPU = ...
+     Meta = ...
+     Vulkan = ...
+     Metal = ...
+     PrivateUse1 = ...
+
+ class ProfilerEvent:
+     def cpu_elapsed_us(self, other: ProfilerEvent) -> float: ...
+     def cpu_memory_usage(self) -> int: ...
+     def cuda_elapsed_us(self, other: ProfilerEvent) -> float: ...
+     def privateuse1_elapsed_us(self, other: ProfilerEvent) -> float: ...
+     def cuda_memory_usage(self) -> int: ...
+     def device(self) -> int: ...
+     def handle(self) -> int: ...
+     def has_cuda(self) -> bool: ...
+     def is_remote(self) -> bool: ...
+     def kind(self) -> int: ...
+     def name(self) -> str: ...
+     def node_id(self) -> int: ...
+     def sequence_nr(self) -> int: ...
+     def shapes(self) -> List[List[int]]: ...
+     def thread_id(self) -> int: ...
+     def flops(self) -> float: ...
+     def is_async(self) -> bool: ...
+
+ class _KinetoEvent:
+     def name(self) -> str: ...
+     def device_index(self) -> int: ...
+     def start_us(self) -> int: ...
+     def duration_us(self) -> int: ...
+     def is_async(self) -> bool: ...
+     def linked_correlation_id(self) -> int: ...
+     def shapes(self) -> List[List[int]]: ...
+     def dtypes(self) -> List[str]: ...
+     def concrete_inputs(self) -> List[Any]: ...
+     def device_type(self) -> DeviceType: ...
+     def start_thread_id(self) -> int: ...
+     def end_thread_id(self) -> int: ...
+     def correlation_id(self) -> int: ...
+     def fwd_thread_id(self) -> int: ...
+     def stack(self) -> List[str]: ...
+     def scope(self) -> int: ...
+     def sequence_nr(self) -> int: ...
+     def flops(self) -> int: ...
+     def cuda_elapsed_us(self) -> int: ...
+     def privateuse1_elapsed_us(self) -> int: ...
+
+ class _ProfilerResult:
+     def events(self) -> List[_KinetoEvent]: ...
+     def legacy_events(self) -> List[List[ProfilerEvent]]: ...
+     def save(self, path: str) -> None: ...
+     def experimental_event_tree(self) -> List[_ProfilerEvent]: ...
+     def trace_start_us(self) -> int: ...
+
+ class SavedTensor: ...
+
+ def _enable_profiler(
+     config: ProfilerConfig,
+     activities: Set[ProfilerActivity],
+ ) -> None: ...
+ def _prepare_profiler(
+     config: ProfilerConfig,
+     activities: Set[ProfilerActivity],
+ ) -> None: ...
+ def _disable_profiler() -> _ProfilerResult: ...
+ def _profiler_enabled() -> bool: ...
+ def _add_metadata_json(key: str, value: str) -> None: ...
+ def _kineto_step() -> None: ...
+ def _get_sequence_nr() -> int: ...
+ def kineto_available() -> bool: ...
+ def _record_function_with_args_enter(name: str, *args) -> torch.Tensor: ...
+ def _record_function_with_args_exit(handle: torch.Tensor) -> None: ...
+ def _supported_activities() -> Set[ProfilerActivity]: ...
+ def _enable_record_function(enable: bool) -> None: ...
+ def _set_empty_test_observer(is_global: bool, sampling_prob: float) -> None: ...
+ def _push_saved_tensors_default_hooks(
+     pack_hook: Callable[[torch.Tensor], Any],
+     unpack_hook: Callable[[Any], torch.Tensor],
+ ) -> None: ...
+ def _pop_saved_tensors_default_hooks() -> None: ...
+ def _unsafe_set_version_counter(t: torch.Tensor, prev_version: int) -> None: ...
+ def _enable_profiler_legacy(config: ProfilerConfig) -> None: ...
+ def _disable_profiler_legacy() -> List[List[ProfilerEvent]]: ...
+ def _profiler_type() -> ActiveProfilerType: ...
+ def _saved_tensors_hooks_enable() -> None: ...
+ def _saved_tensors_hooks_disable(message: str) -> None: ...
+ def _saved_tensors_hooks_get_disabled_error_message() -> Optional[str]: ...
+
+ class CreationMeta(Enum):
+     DEFAULT = ...
+     IN_CUSTOM_FUNCTION = ...
+     MULTI_OUTPUT_NODE = ...
+     NO_GRAD_MODE = ...
+     INFERENCE_MODE = ...
+
+ def _set_creation_meta(t: torch.Tensor, creation_meta: CreationMeta) -> None: ...
+ def _get_creation_meta(t: torch.Tensor) -> CreationMeta: ...
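
The bindings above back the public `torch.profiler` and `torch.autograd.graph` APIs. A minimal sketch of how they are typically driven from Python, using the public wrappers rather than the private `torch._C._autograd` functions (tensor shapes are arbitrary):

```python
import torch
from torch.profiler import profile, ProfilerActivity

# Profiling a small backward pass; _enable_profiler/_disable_profiler and
# kineto_available() are exercised under the hood by torch.profiler.
with profile(activities=[ProfilerActivity.CPU]) as prof:
    a = torch.randn(64, 64, requires_grad=True)
    (a @ a).sum().backward()
print(prof.key_averages().table(sort_by="cpu_time_total", row_limit=5))

# saved_tensors_hooks corresponds to _push/_pop_saved_tensors_default_hooks.
with torch.autograd.graph.saved_tensors_hooks(lambda t: t.clone(), lambda t: t):
    b = torch.randn(8, requires_grad=True)
    (b * b).sum().backward()
```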
llmeval-env/lib/python3.10/site-packages/torch/_C/_cpu.pyi ADDED
@@ -0,0 +1,5 @@
+ from torch.types import _bool
+
+ # Defined in torch/csrc/cpu/Module.cpp
+
+ def _is_cpu_support_vnni() -> _bool: ...
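
The single binding declared here is reachable directly from Python; a one-line sketch (the call only reports a capability and has no side effects):

```python
import torch

# True if the host CPU supports VNNI instructions (used by int8 kernels).
print(torch._C._is_cpu_support_vnni())
```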
llmeval-env/lib/python3.10/site-packages/torch/_C/_cudnn.pyi ADDED
@@ -0,0 +1,17 @@
+ from enum import Enum
+
+ from torch.types import _bool, Tuple
+
+ # Defined in torch/csrc/cuda/shared/cudnn.cpp
+ is_cuda: _bool
+
+ def getRuntimeVersion() -> Tuple[int, int, int]: ...
+ def getCompileVersion() -> Tuple[int, int, int]: ...
+ def getVersionInt() -> int: ...
+
+ class RNNMode(int, Enum):
+     value: int
+     rnn_relu = ...
+     rnn_tanh = ...
+     lstm = ...
+     gru = ...
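
These version queries surface through the public `torch.backends.cudnn` module; a short sketch (assumes a CUDA-enabled PyTorch build):

```python
import torch

if torch.backends.cudnn.is_available():
    # Wraps getVersionInt() from the stub above.
    print("cuDNN version:", torch.backends.cudnn.version())
```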
llmeval-env/lib/python3.10/site-packages/torch/_C/_distributed_autograd.pyi ADDED
@@ -0,0 +1,26 @@
+ from typing import Any, Dict, List, Set
+
+ import torch
+
+ # This module is defined in torch/csrc/distributed/autograd/init.cpp
+
+ class DistAutogradContext:
+     def _context_id(self) -> int: ...
+     def _recv_functions(self) -> Dict[int, Any]: ...
+     def _send_functions(self) -> Dict[int, Any]: ...
+     def _known_worker_ids(self) -> Set[int]: ...
+
+ def _new_context() -> DistAutogradContext: ...
+ def _release_context(context_id: int) -> None: ...
+ def _get_max_id() -> int: ...
+ def _is_valid_context(worker_id: int) -> bool: ...
+ def _retrieve_context(context_id: int) -> DistAutogradContext: ...
+ def _current_context() -> DistAutogradContext: ...
+ def _init(worker_id: int) -> None: ...
+ def _get_debug_info() -> Dict[str, str]: ...
+ def backward(
+     context_id: int,
+     roots: List[torch.Tensor],
+     retain_graph=False,
+ ) -> None: ...
+ def get_gradients(context_id: int) -> Dict[torch.Tensor, torch.Tensor]: ...
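
These bindings are wrapped by `torch.distributed.autograd`. A hedged single-worker sketch (real deployments run one process per rank, each calling `init_rpc` with its own rank):

```python
import torch
import torch.distributed.autograd as dist_autograd
import torch.distributed.rpc as rpc

rpc.init_rpc("worker0", rank=0, world_size=1)

with dist_autograd.context() as context_id:  # _new_context()/_release_context()
    t = torch.ones(2, 2, requires_grad=True)
    loss = rpc.rpc_sync("worker0", torch.add, args=(t, t)).sum()
    dist_autograd.backward(context_id, [loss])       # backward() above
    grads = dist_autograd.get_gradients(context_id)  # get_gradients() above

rpc.shutdown()
```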
llmeval-env/lib/python3.10/site-packages/torch/_C/_distributed_c10d.pyi ADDED
@@ -0,0 +1,590 @@
+ # mypy: disable-error-code="type-arg"
+ from datetime import timedelta
+ from enum import Enum
+ from typing import Any, Dict, List, Optional, overload, Tuple, Union
+
+ import torch
+ from torch import Tensor
+ from torch._C import ScriptObject
+ from torch.futures import Future
+
+ # This module is defined in torch/csrc/distributed/c10d/init.cpp
+
+ _DEFAULT_FIRST_BUCKET_BYTES: int
+ _DEFAULT_NO_TIMEOUT: timedelta
+ _DEFAULT_PG_TIMEOUT: timedelta
+ _DEFAULT_PG_NCCL_TIMEOUT: timedelta
+
+ class BuiltinCommHookType(Enum):
+     ALLREDUCE = ...
+     FP16_COMPRESS = ...
+
+ def _register_comm_hook(reducer: Reducer, state: Any, comm_hook: Any): ...
+ def _register_builtin_comm_hook(
+     reducer: Reducer,
+     comm_hook_type: BuiltinCommHookType,
+ ): ...
+ def _set_global_rank(rank: int) -> None: ...
+ def _hash_tensors(tensors: List[Tensor]) -> int: ...
+
+ class GradBucket:
+     def index(self) -> int: ...
+     def buffer(self) -> Tensor: ...
+     def gradients(self) -> List[Tensor]: ...
+     def is_last(self) -> bool: ...
+     def set_buffer(self, tensor: Tensor) -> None: ...
+     def parameters(self) -> List[Tensor]: ...
+
+ class Reducer:
+     def __init__(
+         self,
+         params: List[Tensor],
+         bucket_indices: List[List[int]],
+         per_bucket_size_limits: List[int],
+         process_group: ProcessGroup,
+         expect_sparse_gradients: List[bool] = ...,
+         bucket_bytes_cap: int = ...,  # kDefaultBucketBytesCap in reducer.hpp
+         find_unused_parameters: bool = ...,
+         gradient_as_bucket_view: bool = ...,
+         param_to_name_mapping: Dict[int, str] = ...,
+         first_bucket_types_cap: int = ...,  # kDefaultFirstBucketBytes in reducer.hpp
+     ): ...
+     def prepare_for_forward(self) -> None: ...
+     def prepare_for_backward(self, output: List[Tensor]) -> None: ...
+     def get_backward_stats(self) -> List[int]: ...
+     def _install_post_backward_futures(self, futures: List[Future]) -> None: ...
+     def _rebuild_buckets(self) -> bool: ...
+     def _get_zeros_like_grad_buckets(self) -> List[GradBucket]: ...
+     def _push_all_rebuilt_params(self) -> None: ...
+     def _set_forward_pass_work_handle(
+         self,
+         work: Work,
+         use_static_world_size: bool,
+     ): ...
+     def _get_local_used_map(self) -> Tensor: ...
+     def _set_ddp_runtime_logging_sample_rate(self, sample_rate: int) -> None: ...
+     def _set_static_graph(self) -> None: ...
+     def _run_comm_hook(self, bucket: GradBucket) -> Future: ...
+     def set_logger(self, logger: Logger) -> None: ...
+     def _remove_autograd_hooks(self) -> None: ...
+     def _check_reducer_finalized(self) -> None: ...
+     def _set_sparse_metadata(self, global_unique_ids: Dict[str, Tensor]) -> None: ...
+     def _reset_state(self) -> None: ...
+     def _update_process_group(self, new_process_group: ProcessGroup) -> None: ...
+
+ class DDPLoggingData:
+     strs_map: Dict[str, str]
+     ints_map: Dict[str, int]
+
+ class Logger:
+     def __init__(self, reducer: Reducer): ...
+     def set_construction_data_and_log(
+         self,
+         module_name: str,
+         device_ids: List[int],
+         output_device: int,
+         broadcast_buffers: bool,
+         has_sync_bn: bool,
+         static_graph: bool,
+     ): ...
+     def set_runtime_stats_and_log(self) -> None: ...
+     def set_error_and_log(self, error: str) -> None: ...
+     def _get_ddp_logging_data(self) -> DDPLoggingData: ...
+     def _set_comm_hook_name(self, comm_hook: str) -> None: ...
+     def _set_uneven_input_join(self) -> None: ...
+     def _set_static_graph(self) -> None: ...
+
+ def get_debug_level(): ...
+ def set_debug_level(): ...
+ def set_debug_level_from_env(): ...
+
+ class DebugLevel(Enum):
+     OFF = ...
+     INFO = ...
+     DETAIL = ...
+
+ class ReduceOp:
+     def __init__(self, op: RedOpType): ...
+
+     SUM: RedOpType = ...
+     AVG: RedOpType = ...
+     PRODUCT: RedOpType = ...
+     MIN: RedOpType = ...
+     MAX: RedOpType = ...
+     BAND: RedOpType = ...
+     BOR: RedOpType = ...
+     BXOR: RedOpType = ...
+     PREMUL_SUM: RedOpType = ...
+     UNUSED: RedOpType = ...
+
+     class RedOpType(Enum): ...
+
+ class BroadcastOptions:
+     rootRank: int
+     rootTensor: int
+     timeout: timedelta
+     asyncOp: bool
+
+ class AllreduceOptions:
+     reduceOp: ReduceOp
+     timeout: timedelta
+
+ class AllreduceCoalescedOptions(AllreduceOptions): ...
+
+ class ReduceOptions:
+     reduceOp: ReduceOp
+     rootRank: int
+     rootTensor: int
+     timeout: timedelta
+
+ class AllgatherOptions:
+     timeout: timedelta
+     asyncOp: bool
+
+ class GatherOptions:
+     rootRank: int
+     timeout: timedelta
+
+ class ScatterOptions:
+     rootRank: int
+     timeout: timedelta
+     asyncOp: bool
+
+ class ReduceScatterOptions:
+     reduceOp: ReduceOp
+     timeout: timedelta
+     asyncOp: bool
+
+ class BarrierOptions:
+     device_ids: List[int]
+     device: torch.device
+     timeout: timedelta
+
+ class AllToAllOptions:
+     timeout: timedelta
+
+ class Store:
+     def set(self, key: str, value: str): ...
+     def get(self, key: str) -> bytes: ...
+     def add(self, key: str, value: int) -> int: ...
+     def compare_set(
+         self,
+         key: str,
+         expected_value: str,
+         desired_value: str,
+     ) -> bytes: ...
+     def delete_key(self, key: str) -> bool: ...
+     def num_keys(self) -> int: ...
+     def set_timeout(self, timeout: timedelta): ...
+     @overload
+     def wait(self, keys: List[str]): ...
+     @overload
+     def wait(self, keys: List[str], timeout: timedelta): ...
+
+ class FileStore(Store):
+     def __init__(self, path: str, numWorkers: int = ...): ...
+
+ class HashStore(Store):
+     def __init__(self): ...
+
+ class TCPStore(Store):
+     def __init__(
+         self,
+         host_name: str,
+         port: int,
+         world_size: Optional[int] = ...,
+         is_master: bool = ...,
+         timeout: timedelta = ...,
+         wait_for_workers: bool = ...,
+         multi_tenant: bool = ...,
+         master_listen_fd: Optional[int] = ...,
+         use_libuv: Optional[bool] = ...,
+     ): ...
+     @property
+     def host(self) -> str: ...
+     @property
+     def port(self) -> int: ...
+
+ class PrefixStore(Store):
+     def __init__(self, prefix: str, store: Store): ...
+     @property
+     def underlying_store(self) -> Store: ...
+
+ class _DistributedBackendOptions:
+     def __init__(self): ...
+     @property
+     def store(self) -> Store: ...
+     @store.setter
+     def store(self, store: Store) -> None: ...
+     @property
+     def group_rank(self) -> int: ...
+     @group_rank.setter
+     def group_rank(self, rank: int) -> None: ...
+     @property
+     def group_size(self) -> int: ...
+     @group_size.setter
+     def group_size(self, size: int) -> None: ...
+     @property
+     def timeout(self) -> timedelta: ...
+     @timeout.setter
+     def timeout(self, timeout: timedelta) -> None: ...
+     @property
+     def group_id(self) -> str: ...
+     @group_id.setter
+     def group_id(self, group_id: str) -> None: ...
+     @property
+     def global_ranks_in_group(self) -> List[int]: ...
+     @global_ranks_in_group.setter
+     def global_ranks_in_group(self, ranks: List[int]) -> None: ...
+
+ class Work:
+     def is_completed(self) -> bool: ...
+     def is_success(self) -> bool: ...
+     def exception(self) -> Any: ...
+     def wait(self, timeout: timedelta = ...) -> bool: ...
+     def get_future(self) -> Future: ...
+     def source_rank(self) -> int: ...
+     def _source_rank(self) -> int: ...
+     def result(self) -> List[Tensor]: ...
+     def synchronize(self): ...
+     def boxed(self) -> ScriptObject: ...
+     @staticmethod
+     def unbox(obj: ScriptObject) -> Work: ...
+
+ class Backend:
+     def __init__(
+         self,
+         rank: int,
+         size: int,
+     ): ...
+     @property
+     def supports_splitting(self) -> bool: ...
+     def rank(self) -> int: ...
+     def size(self) -> int: ...
+     def eager_connect_single_device(self, device: Optional[torch.device]) -> None: ...
+     def _set_sequence_number_for_group(self) -> None: ...
+
+ class ProcessGroup:
+     class Options:
+         def __init__(self, backend: str, timeout: timedelta = ...): ...
+         @property
+         def backend(self) -> str: ...
+         @property
+         def _timeout(self) -> timedelta: ...
+         @_timeout.setter
+         def _timeout(self, val: timedelta) -> None: ...
+
+     class BackendType(Enum):
+         UNDEFINED = ...
+         GLOO = ...
+         NCCL = ...
+         UCC = ...
+         MPI = ...
+         CUSTOM = ...
+     def __init__(self, store: Store, rank: int, size: int, options: Options): ...
+     def rank(self) -> int: ...
+     def size(self) -> int: ...
+     @overload
+     def broadcast(
+         self,
+         tensors: List[Tensor],
+         opts=...,
+     ) -> Work: ...
+     @overload
+     def broadcast(
+         self,
+         tensor: Tensor,
+         root: int,
+     ) -> Work: ...
+     @overload
+     def allreduce(
+         self,
+         tensors: List[Tensor],
+         opts: AllreduceOptions = ...,
+     ) -> Work: ...
+     @overload
+     def allreduce(
+         self,
+         tensors: List[Tensor],
+         op=...,
+     ) -> Work: ...
+     @overload
+     def allreduce(
+         self,
+         tensor: Tensor,
+         op=...,
+     ) -> Work: ...
+     def allreduce_coalesced(
+         self,
+         tensors: List[Tensor],
+         opts=...,
+     ) -> Work: ...
+     def reduce_scatter_tensor_coalesced(
+         self,
+         outputTensors: List[Tensor],
+         inputTensors: List[Tensor],
+         opts: Optional[ReduceScatterOptions] = None,
+     ) -> Work: ...
+     @overload
+     def reduce(
+         self,
+         tensors: List[Tensor],
+         opts=...,
+     ) -> Work: ...
+     @overload
+     def reduce(
+         self,
+         tensor: Tensor,
+         root: int,
+         op=...,
+     ) -> Work: ...
+     @overload
+     def allgather(
+         self,
+         output_tensors: List[List[Tensor]],
+         input_tensors: List[Tensor],
+         opts=...,
+     ) -> Work: ...
+     @overload
+     def allgather(
+         self,
+         output_tensors: List[Tensor],
+         input_tensor: Tensor,
+     ) -> Work: ...
+     def _allgather_base(
+         self,
+         output: Tensor,
+         input: Tensor,
+         opts=...,
+     ) -> Work: ...
+     def allgather_coalesced(
+         self,
+         output_lists: List[List[Tensor]],
+         input_list: List[Tensor],
+         opts=...,
+     ) -> Work: ...
+     def allgather_into_tensor_coalesced(
+         self,
+         output_lists: List[Tensor],
+         input_list: List[Tensor],
+         opts=...,
+     ) -> Work: ...
+     @overload
+     def gather(
+         self,
+         output_tensors: List[List[Tensor]],
+         input_tensors: List[Tensor],
+         opts=...,
+     ) -> Work: ...
+     @overload
+     def gather(
+         self,
+         output_tensors: List[Tensor],
+         input_tensor: Tensor,
+         root: int,
+     ) -> Work: ...
+     @overload
+     def scatter(
+         self,
+         output_tensors: List[Tensor],
+         input_tensors: List[List[Tensor]],
+         opts=...,
+     ) -> Work: ...
+     @overload
+     def scatter(
+         self,
+         output_tensor: Tensor,
+         input_tensors: List[Tensor],
+         root: int,
+     ) -> Work: ...
+     @overload
+     def reduce_scatter(
+         self,
+         output_tensors: List[Tensor],
+         input_tensors: List[List[Tensor]],
+         opts=...,
+     ) -> Work: ...
+     @overload
+     def reduce_scatter(
+         self,
+         output_tensors: Tensor,
+         input_tensor: List[Tensor],
+     ) -> Work: ...
+     def _reduce_scatter_base(
+         self,
+         outputTensor: Tensor,
+         inputTensor: Tensor,
+         opts: Optional[ReduceScatterOptions],
+     ) -> Work: ...
+     @overload
+     def alltoall_base(
+         self,
+         output_tensor: Tensor,
+         input_tensor: Tensor,
+         output_split_sizes: List[int],
+         input_split_sizes: List[int],
+         opts=...,
+     ) -> Work: ...
+     @overload
+     def alltoall_base(
+         self,
+         output: Tensor,
+         input: Tensor,
+         output_split_sizes: List[int],
+         input_split_sizes: List[int],
+     ) -> Work: ...
+     @overload
+     def alltoall(
+         self,
+         output_tensor: List[Tensor],
+         input_tensor: List[Tensor],
+         opts=...,
+     ) -> Work: ...
+     @overload
+     def alltoall(
+         self,
+         output: List[Tensor],
+         input: List[Tensor],
+     ) -> Work: ...
+     def send(
+         self,
+         tensors: List[Tensor],
+         dstRank: int,
+         tag: int,
+     ) -> Work: ...
+     def recv(
+         self,
+         tensors: List[Tensor],
+         srcRank: int,
+         tag: int,
+     ) -> Work: ...
+     def recv_anysource(self, tensors: List[Tensor], tag: int) -> Work: ...
+     def barrier(self, opts=...) -> Work: ...
+     def boxed(self) -> ScriptObject: ...
+     @staticmethod
+     def unbox(obj: ScriptObject) -> ProcessGroup: ...
+     def _start_coalescing(self, device: torch.device) -> None: ...
+     def _end_coalescing(self, device: torch.device) -> Work: ...
+     def _get_backend_name(self) -> str: ...
+     def _backend_id(self, backend_type: BackendType) -> int: ...
+     @property
+     def _device_types(self) -> List[torch.device]: ...
+     def _get_backend(self, device: torch.device) -> Backend: ...
+     def _register_backend(
+         self,
+         device: torch.device,
+         backend_type: BackendType,
+         backend: Optional[Backend],
+     ) -> None: ...
+     def _set_group_name(self, name: str) -> None: ...
+     def name(self) -> str: ...
+     def _has_hooks(self) -> bool: ...
+     def _wait_for_pending_works(self) -> None: ...
+     def _set_sequence_number_for_group(self) -> None: ...
+     @property
+     def bound_device_id(self) -> Optional[torch.device]: ...
+     @bound_device_id.setter
+     def bound_device_id(self, device: Optional[torch.device]) -> None: ...
+     @property
+     def group_name(self) -> str: ...
+
+ class ProcessGroupRoundRobin(ProcessGroup): ...
+
+ def _round_robin_process_groups(
+     process_groups: List[ProcessGroup],
+ ) -> ProcessGroupRoundRobin: ...
+
+ class ProcessGroupGloo(Backend):
+     class Device: ...
+     class Options: ...
+
+     def __init__(
+         self,
+         store: Store,
+         rank: int,
+         size: int,
+         timeout: timedelta,
+     ): ...
+     @staticmethod
+     def create_device(hostname="", interface="") -> Device: ...
+     @staticmethod
+     def create_default_device() -> Device: ...
+     def _set_default_timeout(self, timeout) -> None: ...
+
+ class _ProcessGroupWrapper(Backend):
+     def __init__(self, pg: Backend, gloo_pg: ProcessGroupGloo): ...
+     wrapped_pg: Backend
+
+ class ProcessGroupNCCL(Backend):
+     class Options:
+         def __init__(self, timeout: Optional[timedelta] = None): ...
+         @property
+         def backend(self) -> str: ...
+         @property
+         def _timeout(self) -> timedelta: ...
+         @_timeout.setter
+         def _timeout(self, val: timedelta) -> None: ...
+         @property
+         def _is_high_priority_stream(self) -> bool: ...
+         @_is_high_priority_stream.setter
+         def _is_high_priority_stream(self, val: bool) -> None: ...
+
+     def __init__(
+         self,
+         store: Store,
+         rank: int,
+         size: int,
+         timeout: timedelta,
+     ): ...
+     def _group_start(self) -> None: ...
+     def _group_end(self) -> None: ...
+     def _set_default_timeout(self, timeout) -> None: ...
+     def _shutdown(self) -> None: ...
+     @property
+     def uid(self) -> int: ...
+
+ class ProcessGroupUCC(Backend):
+     def __init__(
+         self,
+         store: Store,
+         rank: int,
+         size: int,
+         timeout: timedelta,
+     ): ...
+
+ class ProcessGroupMPI(Backend):
+     def __init__(
+         self,
+         rank: int,
+         size: int,
+         pgComm: int,
+     ): ...
+     @staticmethod
+     def create(ranks: List[int]) -> ProcessGroupMPI: ...
+
+ def _compute_bucket_assignment_by_size(
+     tensors: List[Tensor],
+     bucket_size_limits: List[int],
+     expect_sparse_gradient: List[bool] = ...,
+     tensor_indices: List[int] = ...,
+ ) -> Tuple[List[List[int]], List[int]]: ...
+ def _broadcast_coalesced(
+     process_group: ProcessGroup,
+     tensors: List[Tensor],
+     buffer_size: int,
+     src: int,
+ ): ...
+ def _test_python_store(store: Store): ...
+ def _verify_params_across_processes(
+     process_group: ProcessGroup,
+     params: List[Tensor],
+     logger: Optional[Logger],
+ ): ...
+ def _make_nccl_premul_sum(factor: Union[float, List[Tensor]]) -> ReduceOp: ...
+ def _register_process_group(
+     group_name: str,
+     process_group: ProcessGroup,
+ ) -> None: ...
+ def _resolve_process_group(group_name: str) -> ProcessGroup: ...
+ def _unregister_all_process_groups() -> None: ...
+ def _unregister_process_group(group_name: str) -> None: ...
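
The c10d types above (stores, process groups, `Work` handles) are exposed through `torch.distributed`. A minimal single-rank sketch using a `FileStore` with a temporary path (an assumption for illustration; multi-rank jobs typically use `TCPStore` or an `env://` init method):

```python
import tempfile
import torch
import torch.distributed as dist

store = dist.FileStore(tempfile.mktemp(), 1)  # FileStore(path, numWorkers)
dist.init_process_group(backend="gloo", store=store, rank=0, world_size=1)

t = torch.ones(4)
work = dist.all_reduce(t, op=dist.ReduceOp.SUM, async_op=True)  # returns a Work
work.wait()
print(t)

dist.destroy_process_group()
```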
llmeval-env/lib/python3.10/site-packages/torch/_C/_distributed_rpc.pyi ADDED
@@ -0,0 +1,188 @@
+ # mypy: disable-error-code="type-arg"
+ from datetime import timedelta
+ from typing import Any, Dict, Generic, List, Optional, overload, Tuple, Type, TypeVar
+
+ import torch
+
+ from . import Future
+ from ._autograd import ProfilerEvent
+ from ._distributed_c10d import Store
+ from ._profiler import ProfilerConfig
+
+ # This module is defined in torch/csrc/distributed/rpc/init.cpp
+
+ _DEFAULT_INIT_METHOD: str
+ _DEFAULT_NUM_WORKER_THREADS: int
+ _UNSET_RPC_TIMEOUT: float
+ _DEFAULT_RPC_TIMEOUT_SEC: float
+
+ _T = TypeVar("_T")
+
+ class RpcBackendOptions:
+     rpc_timeout: float
+     init_method: str
+     def __init__(
+         self,
+         rpc_timeout: float = ...,
+         init_method: str = ...,
+     ): ...
+
+ class WorkerInfo:
+     def __init__(self, name: str, worker_id: int): ...
+     @property
+     def name(self) -> str: ...
+     @property
+     def id(self) -> int: ...
+     def __eq__(self, other: object) -> bool: ...
+
+ class RpcAgent:
+     def join(self, shutdown: bool = False, timeout: float = 0): ...
+     def sync(self): ...
+     def shutdown(self): ...
+     @overload
+     def get_worker_info(self) -> WorkerInfo: ...
+     @overload
+     def get_worker_info(self, workerName: str) -> WorkerInfo: ...
+     def get_worker_infos(self) -> List[WorkerInfo]: ...
+     def _get_device_map(self, dst: WorkerInfo) -> Dict[torch.device, torch.device]: ...
+     def get_debug_info(self) -> Dict[str, str]: ...
+     def get_metrics(self) -> Dict[str, str]: ...
+
+ class PyRRef(Generic[_T]):
+     def __init__(self, value: _T, type_hint: Any = None) -> None: ...
+     def is_owner(self) -> bool: ...
+     def confirmed_by_owner(self) -> bool: ...
+     def owner(self) -> WorkerInfo: ...
+     def owner_name(self) -> str: ...
+     def to_here(self, timeout: float = ...) -> _T: ...
+     def local_value(self) -> Any: ...
+     def rpc_sync(self, timeout: float = ...) -> Any: ...
+     def rpc_async(self, timeout: float = ...) -> Any: ...
+     def remote(self, timeout: float = ...) -> Any: ...
+     def _serialize(self) -> Tuple: ...
+     @staticmethod
+     def _deserialize(tp: Tuple) -> PyRRef: ...
+     def _get_type(self) -> Type[_T]: ...
+     def _get_future(self) -> Future[_T]: ...
+     def _get_profiling_future(self) -> Future[_T]: ...
+     def _set_profiling_future(self, profilingFuture: Future[_T]): ...
+
+ class _TensorPipeRpcBackendOptionsBase(RpcBackendOptions):
+     num_worker_threads: int
+     device_maps: Dict[str, Dict[torch.device, torch.device]]
+     devices: List[torch.device]
+     def __init__(
+         self,
+         num_worker_threads: int,
+         _transports: Optional[List],
+         _channels: Optional[List],
+         rpc_timeout: float = ...,
+         init_method: str = ...,
+         device_maps: Dict[str, Dict[torch.device, torch.device]] = {},  # noqa: B006
+         devices: List[torch.device] = [],  # noqa: B006
+     ): ...
+     def _set_device_map(
+         self,
+         to: str,
+         device_map: Dict[torch.device, torch.device],
+     ): ...
+
+ class TensorPipeAgent(RpcAgent):
+     def __init__(
+         self,
+         store: Store,
+         name: str,
+         worker_id: int,
+         world_size: Optional[int],
+         opts: _TensorPipeRpcBackendOptionsBase,
+         reverse_device_maps: Dict[str, Dict[torch.device, torch.device]],
+         devices: List[torch.device],
+     ): ...
+     def join(self, shutdown: bool = False, timeout: float = 0): ...
+     def shutdown(self): ...
+     @overload
+     def get_worker_info(self) -> WorkerInfo: ...
+     @overload
+     def get_worker_info(self, workerName: str) -> WorkerInfo: ...
+     @overload
+     def get_worker_info(self, id: int) -> WorkerInfo: ...
+     def get_worker_infos(self) -> List[WorkerInfo]: ...
+     def _get_device_map(self, dst: WorkerInfo) -> Dict[torch.device, torch.device]: ...
+     def _update_group_membership(
+         self,
+         worker_info: WorkerInfo,
+         my_devices: List[torch.device],
+         reverse_device_map: Dict[str, Dict[torch.device, torch.device]],
+         is_join: bool,
+     ): ...
+     def _get_backend_options(self) -> _TensorPipeRpcBackendOptionsBase: ...
+     @property
+     def is_static_group(self) -> bool: ...
+     @property
+     def store(self) -> Store: ...
+
+ def _is_current_rpc_agent_set() -> bool: ...
+ def _get_current_rpc_agent() -> RpcAgent: ...
+ def _set_and_start_rpc_agent(agent: RpcAgent): ...
+ def _reset_current_rpc_agent(): ...
+ def _delete_all_user_and_unforked_owner_rrefs(timeout: timedelta = ...): ...
+ def _destroy_rref_context(ignoreRRefLeak: bool): ...
+ def _rref_context_get_debug_info() -> Dict[str, str]: ...
+ def _cleanup_python_rpc_handler(): ...
+ def _invoke_rpc_builtin(
+     dst: WorkerInfo,
+     opName: str,
+     rpcTimeoutSeconds: float,
+     *args: Any,
+     **kwargs: Any,
+ ): ...
+ def _invoke_rpc_python_udf(
+     dst: WorkerInfo,
+     pickledPythonUDF: str,
+     tensors: List[torch.Tensor],
+     rpcTimeoutSeconds: float,
+     isAsyncExecution: bool,
+ ): ...
+ def _invoke_rpc_torchscript(
+     dstWorkerName: str,
+     qualifiedNameStr: str,
+     argsTuple: Tuple,
+     kwargsDict: Dict,
+     rpcTimeoutSeconds: float,
+     isAsyncExecution: bool,
+ ): ...
+ def _invoke_remote_builtin(
+     dst: WorkerInfo,
+     opName: str,
+     rpcTimeoutSeconds: float,
+     *args: Any,
+     **kwargs: Any,
+ ): ...
+ def _invoke_remote_python_udf(
+     dst: WorkerInfo,
+     pickledPythonUDF: str,
+     tensors: List[torch.Tensor],
+     rpcTimeoutSeconds: float,
+     isAsyncExecution: bool,
+ ): ...
+ def _invoke_remote_torchscript(
+     dstWorkerName: WorkerInfo,
+     qualifiedNameStr: str,
+     rpcTimeoutSeconds: float,
+     isAsyncExecution: bool,
+     *args: Any,
+     **kwargs: Any,
+ ): ...
+ def get_rpc_timeout() -> float: ...
+ def enable_gil_profiling(flag: bool): ...
+ def _set_rpc_timeout(rpcTimeoutSeconds: float): ...
+
+ class RemoteProfilerManager:
+     @staticmethod
+     def set_current_profiling_key(key: str): ...
+
+ def _enable_server_process_global_profiler(new_config: ProfilerConfig): ...
+ def _disable_server_process_global_profiler() -> List[List[List[ProfilerEvent]]]: ...
+ def _set_profiler_node_id(default_node_id: int): ...
+ def _enable_jit_rref_pickle(): ...
+ def _disable_jit_rref_pickle(): ...
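
The agent and RRef types declared here are normally used through `torch.distributed.rpc`. A single-process sketch (`init_rpc` constructs a `TensorPipeAgent` internally):

```python
import torch
import torch.distributed.rpc as rpc

rpc.init_rpc("worker0", rank=0, world_size=1)

result = rpc.rpc_sync("worker0", torch.add, args=(torch.ones(2), 3))
rref = rpc.remote("worker0", torch.ones, args=(2, 2))  # returns an RRef (PyRRef)
print(result, rref.to_here())

rpc.shutdown()
```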
llmeval-env/lib/python3.10/site-packages/torch/_C/_distributed_rpc_testing.pyi ADDED
@@ -0,0 +1,35 @@
+ from typing import Dict, List
+
+ import torch
+
+ from ._distributed_c10d import Store
+ from ._distributed_rpc import _TensorPipeRpcBackendOptionsBase, TensorPipeAgent
+
+ # This module is defined in torch/csrc/distributed/rpc/testing/init.cpp
+
+ class FaultyTensorPipeRpcBackendOptions(_TensorPipeRpcBackendOptionsBase):
+     def __init__(
+         self,
+         num_worker_threads: int,
+         rpc_timeout: float,
+         init_method: str,
+         messages_to_fail: List[str],
+         messages_to_delay: Dict[str, float],
+         num_fail_sends: int,
+     ): ...
+     num_send_recv_threads: int
+     messages_to_fail: List[str]
+     messages_to_delay: Dict[str, float]
+     num_fail_sends: int
+
+ class FaultyTensorPipeAgent(TensorPipeAgent):
+     def __init__(
+         self,
+         store: Store,
+         name: str,
+         rank: int,
+         world_size: int,
+         options: FaultyTensorPipeRpcBackendOptions,
+         reverse_device_maps: Dict[str, Dict[torch.device, torch.device]],
+         devices: List[torch.device],
+     ): ...
llmeval-env/lib/python3.10/site-packages/torch/_C/_functorch.pyi ADDED
@@ -0,0 +1,77 @@
+ from enum import Enum
+ from typing import Optional, Tuple
+
+ from torch import Tensor
+
+ # Defined in torch/csrc/functorch/init.cpp
+
+ def _set_dynamic_layer_keys_included(included: bool) -> None: ...
+ def get_unwrapped(tensor: Tensor) -> Tensor: ...
+ def is_batchedtensor(tensor: Tensor) -> bool: ...
+ def is_functionaltensor(tensor: Tensor) -> bool: ...
+ def is_functorch_wrapped_tensor(tensor: Tensor) -> bool: ...
+ def is_gradtrackingtensor(tensor: Tensor) -> bool: ...
+ def maybe_get_bdim(tensor: Tensor) -> int: ...
+ def maybe_get_level(tensor: Tensor) -> int: ...
+ def maybe_current_level() -> Optional[int]: ...
+ def unwrap_if_dead(tensor: Tensor) -> Tensor: ...
+ def _unwrap_for_grad(tensor: Tensor, level: int) -> Tensor: ...
+ def _wrap_for_grad(tensor: Tensor, level: int) -> Tensor: ...
+ def _unwrap_batched(tensor: Tensor, level: int) -> Tuple[Tensor, Optional[int]]: ...
+ def current_level() -> int: ...
+ def _add_batch_dim(tensor: Tensor, bdim: int, level: int) -> Tensor: ...
+ def set_single_level_autograd_function_allowed(allowed: bool) -> None: ...
+ def get_single_level_autograd_function_allowed() -> bool: ...
+ def _unwrap_functional_tensor(tensor: Tensor, reapply_views: bool) -> Tensor: ...
+ def _wrap_functional_tensor(tensor: Tensor, level: int) -> Tensor: ...
+ def _vmap_increment_nesting(batch_size: int, randomness: str) -> int: ...
+ def _vmap_decrement_nesting() -> int: ...
+ def _grad_increment_nesting() -> int: ...
+ def _grad_decrement_nesting() -> int: ...
+
+ # Defined in aten/src/ATen/functorch/Interpreter.h
+ class TransformType(Enum):
+     Torch: TransformType = ...
+     Vmap: TransformType = ...
+     Grad: TransformType = ...
+     Jvp: TransformType = ...
+     Functionalize: TransformType = ...
+
+ class RandomnessType(Enum):
+     Error: TransformType = ...
+     Same: TransformType = ...
+     Different: TransformType = ...
+
+ class CInterpreter:
+     def key(self) -> TransformType: ...
+     def level(self) -> int: ...
+
+ class CGradInterpreterPtr:
+     def __init__(self, interpreter: CInterpreter): ...
+     def lift(self, Tensor) -> Tensor: ...
+     def prevGradMode(self) -> bool: ...
+
+ class CJvpInterpreterPtr:
+     def __init__(self, interpreter: CInterpreter): ...
+     def lift(self, Tensor) -> Tensor: ...
+     def prevFwdGradMode(self) -> bool: ...
+
+ class CFunctionalizeInterpreterPtr:
+     def __init__(self, interpreter: CInterpreter): ...
+     def key(self) -> TransformType: ...
+     def level(self) -> int: ...
+     def functionalizeAddBackViews(self) -> bool: ...
+
+ class CVmapInterpreterPtr:
+     def __init__(self, interpreter: CInterpreter): ...
+     def key(self) -> TransformType: ...
+     def level(self) -> int: ...
+     def batchSize(self) -> int: ...
+     def randomness(self) -> RandomnessType: ...
+
+ class DynamicLayer: ...
+
+ def get_interpreter_stack() -> list[CInterpreter]: ...
+ def peek_interpreter_stack() -> CInterpreter: ...
+ def pop_dynamic_layer_stack() -> DynamicLayer: ...
+ def push_dynamic_layer_stack(dl: DynamicLayer) -> int: ...
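
These are the low-level hooks behind the public `torch.func` transforms, which push and pop the dynamic-layer (interpreter) stack declared above. A short sketch computing per-sample gradients:

```python
import torch
from torch.func import grad, vmap

f = lambda x: (x ** 2).sum()
x = torch.randn(5, 3)
per_sample_grads = vmap(grad(f))(x)  # one gradient row per sample
print(per_sample_grads.shape)        # torch.Size([5, 3])
```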
llmeval-env/lib/python3.10/site-packages/torch/_C/_nn.pyi ADDED
@@ -0,0 +1,86 @@
+ # mypy: disable-error-code="type-arg"
+ from typing import List, Optional, overload, Sequence, Tuple, Union
+
+ from torch import memory_format, Tensor
+ from torch.types import _bool, _device, _dtype, _int, _size
+
+ # Defined in tools/autograd/templates/python_nn_functions.cpp
+
+ def adaptive_max_pool2d(input: Tensor, output_size: Union[_int, _size]) -> Tuple[Tensor, Tensor]: ...
+ def adaptive_max_pool3d(input: Tensor, output_size: Union[_int, _size]) -> Tuple[Tensor, Tensor]: ...
+ def avg_pool2d(input: Tensor, kernel_size: Union[_int, _size], stride: Optional[Union[_int, _size]] = None, padding: Union[_int, _size] = 0, ceil_mode: bool = False, count_include_pad: bool = True, divisor_override: Optional[int] = None) -> Tensor: ...
+ def avg_pool3d(input: Tensor, kernel_size: Union[_int, _size], stride: Optional[Union[_int, _size]] = None, padding: Union[_int, _size] = 0, ceil_mode: bool = False, count_include_pad: bool = True, divisor_override: Optional[int] = None) -> Tensor: ...
+ def elu_(input: Tensor, alpha: float = ...) -> Tensor: ...
+ def fractional_max_pool2d(input: Tensor, kernel_size: Union[_int, _size], output_size: Union[_int, _size], _random_samples: Tensor) -> Tuple[Tensor, Tensor]: ...
+ def fractional_max_pool3d(input: Tensor, kernel_size: Union[_int, _size], output_size: Union[_int, _size], _random_samples: Tensor) -> Tuple[Tensor, Tensor]: ...
+ def gelu(input: Tensor, approximate: str = ...) -> Tensor: ...
+ def hardsigmoid(input: Tensor, *, out: Optional[Tensor] = None) -> Tensor: ...
+ def hardtanh(input: Tensor, min_val: float = ..., max_val: float = ..., *, out: Optional[Tensor] = None) -> Tensor: ...
+ def hardtanh_(input: Tensor, min_val: float = ..., max_val: float = ...) -> Tensor: ...
+ def leaky_relu(input: Tensor, negative_slope: float = ..., *, out: Optional[Tensor] = None) -> Tensor: ...
+ def leaky_relu_(input: Tensor, negative_slope: float = ...) -> Tensor: ...
+ def linear(input: Tensor, weight: Tensor, bias: Optional[Tensor] = None) -> Tensor: ...
+ def log_sigmoid(input: Tensor) -> Tensor: ...
+ def one_hot(tensor: Tensor, num_classes: int = ...) -> Tensor: ...
+ def pad(input: Tensor, pad: Sequence[int], mode: str = ..., value: Optional[float] = None) -> Tensor: ...
+ def scaled_dot_product_attention(query: Tensor, key: Tensor, value: Tensor, attn_mask: Optional[Tensor] = None, dropout_p: float = 0.0, is_causal: bool = False, scale: Optional[float] = None) -> Tensor: ...
+ def softplus(input: Tensor, beta: float = ..., threshold: float = ...) -> Tensor: ...
+ def softshrink(input: Tensor, lambd: float = ...) -> Tensor: ...
+
+ # Defined in aten/src/ATen/native/mkldnn/Linear.cpp
+ def mkldnn_linear(input: Tensor, weight: Tensor, bias: Optional[Tensor]) -> Tensor: ...
+
+ # Defined at aten/src/ATen/native/mkldnn/MKLDNNConversions.cpp
+ def mkldnn_reorder_conv2d_weight(
+     self: Tensor,
+     padding: List,
+     stride: List,
+     dilatation: List,
+     groups: int,
+ ) -> Tensor: ...
+ def mkldnn_reorder_conv3d_weight(
+     self: Tensor,
+     padding: List,
+     stride: List,
+     dilatation: List,
+     groups: int,
+ ) -> Tensor: ...
+
+ # Defined in aten/src/ATen/native/mkldnn/Prelu.cpp
+ def mkldnn_prelu(input: Tensor, weight: Tensor) -> Tensor: ...
+
+ # Defined at tools/autograd/templates/python_nn_functions.cpp
+ @overload
+ def _parse_to(
+     device: _device,
+     dtype: _dtype,
+     non_blocking: _bool,
+     copy: _bool,
+     *,
+     memory_format: memory_format,
+ ) -> Tuple[_device, _dtype, _bool, memory_format]: ...
+ @overload
+ def _parse_to(
+     dtype: _dtype,
+     non_blocking: _bool,
+     copy: _bool,
+     *,
+     memory_format: memory_format,
+ ) -> Tuple[_device, _dtype, _bool, memory_format]: ...
+ @overload
+ def _parse_to(
+     tensor: Tensor,
+     non_blocking: _bool,
+     copy: _bool,
+     *,
+     memory_format: memory_format,
+ ) -> Tuple[_device, _dtype, _bool, memory_format]: ...
+
+ # Defined in aten/src/ATen/native/PadSequence.cpp
+ def pad_sequence(
+     sequences: List[Tensor],
+     batch_first: bool = False,
+     padding_value: float = ...,
+ ) -> Tensor: ...
+ def flatten_dense_tensors(tensors: List[Tensor]) -> Tensor: ...
+ def unflatten_dense_tensors(flat: Tensor, tensors: List[Tensor]) -> List[Tensor]: ...
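
Most of these bindings are reached through `torch.nn.functional`. A small sketch of two of them, `scaled_dot_product_attention` and `pad` (shapes are arbitrary):

```python
import torch
import torch.nn.functional as F

q = k = v = torch.randn(2, 4, 8, 16)  # (batch, heads, seq, head_dim)
out = F.scaled_dot_product_attention(q, k, v, is_causal=True)
padded = F.pad(torch.ones(2, 3), (1, 1), mode="constant", value=0.0)
print(out.shape, padded.shape)
```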
llmeval-env/lib/python3.10/site-packages/torch/_C/_onnx.pyi ADDED
@@ -0,0 +1,40 @@
+ # Defined in torch/csrc/onnx/init.cpp
+
+ from enum import Enum
+
+ _CAFFE2_ATEN_FALLBACK: bool
+ PRODUCER_VERSION: str
+
+ class TensorProtoDataType(Enum):
+     UNDEFINED = ...
+     FLOAT = ...
+     UINT8 = ...
+     INT8 = ...
+     UINT16 = ...
+     INT16 = ...
+     INT32 = ...
+     INT64 = ...
+     STRING = ...
+     BOOL = ...
+     FLOAT16 = ...
+     DOUBLE = ...
+     UINT32 = ...
+     UINT64 = ...
+     COMPLEX64 = ...
+     COMPLEX128 = ...
+     BFLOAT16 = ...
+     FLOAT8E5M2 = ...
+     FLOAT8E4M3FN = ...
+     FLOAT8E5M2FNUZ = ...
+     FLOAT8E4M3FNUZ = ...
+
+ class OperatorExportTypes(Enum):
+     ONNX = ...
+     ONNX_ATEN = ...
+     ONNX_ATEN_FALLBACK = ...
+     ONNX_FALLTHROUGH = ...
+
+ class TrainingMode(Enum):
+     EVAL = ...
+     PRESERVE = ...
+     TRAINING = ...
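
`TrainingMode` and `OperatorExportTypes` surface as arguments to the TorchScript-based `torch.onnx.export` entry point; a hedged sketch with a toy model (the output filename is arbitrary):

```python
import torch

model = torch.nn.Linear(4, 2)
dummy = torch.randn(1, 4)
torch.onnx.export(
    model,
    dummy,
    "linear.onnx",
    training=torch.onnx.TrainingMode.EVAL,
    operator_export_type=torch.onnx.OperatorExportTypes.ONNX,
)
```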
llmeval-env/lib/python3.10/site-packages/torch/distributed/_composable/__init__.py ADDED
@@ -0,0 +1,4 @@
+ from .checkpoint_activation import checkpoint
+ from .contract import _get_registry, contract
+ from .fully_shard import fully_shard
+ from .replicate import replicate
llmeval-env/lib/python3.10/site-packages/torch/distributed/_composable/fsdp/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (310 Bytes).
 
llmeval-env/lib/python3.10/site-packages/torch/distributed/_composable/fsdp/__pycache__/_fsdp_api.cpython-310.pyc ADDED
Binary file (2.74 kB).
 
llmeval-env/lib/python3.10/site-packages/torch/distributed/_composable/fsdp/__pycache__/_fsdp_collectives.cpython-310.pyc ADDED
Binary file (6.58 kB).
 
llmeval-env/lib/python3.10/site-packages/torch/distributed/_composable/fsdp/__pycache__/_fsdp_common.cpython-310.pyc ADDED
Binary file (5.08 kB).
 
llmeval-env/lib/python3.10/site-packages/torch/distributed/_composable/fsdp/__pycache__/_fsdp_init.cpython-310.pyc ADDED
Binary file (4.35 kB).
 
llmeval-env/lib/python3.10/site-packages/torch/distributed/_composable/fsdp/__pycache__/_fsdp_param.cpython-310.pyc ADDED
Binary file (12.3 kB).
 
llmeval-env/lib/python3.10/site-packages/torch/distributed/_composable/fsdp/__pycache__/_fsdp_state.cpython-310.pyc ADDED
Binary file (8.54 kB).
 
llmeval-env/lib/python3.10/site-packages/torch/distributed/_composable/fsdp/__pycache__/fully_shard.cpython-310.pyc ADDED
Binary file (10.2 kB).
 
llmeval-env/lib/python3.10/site-packages/torch/distributed/_composable/replicate.py ADDED
@@ -0,0 +1,154 @@
+ import weakref
+ from typing import Any, Dict, Iterable, List, Optional, Set, Tuple
+
+ import torch
+ import torch.nn as nn
+ from torch.distributed._composable_state import _State
+ from torch.nn.parallel import DistributedDataParallel
+
+ from .contract import _get_registry, contract
+
+ _ROOT_MODULE_PREFIX = ""
+
+
+ class _ReplicateState(_State):
+     def __init__(self) -> None:
+         super().__init__()
+         self.module: nn.Module = nn.ParameterList()
+         self.has_initialized: bool = False
+         self._param_list: nn.ParameterList = nn.ParameterList()
+         # TODO(@fegin): this variable is originally create for testing, we
+         # should remove this if possible.
+         self._param_names: List[str] = []
+
+     def _collect_params(
+         self,
+         module: nn.Module,
+         ignored_modules: Set[nn.Module],
+         ignored_params: Set[nn.Parameter],
+         prefix: str = _ROOT_MODULE_PREFIX,
+     ) -> None:
+         # skip if managed by fully_sharded API
+         if _is_fully_sharded(module):
+             return
+
+         # if a module is ignored, all descendants of the module are ignored.
+         if module in ignored_modules:
+             return
+
+         recurse_prefix = (
+             f"{prefix}." if prefix != _ROOT_MODULE_PREFIX else _ROOT_MODULE_PREFIX
+         )
+
+         for n, p in module.named_parameters(recurse=False):
+             if p not in ignored_params:
+                 self._param_list.append(p)
+                 self._param_names.append(f"{recurse_prefix}{n}")
+
+         for name, child_module in module.named_children():
+             self._collect_params(
+                 child_module,
+                 ignored_modules,
+                 ignored_params,
+                 prefix=f"{recurse_prefix}{name}",
+             )
+
+     def init(
+         self,
+         module: nn.Module,
+         ignored_modules: Set[nn.Module],
+         **kwargs,
+     ) -> None:
+         if _is_fully_sharded(module):
+             raise RuntimeError(
+                 "Cannot apply `replicate()` on a Module already managed by `fully_shard`"
+             )
+
+         if self.has_initialized:
+             return
+
+         self.has_initialized = True
+         self.module = module
+         ignored_params = {p for m in ignored_modules for p in m.parameters()}
+         self._collect_params(module, ignored_modules, ignored_params)
+         module.register_forward_pre_hook(self.forward_pre_hook, with_kwargs=True)
+         module.register_forward_hook(self.forward_post_hook)  # type: ignore[arg-type]
+
+         if "device_id" in kwargs:
+             # replicate() supports a small usability enhancement where
+             # user can pass in device_id as a Union[int, torch.device] even for
+             # CPU devices so users don't have to change code for CPU/GPU runs.
+             # We derive the right device_ids to feed into DDP to support this.
+             if kwargs["device_id"] is not None:
+                 device_id = kwargs["device_id"]
+                 # Convert to device_ids that DDP expects.
+                 if isinstance(device_id, torch.device) and device_id.type == "cpu":
+                     # CPU modules receive device_ids None
+                     kwargs["device_ids"] = None
+                 else:
+                     # GPU modules expect device_ids=[cuda_device]
+                     kwargs["device_ids"] = [device_id]
+             else:
+                 kwargs["device_ids"] = None
+             kwargs.pop("device_id")
+
+         self._ddp = DistributedDataParallel(self._param_list, **kwargs)
+         # Weakref to the DDP instance is currently only used for testing.
+         replicate.state(self.module)._ddp_weakref = weakref.ref(self._ddp)
+
+     def forward_pre_hook(
+         self, module: nn.Module, args: Tuple[Any, ...], kwargs: Dict[str, Any]
+     ) -> Any:
+         return self._ddp._pre_forward(*args, **kwargs)
+
+     def forward_post_hook(
+         self,
+         module: nn.Module,
+         input: Tuple[torch.Tensor],
+         output: torch.Tensor,
+     ) -> torch.Tensor:
+         return self._ddp._post_forward(output)
+
+
+ @contract(state_cls=_ReplicateState)
+ def replicate(
+     module: nn.Module,
+     ignored_modules: Optional[Iterable[torch.nn.Module]] = None,
+     **kwargs,
+ ) -> nn.Module:
+     r"""Replicates a module
+
+     Args:
+         module (torch.nn.Module): module to replicate
+
+     Example::
+         >>> # xdoctest: +REQUIRES(module:torch._C._distributed_c10d)
+         >>> module = nn.Linear(3, 3)
+         >>> replicate(module)
+     """
+     torch._C._log_api_usage_once("torch.distributed.replicate")
+
+     # TODO(fegin): using kwargs is not a good idea if we would like to make
+     # replicate a formal API to replace DDP.
+     if "device_id" in kwargs:
+         if not isinstance(kwargs["device_id"], (int, torch.device)):
+             raise RuntimeError(
+                 "Expected device_id to be int or torch.device, "
+                 f"but got {type(kwargs['device_id'])}"
+             )
+
+     if ignored_modules is None:
+         ignored_modules = {}
+     else:
+         ignored_modules = set(ignored_modules)
+     replicate.state(module).init(module, ignored_modules, **kwargs)
+
+     return module
+
+
+ def _is_fully_sharded(module: nn.Module) -> bool:
+     r"""Check if module is marked with fully_shard."""
+     registry = _get_registry(module)
+     if registry is None:
+         return False
+     return "fully_shard" in registry
llmeval-env/lib/python3.10/site-packages/torch/distributed/_tensor/ops/pointwise_ops.py ADDED
@@ -0,0 +1,629 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Copyright (c) Meta Platforms, Inc. and affiliates
2
+ from typing import List, Sequence, Tuple
3
+
4
+ import torch
5
+
6
+ from torch.distributed._tensor.op_schema import (
7
+ _is_inplace_op,
8
+ _is_out_variant_op,
9
+ OpSchema,
10
+ OpStrategy,
11
+ PlacementStrategy,
12
+ RuntimeSchemaInfo,
13
+ StrategyType,
14
+ TupleStrategy,
15
+ )
16
+
17
+ from torch.distributed._tensor.ops.utils import (
18
+ generate_redistribute_costs,
19
+ infer_broadcast_dims_map,
20
+ map_placements_after_broadcast,
21
+ normalize_dim,
22
+ register_op_strategy,
23
+ )
24
+ from torch.distributed._tensor.placement_types import (
25
+ _Partial,
26
+ DTensorSpec,
27
+ Placement,
28
+ Replicate,
29
+ Shard,
30
+ )
31
+ from torch.distributed.device_mesh import DeviceMesh
32
+
33
+
34
+ aten = torch.ops.aten
35
+ # The commented-out pointwise_ops list below is kept here for convenience;
36
+ # these are pointwise ops that are not yet supported, and the list may not
37
+ # be complete.
38
+ # pointwise_ops = [
39
+ # "fake_quantize_per_channel_affine",
40
+ # "fake_quantize_per_tensor_affine",
41
+ # "floor_divide", # floor_divide is deprecated
42
+ # "frexp", # multiple output pointwise op, need to add support
43
+ # "gradient", # need investigation on this op
44
+ # "imag", # complex data type only
45
+ # "quantized_batch_norm",
46
+ # "quantized_max_pool1d",
47
+ # "quantized_max_pool2d",
48
+ # "real", # complex data type only
49
+ # ]
50
+
51
+
52
+ linear_pointwise_ops = [
53
+ aten.div.Scalar, # this op is linear on the first argument, and the second argument is scalar, so it fits as a linear op.
54
+ aten.div_.Scalar, # this op is linear on the first argument, and the second argument is scalar, so it fits as a linear op.
55
+ aten.to.dtype,
56
+ aten.add.Tensor,
57
+ aten.add_.Tensor,
58
+ ]
59
+
60
+
61
+ pointwise_ops = [
62
+ # please keep the entries below alphabetically sorted
63
+ aten.abs.default,
64
+ aten.abs.out,
65
+ aten.abs_.default,
66
+ aten.acos.default,
67
+ aten.acos.out,
68
+ aten.acos_.default,
69
+ aten.acosh.default,
70
+ aten.acosh.out,
71
+ aten.acosh_.default,
72
+ aten.add.Scalar,
73
+ aten.add.out,
74
+ aten.add_.Scalar,
75
+ aten.addcdiv.default,
76
+ aten.addcdiv.out,
77
+ aten.addcdiv_.default,
78
+ aten.addcmul.default,
79
+ aten.addcmul.out,
80
+ aten.addcmul_.default,
81
+ aten.angle.default,
82
+ aten.angle.out,
83
+ aten.asin.default,
84
+ aten.asin.out,
85
+ aten.asin_.default,
86
+ aten.asinh.default,
87
+ aten.asinh.out,
88
+ aten.asinh_.default,
89
+ aten.atan.default,
90
+ aten.atan.out,
91
+ aten.atan2.default,
92
+ aten.atan2.out,
93
+ aten.atan2_.default,
94
+ aten.atan_.default,
95
+ aten.atanh.default,
96
+ aten.atanh.out,
97
+ aten.atanh_.default,
98
+ aten.bitwise_and.Scalar,
99
+ aten.bitwise_and.Scalar_Tensor,
100
+ aten.bitwise_and.Scalar_out,
101
+ aten.bitwise_and.Tensor,
102
+ aten.bitwise_and.Tensor_out,
103
+ aten.bitwise_and_.Scalar,
104
+ aten.bitwise_and_.Tensor,
105
+ aten.bitwise_left_shift.Scalar_Tensor,
106
+ aten.bitwise_left_shift.Tensor,
107
+ aten.bitwise_left_shift.Tensor_Scalar,
108
+ aten.bitwise_left_shift.Tensor_Scalar_out,
109
+ aten.bitwise_left_shift.Tensor_out,
110
+ aten.bitwise_left_shift_.Tensor,
111
+ aten.bitwise_left_shift_.Tensor_Scalar,
112
+ aten.bitwise_not.default,
113
+ aten.bitwise_not.out,
114
+ aten.bitwise_not_.default,
115
+ aten.bitwise_or.Scalar,
116
+ aten.bitwise_or.Scalar_Tensor,
117
+ aten.bitwise_or.Scalar_out,
118
+ aten.bitwise_or.Tensor,
119
+ aten.bitwise_or.Tensor_out,
120
+ aten.bitwise_or_.Scalar,
121
+ aten.bitwise_or_.Tensor,
122
+ aten.bitwise_right_shift.Scalar_Tensor,
123
+ aten.bitwise_right_shift.Tensor,
124
+ aten.bitwise_right_shift.Tensor_Scalar,
125
+ aten.bitwise_right_shift.Tensor_Scalar_out,
126
+ aten.bitwise_right_shift.Tensor_out,
127
+ aten.bitwise_right_shift_.Tensor,
128
+ aten.bitwise_right_shift_.Tensor_Scalar,
129
+ aten.bitwise_xor.Scalar,
130
+ aten.bitwise_xor.Scalar_Tensor,
131
+ aten.bitwise_xor.Scalar_out,
132
+ aten.bitwise_xor.Tensor,
133
+ aten.bitwise_xor.Tensor_out,
134
+ aten.bitwise_xor_.Scalar,
135
+ aten.bitwise_xor_.Tensor,
136
+ aten.ceil.default,
137
+ aten.ceil.out,
138
+ aten.ceil_.default,
139
+ aten.clamp.default,
140
+ aten.clamp.out,
141
+ aten.clamp_.default,
142
+ aten.clip.default,
143
+ aten.clip.out,
144
+ aten.clip_.default,
145
+ aten.conj_physical.default,
146
+ aten.conj_physical.out,
147
+ aten.conj_physical_.default,
148
+ aten.copysign.Scalar,
149
+ aten.copysign.Scalar_out,
150
+ aten.copysign.Tensor,
151
+ aten.copysign.out,
152
+ aten.copysign_.Scalar,
153
+ aten.copysign_.Tensor,
154
+ aten.cos.default,
155
+ aten.cos.out,
156
+ aten.cos_.default,
157
+ aten.cosh.default,
158
+ aten.cosh.out,
159
+ aten.cosh_.default,
160
+ aten.deg2rad.default,
161
+ aten.deg2rad.out,
162
+ aten.deg2rad_.default,
163
+ aten.digamma.default,
164
+ aten.digamma.out,
165
+ aten.digamma_.default,
166
+ aten.div.Tensor,
167
+ aten.div.Tensor_mode,
168
+ aten.div.out,
169
+ aten.div.out_mode,
170
+ aten.div_.Tensor,
171
+ aten.div_.Tensor_mode,
172
+ aten.eq.Tensor,
173
+ aten.eq.Tensor_out,
174
+ aten.eq.Scalar,
175
+ aten.eq.Scalar_out,
176
+ aten.erf.default,
177
+ aten.erf.out,
178
+ aten.erf_.default,
179
+ aten.erfc.default,
180
+ aten.erfc.out,
181
+ aten.erfc_.default,
182
+ aten.erfinv.default,
183
+ aten.erfinv.out,
184
+ aten.erfinv_.default,
185
+ aten.exp.default,
186
+ aten.exp.out,
187
+ aten.exp2.default,
188
+ aten.exp2.out,
189
+ aten.exp2_.default,
190
+ aten.exp_.default,
191
+ aten.expm1.default,
192
+ aten.expm1.out,
193
+ aten.expm1_.default,
194
+ aten.float_power.Scalar,
195
+ aten.float_power.Scalar_out,
196
+ aten.float_power.Tensor_Scalar,
197
+ aten.float_power.Tensor_Scalar_out,
198
+ aten.float_power.Tensor_Tensor,
199
+ aten.float_power.Tensor_Tensor_out,
200
+ aten.float_power_.Scalar,
201
+ aten.float_power_.Tensor,
202
+ aten.floor.default,
203
+ aten.floor.out,
204
+ aten.floor_.default,
205
+ aten.fmod.Scalar,
206
+ aten.fmod.Scalar_out,
207
+ aten.fmod.Tensor,
208
+ aten.fmod.Tensor_out,
209
+ aten.fmod_.Scalar,
210
+ aten.fmod_.Tensor,
211
+ aten.frac.default,
212
+ aten.frac.out,
213
+ aten.frac_.default,
214
+ aten.ge.Scalar,
215
+ aten.ge.Tensor,
216
+ aten.gelu.default,
217
+ aten.gt.Tensor,
218
+ aten.gt.Tensor_out,
219
+ aten.gt.Scalar,
220
+ aten.gt.Scalar_out,
223
+ aten.hypot.default,
224
+ aten.hypot.out,
225
+ aten.hypot_.default,
226
+ aten.i0.default,
227
+ aten.i0.out,
228
+ aten.i0_.default,
229
+ aten.igamma.default,
230
+ aten.igamma.out,
231
+ aten.igamma_.default,
232
+ aten.igammac.default,
233
+ aten.igammac.out,
234
+ aten.igammac_.default,
235
+ aten.isnan.default,
236
+ aten.ldexp.default,
237
+ aten.ldexp.out,
238
+ aten.ldexp_.default,
239
+ aten.lt.Tensor,
240
+ aten.lt.Tensor_out,
241
+ aten.lt.Scalar,
242
+ aten.lt.Scalar_out,
243
+ aten.le.Scalar,
244
+ aten.le.Tensor,
245
+ aten.lerp.Scalar,
246
+ aten.lerp.Scalar_out,
247
+ aten.lerp.Tensor,
248
+ aten.lerp.Tensor_out,
249
+ aten.lerp_.Scalar,
250
+ aten.lerp_.Tensor,
251
+ aten.lgamma.default,
252
+ aten.lgamma.out,
253
+ aten.lgamma_.default,
254
+ aten.log.default,
255
+ aten.log.out,
256
+ aten.log10.default,
257
+ aten.log10.out,
258
+ aten.log10_.default,
259
+ aten.log1p.default,
260
+ aten.log1p.out,
261
+ aten.log1p_.default,
262
+ aten.log2.default,
263
+ aten.log2.out,
264
+ aten.log2_.default,
265
+ aten.log_.default,
266
+ aten.logaddexp.default,
267
+ aten.logaddexp.out,
268
+ aten.logaddexp2.default,
269
+ aten.logaddexp2.out,
270
+ aten.logical_and.default,
271
+ aten.logical_and.out,
272
+ aten.logical_and_.default,
273
+ aten.logical_not.default,
274
+ aten.logical_not.out,
275
+ aten.logical_not_.default,
276
+ aten.logical_or.default,
277
+ aten.logical_or.out,
278
+ aten.logical_or_.default,
279
+ aten.logical_xor.default,
280
+ aten.logical_xor.out,
281
+ aten.logical_xor_.default,
282
+ aten.logit.default,
283
+ aten.logit.out,
284
+ aten.logit_.default,
285
+ aten.masked_fill.Scalar,
286
+ aten.maximum.out,
287
+ aten.mul.Scalar,
288
+ aten.mul.Tensor,
289
+ aten.mul.out,
290
+ aten.mul_.Scalar,
291
+ aten.mul_.Tensor,
292
+ aten.mvlgamma.default,
293
+ aten.mvlgamma.out,
294
+ aten.mvlgamma_.default,
295
+ aten.native_dropout_backward.default,
296
+ aten.native_dropout_backward.out,
297
+ aten.nan_to_num.default,
298
+ aten.nan_to_num.out,
299
+ aten.nan_to_num_.default,
300
+ aten.ne.Scalar,
301
+ aten.neg.default,
302
+ aten.neg.out,
303
+ aten.neg_.default,
304
+ aten.nextafter.default,
305
+ aten.nextafter.out,
306
+ aten.nextafter_.default,
307
+ aten.polygamma.default,
308
+ aten.polygamma.out,
309
+ aten.polygamma_.default,
310
+ aten.positive.default,
311
+ aten.pow.Scalar,
312
+ aten.pow.Scalar_out,
313
+ aten.pow.Tensor_Scalar,
314
+ aten.pow.Tensor_Scalar_out,
315
+ aten.pow.Tensor_Tensor,
316
+ aten.pow.Tensor_Tensor_out,
317
+ aten.pow_.Scalar,
318
+ aten.pow_.Tensor,
319
+ aten.reciprocal.default,
320
+ aten.reciprocal.out,
321
+ aten.reciprocal_.default,
322
+ aten.rad2deg.default,
323
+ aten.rad2deg.out,
324
+ aten.rad2deg_.default,
325
+ aten.relu.default,
326
+ aten.relu_.default,
327
+ aten.remainder.Scalar,
328
+ aten.remainder.Scalar_Tensor,
329
+ aten.remainder.Scalar_out,
330
+ aten.remainder.Tensor,
331
+ aten.remainder.Tensor_out,
332
+ aten.remainder_.Scalar,
333
+ aten.remainder_.Tensor,
334
+ aten.round.decimals,
335
+ aten.round.decimals_out,
336
+ aten.round.default,
337
+ aten.round.out,
338
+ aten.round_.decimals,
339
+ aten.round_.default,
340
+ aten.rsqrt.default,
341
+ aten.rsqrt.out,
342
+ aten.rsqrt_.default,
343
+ aten.rsub.Scalar,
344
+ aten.sgn.default,
345
+ aten.sgn.out,
346
+ aten.sgn_.default,
347
+ aten.sigmoid.default,
348
+ aten.sigmoid.out,
349
+ aten.sigmoid_.default,
350
+ aten.sign.default,
351
+ aten.sign.out,
352
+ aten.sign_.default,
353
+ aten.signbit.default,
354
+ aten.signbit.out,
355
+ aten.silu.default,
356
+ aten.silu.out,
357
+ aten.sin.default,
358
+ aten.sin.out,
359
+ aten.sin_.default,
360
+ aten.sinc.default,
361
+ aten.sinc.out,
362
+ aten.sinc_.default,
363
+ aten.sinh.default,
364
+ aten.sinh.out,
365
+ aten.sinh_.default,
366
+ aten.sqrt.default,
367
+ aten.sqrt.out,
368
+ aten.sqrt_.default,
369
+ aten.square.default,
370
+ aten.square.out,
371
+ aten.square_.default,
372
+ aten.sub.Scalar,
373
+ aten.sub.Tensor,
374
+ aten.sub.out,
375
+ aten.sub_.Scalar,
376
+ aten.sub_.Tensor,
377
+ aten.tan.default,
378
+ aten.tan.out,
379
+ aten.tan_.default,
380
+ aten.tanh.default,
381
+ aten.tanh.out,
382
+ aten.tanh_.default,
383
+ aten.true_divide.Tensor,
384
+ aten.trunc.default,
385
+ aten.trunc.out,
386
+ aten.trunc_.default,
387
+ aten.where.self,
388
+ aten.where.self_out,
389
+ aten.xlogy.OutScalar_Self,
390
+ aten.xlogy.OutScalar_Other,
391
+ aten.xlogy.OutTensor,
392
+ aten.xlogy.Scalar_Other,
393
+ aten.xlogy.Scalar_Self,
394
+ aten.xlogy.Tensor,
395
+ aten.xlogy_.Scalar_Other,
396
+ aten.xlogy_.Tensor,
397
+ # backward point-wise ops
398
+ # please keep the entries below alphabetically sorted
399
+ aten.gelu_backward.default,
400
+ aten.sigmoid_backward.default,
401
+ aten.silu_backward.default,
402
+ aten.tanh_backward.default,
403
+ aten.threshold_backward.default,
404
+ ]
405
+
406
+
407
+ def pointwise_strategy(
408
+ mesh: DeviceMesh, op_schema: OpSchema, linearity: bool = False
409
+ ) -> OpStrategy:
410
+ max_shards_strategy_index = -1
411
+ max_shards = -1
412
+
413
+ if _is_inplace_op(op_schema.op):
414
+ # inplace op should follow the first arg strategy
415
+ followed_strategy = op_schema.args_schema[0]
416
+ elif _is_out_variant_op(op_schema.op):
417
+ # out variant op should follow the out kwarg strategy
418
+ followed_strategy = op_schema.kwargs_schema["out"]
419
+ else:
420
+ # normal pointwise op: we choose to follow the arg with
421
+ # the max number of shards, in case operands need resharding
422
+ for idx, arg_strategy in enumerate(op_schema.args_schema):
423
+ if not isinstance(arg_strategy, OpStrategy):
424
+ continue
425
+
426
+ arg_max_shards = arg_strategy.max_num_shards()
427
+ if arg_max_shards > max_shards:
428
+ max_shards_strategy_index = idx
429
+ max_shards = arg_max_shards
430
+
431
+ followed_strategy = op_schema.args_schema[max_shards_strategy_index]
432
+
433
+ assert isinstance(
434
+ followed_strategy, OpStrategy
435
+ ), f"no strategy to follow for {op_schema}!"
436
+ return common_pointwise_strategy(
437
+ mesh, op_schema.args_schema, followed_strategy, linearity
438
+ )
439
+
440
+
441
+ def common_pointwise_strategy(
442
+ mesh: DeviceMesh,
443
+ args_schema: Sequence[object],
444
+ followed_strategy: OpStrategy,
445
+ linearity: bool,
446
+ ) -> OpStrategy:
447
+ # handle broadcasting
448
+ common_shape = torch.broadcast_shapes(
449
+ *[arg.output_shape for arg in args_schema if isinstance(arg, OpStrategy)]
450
+ )
451
+ pointwise_strategy = OpStrategy([])
452
+
453
+ for placement_strategy in followed_strategy.strategies:
454
+ spec_to_follow = placement_strategy.output_spec
455
+ out_placements: List[Placement] = []
456
+ for placement in spec_to_follow.placements:
457
+ if isinstance(placement, Shard):
458
+ shard_dim = normalize_dim(placement.dim, len(spec_to_follow.shape))
459
+ common_ndim = len(common_shape)
460
+ new_shard_dim = common_ndim - len(spec_to_follow.shape) + shard_dim
461
+ out_placements.append(Shard(new_shard_dim))
462
+ elif isinstance(placement, _Partial) and not linearity:
463
+ # clear the partial placement if the op does not support linearity
464
+ # by default we just replicate the partial, need to see if this
465
+ # is optimal for all cases
466
+ out_placements.append(Replicate())
467
+ else:
468
+ out_placements.append(placement)
469
+
470
+ input_specs: List[DTensorSpec] = []
471
+ redistribute_costs: List[List[float]] = []
472
+ for idx, input_arg in enumerate(args_schema):
473
+ if isinstance(input_arg, OpStrategy):
474
+ # every arg follow the out_placements, but need to handle broadcasting
475
+ input_arg_spec = input_arg.strategies[0].output_spec
476
+ input_arg_dims_map = infer_broadcast_dims_map(
477
+ common_shape, input_arg_spec.shape
478
+ )
479
+ input_target_placements = map_placements_after_broadcast(
480
+ tuple(out_placements),
481
+ common_shape,
482
+ input_arg_dims_map,
483
+ )
484
+ input_arg_target_spec = DTensorSpec(
485
+ mesh=mesh,
486
+ placements=input_target_placements,
487
+ tensor_meta=input_arg_spec.tensor_meta,
488
+ )
489
+ input_specs.append(input_arg_target_spec)
490
+ redistribute_costs.append(
491
+ generate_redistribute_costs(input_arg, input_arg_target_spec)
492
+ )
493
+
494
+ pointwise_strategy.strategies.append(
495
+ PlacementStrategy(
496
+ output_specs=DTensorSpec(
497
+ mesh=mesh,
498
+ placements=tuple(out_placements),
499
+ ),
500
+ input_specs=input_specs,
501
+ redistribute_cost=redistribute_costs,
502
+ )
503
+ )
504
+ return pointwise_strategy
505
+
506
+
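A small worked example of the shard-dim arithmetic in `common_pointwise_strategy` above; the shapes are made up for illustration.

# the followed operand has shape (8, 4) sharded on dim 0; broadcasting against a
# (2, 8, 4) operand prepends one dim to the common shape, so Shard(0) becomes Shard(1)
spec_to_follow_shape = (8, 4)
common_shape = (2, 8, 4)
shard_dim = 0
new_shard_dim = len(common_shape) - len(spec_to_follow_shape) + shard_dim
assert new_shard_dim == 1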
507
+ def linear_pointwise_strategy(mesh: DeviceMesh, op_schema: OpSchema) -> StrategyType:
508
+ """
509
+ Linear pointwise operators can propagate pending reductions.
510
+ For example, c = add(a, b); if a is pending sum, then c will be
511
+ pending sum as well without any communication overhead.
512
+ """
513
+ return pointwise_strategy(mesh, op_schema, linearity=True)
514
+
515
+
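The linearity property described in the `linear_pointwise_strategy` docstring can be illustrated with plain numbers standing in for per-rank partial sums (the values are made up):

partials_a = [1.0, 2.0, 3.0]  # hypothetical per-rank partial sums of tensor a
partials_b = [4.0, 5.0, 6.0]  # hypothetical per-rank partial sums of tensor b

# add commutes with the pending sum reduction, so the _Partial placement can be kept
assert sum(pa + pb for pa, pb in zip(partials_a, partials_b)) == sum(partials_a) + sum(partials_b)

# a generic pointwise op like mul does not, so partial inputs must be replicated first
assert sum(pa * pb for pa, pb in zip(partials_a, partials_b)) != sum(partials_a) * sum(partials_b)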
516
+ for op in linear_pointwise_ops:
517
+ register_op_strategy(op, schema_info=RuntimeSchemaInfo(static_kwargkey=["out"]))(
518
+ linear_pointwise_strategy
519
+ )
520
+
521
+ for op in pointwise_ops:
522
+ register_op_strategy(op, schema_info=RuntimeSchemaInfo(static_kwargkey=["out"]))(
523
+ pointwise_strategy
524
+ )
525
+
526
+
527
+ # TODO: add all for_each ops
528
+ for_each_ops = [
529
+ aten._foreach_abs_.default,
530
+ aten._foreach_addcdiv_.Scalar,
531
+ aten._foreach_addcdiv_.ScalarList,
532
+ aten._foreach_addcdiv_.Tensor,
533
+ aten._foreach_addcmul.Scalar,
534
+ aten._foreach_addcmul_.Scalar,
535
+ aten._foreach_addcmul_.ScalarList,
536
+ aten._foreach_addcmul_.Tensor,
537
+ aten._foreach_div_.List,
538
+ aten._foreach_div_.ScalarList,
539
+ aten._foreach_lerp_.Scalar,
540
+ aten._foreach_maximum_.List,
541
+ aten._foreach_mul.Scalar,
542
+ aten._foreach_mul.List,
543
+ aten._foreach_mul_.Scalar,
544
+ aten._foreach_mul_.ScalarList,
545
+ aten._foreach_mul_.Tensor,
546
+ aten._foreach_mul_.List,
547
+ aten._foreach_neg.default,
548
+ aten._foreach_neg_.default,
549
+ aten._foreach_reciprocal_.default,
550
+ aten._foreach_sub_.Scalar,
551
+ aten._foreach_sqrt.default,
552
+ aten._foreach_sqrt_.default,
553
+ aten._foreach_zero_.default,
554
+ ]
555
+
556
+ for_each_linearity_ops = [
557
+ aten._foreach_add.Scalar,
558
+ aten._foreach_add_.Scalar,
559
+ aten._foreach_add_.ScalarList,
560
+ aten._foreach_add.List,
561
+ aten._foreach_add_.List,
562
+ ]
563
+
564
+
565
+ def foreach_list_pointwise_strategy(
566
+ mesh: DeviceMesh, op_schema: OpSchema, linearity: bool = False
567
+ ) -> StrategyType:
568
+ """
569
+ Apply the pointwise strategy to the zipped arguments. For example, if we
570
+ run a foreach add of two lists l1 and l2, then we apply the pointwise
571
+ strategy on each pair (l1[i], l2[i]). If the first argument is a list but
572
+ the second (or later) one is a tensor, then we broadcast the tensor by
573
+ replicating it into a list with the length of the first argument.
574
+ """
575
+
576
+ def args_tuple_strategies(args_schema: Tuple[object, ...]) -> List[TupleStrategy]:
577
+ first_arg = args_schema[0]
578
+ assert isinstance(first_arg, TupleStrategy)
579
+ strategy_len = len(first_arg.childs)
580
+ tuple_strategies: List[TupleStrategy] = []
581
+ for arg_idx, arg in enumerate(args_schema):
582
+ if isinstance(arg, TupleStrategy):
583
+ # every tuple strategy should have the same length
584
+ assert len(arg.childs) == strategy_len
585
+ tuple_strategies.append(arg)
586
+ elif isinstance(arg, OpStrategy):
587
+ if arg_idx > 0: # implicitly broadcast
588
+ tuple_strategies.append(
589
+ TupleStrategy([arg for _ in range(strategy_len)])
590
+ )
591
+ else:
592
+ raise RuntimeError(
593
+ f"foreach list op only supports tuple strategy! {op_schema}"
594
+ )
595
+ return tuple_strategies
596
+
597
+ args_strategies = args_tuple_strategies(op_schema.args_schema)
598
+ follow_strategy: TupleStrategy = args_strategies[0]
599
+ foreach_strategy_list: List[OpStrategy] = []
600
+ for child_idx, child_strtgy in enumerate(follow_strategy.childs):
601
+ assert isinstance(child_strtgy, OpStrategy)
602
+ args_schema: List[StrategyType] = [
603
+ arg_strategy.childs[child_idx] for arg_strategy in args_strategies
604
+ ]
605
+ pointwise_strategy: OpStrategy = common_pointwise_strategy(
606
+ mesh, args_schema, child_strtgy, linearity
607
+ )
608
+ foreach_strategy_list.append(pointwise_strategy)
609
+ return TupleStrategy(foreach_strategy_list)
610
+
611
+
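A toy sketch of the zip-and-broadcast convention described in the `foreach_list_pointwise_strategy` docstring above, with plain strings standing in for child strategies (the names are illustrative only):

list_arg = ["s0", "s1", "s2"]  # first arg: one child strategy per list element
tensor_arg = "t"               # a later arg that is a single tensor strategy
broadcasted = [tensor_arg] * len(list_arg)  # implicit broadcast into a list of equal length
assert list(zip(list_arg, broadcasted)) == [("s0", "t"), ("s1", "t"), ("s2", "t")]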
612
+ def foreach_list_linear_pointwise_strategy(
613
+ mesh: DeviceMesh, op_schema: OpSchema
614
+ ) -> StrategyType:
615
+ """
616
+ foreach list op strategy that supports linearity
617
+ """
618
+ return foreach_list_pointwise_strategy(mesh, op_schema, linearity=True)
619
+
620
+
621
+ for op in for_each_ops:
622
+ register_op_strategy(op, schema_info=RuntimeSchemaInfo(needs_pytree=True))(
623
+ foreach_list_pointwise_strategy
624
+ )
625
+
626
+ for op in for_each_linearity_ops:
627
+ register_op_strategy(op, schema_info=RuntimeSchemaInfo(needs_pytree=True))(
628
+ foreach_list_linear_pointwise_strategy
629
+ )
llmeval-env/lib/python3.10/site-packages/torch/distributed/_tensor/ops/tensor_ops.py ADDED
@@ -0,0 +1,826 @@
1
+ # Copyright (c) Meta Platforms, Inc. and affiliates
2
+ import itertools
3
+ from typing import cast, List, Optional, Sequence, Tuple
4
+
5
+ import torch
6
+
7
+ from torch.distributed._tensor._utils import compute_local_shape
8
+ from torch.distributed._tensor.op_schema import (
9
+ OpSchema,
10
+ OpStrategy,
11
+ OutputSharding,
12
+ PlacementStrategy,
13
+ RuntimeSchemaInfo,
14
+ StrategyType,
15
+ TupleStrategy,
16
+ )
17
+ from torch.distributed._tensor.ops.common_rules import pointwise_rule
18
+ from torch.distributed._tensor.ops.embedding_ops import _MaskPartial
19
+ from torch.distributed._tensor.ops.utils import (
20
+ generate_redistribute_costs,
21
+ is_tensor_dim_sharded,
22
+ is_tensor_partial,
23
+ is_tensor_shardable,
24
+ normalize_dim,
25
+ prod,
26
+ register_op_strategy,
27
+ register_prop_rule,
28
+ )
29
+ from torch.distributed._tensor.placement_types import (
30
+ _Partial,
31
+ DTensorSpec,
32
+ Placement,
33
+ Replicate,
34
+ Shard,
35
+ )
36
+ from torch.distributed.device_mesh import DeviceMesh
37
+
38
+
39
+ aten = torch.ops.aten
40
+
41
+
42
+ def default_strategy(mesh: DeviceMesh, op_schema: OpSchema) -> StrategyType:
43
+ # The default strategy just propagates the first input strategy
44
+ select_strategy = op_schema.args_schema[0]
45
+ assert isinstance(select_strategy, OpStrategy)
46
+ default_strategy = []
47
+ for strategy in select_strategy.strategies:
48
+ # we create new DTensorSpecs even for the default strategy to ensure that
49
+ # the tensor metas are distinct between the arguments and outputs
50
+ default_strategy.append(
51
+ PlacementStrategy(
52
+ output_specs=DTensorSpec(
53
+ mesh=strategy.output_spec.mesh,
54
+ placements=strategy.output_spec.placements,
55
+ )
56
+ )
57
+ )
58
+ return OpStrategy(default_strategy)
59
+
60
+
61
+ register_op_strategy(
62
+ [
63
+ aten.clone.default,
64
+ aten.contiguous.default,
65
+ aten.copy_.default,
66
+ aten.detach.default,
67
+ aten.fill_.Scalar,
68
+ aten.zero_.default,
69
+ ]
70
+ )(default_strategy)
71
+
72
+ register_op_strategy(
73
+ aten._to_copy.default, schema_info=RuntimeSchemaInfo(static_kwargkey=["dtype"])
74
+ )(default_strategy)
75
+
76
+
77
+ @register_op_strategy(
78
+ [
79
+ aten.equal.default,
80
+ aten.is_same_size.default,
81
+ ]
82
+ )
83
+ def equal_strategy(mesh: DeviceMesh, op_schema: OpSchema) -> StrategyType:
84
+ # equal_strategy deals with ops that compare two tensors; we need the sharding
85
+ # layouts of the two operands to match, so we choose to follow the arg with the max
86
+ # number of shards. is_same_size is kept here for completeness, as it shares the
87
+ # same strategy in theory.
88
+ self_strategy, other_strategy = op_schema.args_schema
89
+ assert isinstance(self_strategy, OpStrategy)
90
+ assert isinstance(other_strategy, OpStrategy)
91
+
92
+ select_strategy = (
93
+ self_strategy
94
+ if self_strategy.max_num_shards() >= other_strategy.max_num_shards()
95
+ else other_strategy
96
+ )
97
+ equal_strategy = OpStrategy([])
98
+
99
+ for arg_strategy in select_strategy.strategies:
100
+ arg_spec = arg_strategy.output_spec
101
+ if is_tensor_partial(arg_spec):
102
+ # if the arg_spec has partial placements, reshard to replicate;
103
+ # otherwise the local shard tensor comparison would be invalid
104
+ output_spec = DTensorSpec(
105
+ mesh=arg_spec.mesh,
106
+ placements=tuple(
107
+ Replicate() if isinstance(p, _Partial) else p
108
+ for p in arg_spec.placements
109
+ ),
110
+ )
111
+ equal_strategy.strategies.append(
112
+ PlacementStrategy(output_specs=output_spec)
113
+ )
114
+ else:
115
+ equal_strategy.strategies.append(PlacementStrategy(arg_spec))
116
+ return equal_strategy
117
+
118
+
119
+ @register_op_strategy(
120
+ [
121
+ aten.empty_like.default,
122
+ aten.ones_like.default,
123
+ aten.rand_like.default,
124
+ aten.randn_like.default,
125
+ aten.zeros_like.default,
126
+ ],
127
+ schema_info=RuntimeSchemaInfo(1, ["dtype"]),
128
+ )
129
+ @register_op_strategy(
130
+ [aten.full_like.default],
131
+ schema_info=RuntimeSchemaInfo(2, ["dtype"]),
132
+ )
133
+ @register_op_strategy(
134
+ [
135
+ aten.randint_like.default,
136
+ aten.randint_like.low_dtype,
137
+ aten.randint_like.low_dtype_out,
138
+ ],
139
+ schema_info=RuntimeSchemaInfo(3, ["dtype"]),
140
+ )
141
+ def create_like_strategy(mesh: DeviceMesh, op_schema: OpSchema) -> StrategyType:
142
+ # create_like_strategy deals with ops that create tensors with the same
143
+ # shape as the input, but with specific content that does not depend on
144
+ # the input; we can propagate sharding, but we have to make sure we
145
+ # move from partial to replicated.
146
+ select_strategy = op_schema.args_schema[0]
147
+ create_like_strategy = OpStrategy([])
148
+ assert isinstance(select_strategy, OpStrategy)
149
+ for arg_strategy in select_strategy.strategies:
150
+ arg_spec = arg_strategy.output_spec
151
+ if is_tensor_partial(arg_spec):
152
+ # if the arg_spec has partial placements, accept partial
153
+ # in the input_specs but output replicate for
154
+ # those corresponding mesh dims
155
+ output_spec = DTensorSpec(
156
+ mesh=arg_spec.mesh,
157
+ placements=tuple(
158
+ Replicate() if isinstance(p, _Partial) else p
159
+ for p in arg_spec.placements
160
+ ),
161
+ )
162
+ create_like_strategy.strategies.append(
163
+ PlacementStrategy(output_specs=output_spec, input_specs=(arg_spec,))
164
+ )
165
+
166
+ else:
167
+ create_like_strategy.strategies.append(PlacementStrategy(arg_spec))
168
+
169
+ return create_like_strategy
170
+
171
+
172
+ @register_op_strategy(
173
+ [
174
+ aten.new_empty.default,
175
+ aten.new_full.default,
176
+ aten.new_ones.default,
177
+ aten.new_zeros.default,
178
+ aten.new_empty_strided.default, # TODO: re-think new_empty_strided
179
+ ],
180
+ schema_info=RuntimeSchemaInfo(1, ["dtype"]),
181
+ )
182
+ def new_factory_strategy(mesh: DeviceMesh, op_schema: OpSchema) -> StrategyType:
183
+ # TODO: maybe we should generate all possible shardings instead of just staying
184
+ # replicated for new factory methods
185
+ input_strategy = op_schema.args_schema[0]
186
+ new_factory_strategy = OpStrategy([])
187
+ assert isinstance(input_strategy, OpStrategy)
188
+ for arg_strategy in input_strategy.strategies:
189
+ input_spec = arg_strategy.output_spec
190
+ replica_spec = DTensorSpec(mesh, tuple([Replicate()] * mesh.ndim))
191
+ new_factory_strategy.strategies.append(
192
+ PlacementStrategy(output_specs=replica_spec, input_specs=(input_spec,))
193
+ )
194
+
195
+ return new_factory_strategy
196
+
197
+
198
+ @register_op_strategy(aten.bucketize.Tensor)
199
+ def gen_bucketize_strategy(mesh: DeviceMesh, op_schema: OpSchema) -> StrategyType:
200
+ """Just propagate input sharding, but expect replicated for boundaries input."""
201
+ input_strategy = op_schema.args_schema[0]
202
+ bucketize_strategy = OpStrategy([])
203
+ assert isinstance(input_strategy, OpStrategy)
204
+ for arg_strategy in input_strategy.strategies:
205
+ arg_spec = DTensorSpec(mesh, arg_strategy.output_spec.placements)
206
+ replica_spec = DTensorSpec(mesh, tuple([Replicate()] * mesh.ndim))
207
+ bucketize_strategy.strategies.append(
208
+ PlacementStrategy(
209
+ output_specs=arg_spec, input_specs=(arg_spec, replica_spec)
210
+ )
211
+ )
212
+
213
+ return bucketize_strategy
214
+
215
+
216
+ @register_op_strategy(aten.slice.Tensor, schema_info=RuntimeSchemaInfo(1))
217
+ def gen_slice_strategy(mesh: DeviceMesh, op_schema: OpSchema) -> StrategyType:
218
+ """Forward all shardings except the slice dimension."""
219
+ defaults = (None, 0, None, None, 1)
220
+ input_strategy, dim, start, end, step = (
221
+ op_schema.args_schema + defaults[len(op_schema.args_schema) :]
222
+ )
223
+ assert isinstance(input_strategy, OpStrategy)
224
+ input_shape = input_strategy.output_shape
225
+ input_ndim = input_strategy.output_ndim
226
+ assert isinstance(dim, int)
227
+ if start is None:
228
+ start = 0
229
+ if end is None or end > input_shape[dim]:
230
+ end = input_shape[dim]
231
+ assert isinstance(start, int)
232
+ assert isinstance(end, int)
233
+ assert isinstance(step, int)
234
+
235
+ # normalize args
236
+ slice_dim = normalize_dim(dim, input_ndim)
237
+ start = normalize_dim(start, input_shape[dim])
238
+ end = normalize_dim(end, input_shape[dim])
239
+
240
+ redundant_slice = start == 0 and end == input_shape[dim] and step == 1
241
+
242
+ slice_strategy = OpStrategy([])
243
+
244
+ for arg_strategy in input_strategy.strategies:
245
+ arg_spec = arg_strategy.output_spec
246
+ if not is_tensor_dim_sharded(arg_spec, dim=slice_dim) or redundant_slice:
247
+ # only add the strategy if the slice dim is not sharded
248
+ out_spec = DTensorSpec(mesh, arg_spec.placements)
249
+ slice_strategy.strategies.append(PlacementStrategy(output_specs=out_spec))
250
+ if not slice_strategy.strategies:
251
+ # if all strategies are filtered out, unshard all specs on the slice dim
252
+ # of the input strategy and use that as the op strategy
253
+ for arg_strategy in input_strategy.strategies:
254
+ arg_spec = arg_strategy.output_spec
255
+ unshard_spec = DTensorSpec(
256
+ mesh, unshard_tensor_dim(arg_spec.placements, dim=slice_dim)
257
+ )
258
+ slice_strategy.strategies.append(
259
+ PlacementStrategy(output_specs=unshard_spec)
260
+ )
261
+ return slice_strategy
262
+
263
+
264
+ def unshard_tensor_dim(
265
+ placements: Sequence[Placement], dim: int
266
+ ) -> Tuple[Placement, ...]:
267
+ """Disallow the given tensor dimension to be sharded."""
268
+ return tuple(
269
+ p if (not isinstance(p, Shard) or p.dim != dim) else Replicate()
270
+ for p in placements
271
+ )
272
+
273
+
274
+ def replicate_tensor_dim(
275
+ placements: Sequence[Placement], dim: int
276
+ ) -> Tuple[Placement, ...]:
277
+ """Force the given tensor dimension to be replicated."""
278
+ # Not using p.is_shard() to avoid mypy complain about Placement not having
279
+ # attribute dim.
280
+ return tuple(
281
+ Replicate() if p.is_partial() or isinstance(p, Shard) and p.dim == dim else p
282
+ for p in placements
283
+ )
284
+
285
+
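A quick usage sketch of the two placement helpers above, assuming this private module and `placement_types` are importable from an installed torch build:

from torch.distributed._tensor.ops.tensor_ops import unshard_tensor_dim
from torch.distributed._tensor.placement_types import Replicate, Shard

# sharding on dim 1 is dropped; sharding on other dims is left untouched
assert unshard_tensor_dim((Shard(0), Shard(1)), dim=1) == (Shard(0), Replicate())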
286
+ @register_op_strategy(aten.slice_scatter.default, schema_info=RuntimeSchemaInfo(2))
287
+ def gen_slice_scatter_strategy(mesh: DeviceMesh, op_schema: OpSchema) -> StrategyType:
288
+ # 1. the number of dimensions in input and src needs to match.
289
+ # 2. the number of elements on all dims other than the scatter dim needs to match between input and src.
290
+ # 3. the number of elements in src along the scatter dim needs to match the slice size.
291
+ # Given the above:
292
+ # - We suggest for src to follow the sharding of input, except on the scatter dimension,
293
+ # where our best bet for now is to make them replicated as a fall-back.
294
+ # TODO: Ideally we'd like to make sure the output is re-sharded afterwards to keep input sharding.
295
+
296
+ input_strategy = op_schema.args_schema[0]
297
+ assert isinstance(input_strategy, OpStrategy)
298
+ input_ndim = input_strategy.output_ndim
299
+ slice_dim = (
300
+ cast(int, op_schema.args_schema[2]) if len(op_schema.args_schema) > 2 else 0
301
+ )
302
+ slice_dim = normalize_dim(slice_dim, input_ndim)
303
+
304
+ slice_scatter_strategy = OpStrategy([])
305
+ # by default follow the input strategy for both input and src
306
+ for arg_strategy in input_strategy.strategies:
307
+ arg_spec = arg_strategy.output_spec
308
+ if not (
309
+ is_tensor_dim_sharded(arg_spec, dim=slice_dim)
310
+ or is_tensor_partial(arg_spec)
311
+ ):
312
+ # only add the strategy if the slice_scatter dim is not sharded or partial
313
+ slice_scatter_strategy.strategies.append(
314
+ PlacementStrategy(output_specs=arg_spec)
315
+ )
316
+
317
+ if not slice_scatter_strategy.strategies:
318
+ # if all strategies are filtered out, replicate all specs on the slice_scatter dim
319
+ # of the input strategy and use that as the op strategy
320
+ for arg_strategy in input_strategy.strategies:
321
+ arg_spec = arg_strategy.output_spec
322
+ replicate_spec = DTensorSpec(
323
+ mesh, replicate_tensor_dim(arg_spec.placements, dim=slice_dim)
324
+ )
325
+ slice_scatter_strategy.strategies.append(
326
+ PlacementStrategy(output_specs=replicate_spec)
327
+ )
328
+ return slice_scatter_strategy
329
+
330
+
331
+ @register_op_strategy(aten._local_scalar_dense.default)
332
+ def replica_only_strategy(mesh: DeviceMesh, op_schema: OpSchema) -> StrategyType:
333
+ """Only allow replication on the input/output."""
334
+ replicate_spec = DTensorSpec(mesh, tuple([Replicate()] * mesh.ndim))
335
+ return OpStrategy([PlacementStrategy(replicate_spec)])
336
+
337
+
338
+ @register_op_strategy(aten.gather.default)
339
+ def gather_strategy(mesh: DeviceMesh, op_schema: OpSchema) -> StrategyType:
340
+ input_strategy = cast(OpStrategy, op_schema.args_schema[0])
341
+ dim = cast(int, op_schema.args_schema[1])
342
+ index_strategy = cast(OpStrategy, op_schema.args_schema[2])
343
+
344
+ input_shape = input_strategy.output_shape
345
+ index_shape = index_strategy.output_shape
346
+
347
+ all_mesh_dim_strategies = []
348
+
349
+ for mesh_dim in range(mesh.ndim):
350
+ single_mesh_dim_strategies = []
351
+
352
+ # placement list stores placements of [output, input, index]
353
+ # first we always have replicate all for inputs and output
354
+ all_replicate: List[Placement] = [Replicate()] * 3
355
+ single_mesh_dim_strategies.append(all_replicate)
356
+
357
+ # input sharding, input sharded, index accepts mask partial, output follows index
358
+ # this only works when the input is sharded on the gather dimension, and
359
+ # index has size 1 on the gather dimension
360
+ if index_shape[dim] == 1:
361
+ index_partial_placement = _MaskPartial(logical_dim_size=input_shape[dim])
362
+ input_sharding = [
363
+ index_partial_placement,
364
+ Shard(dim),
365
+ index_partial_placement,
366
+ ]
367
+ single_mesh_dim_strategies.append(input_sharding)
368
+
369
+ # index sharding, input replicated, index sharded, output follows index
370
+ # this only works when the sharding dimension is the gather dimension
371
+ index_sharding = [Shard(dim), Replicate(), Shard(dim)]
372
+ single_mesh_dim_strategies.append(index_sharding)
373
+
374
+ all_mesh_dim_strategies.append(single_mesh_dim_strategies)
375
+
376
+ strategy_combs = itertools.product(*all_mesh_dim_strategies)
377
+
378
+ all_strategies = []
379
+ for strategy_comb in strategy_combs:
380
+ spec_list = []
381
+ for specs in zip(*strategy_comb):
382
+ spec_list.append(DTensorSpec(mesh, tuple(specs)))
383
+
384
+ if is_tensor_shardable(input_shape, spec_list[1]) and is_tensor_shardable(
385
+ index_shape, spec_list[2]
386
+ ):
387
+ input_spec, index_spec = spec_list[1:]
388
+ redistribute_cost = [
389
+ generate_redistribute_costs(input_strategy, input_spec),
390
+ generate_redistribute_costs(index_strategy, index_spec),
391
+ ]
392
+ strat = PlacementStrategy(
393
+ output_specs=spec_list[0],
394
+ input_specs=spec_list[1:],
395
+ redistribute_cost=redistribute_cost,
396
+ )
397
+ all_strategies.append(strat)
398
+
399
+ return OpStrategy(all_strategies)
400
+
401
+
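The per-mesh-dim enumeration in `gather_strategy` above can be sketched with plain lists; the placement strings below are stand-ins for real `Placement` objects on a hypothetical 2-D mesh.

import itertools

# each mesh dim contributes candidate [output, input, index] placements
mesh_dim_0 = [["R", "R", "R"], ["S", "R", "S"]]  # replicate-all, or shard output/index
mesh_dim_1 = [["R", "R", "R"]]                   # second mesh dim: replicate only

combos = list(itertools.product(mesh_dim_0, mesh_dim_1))
assert len(combos) == 2
# zip(*combo) regroups placements per tensor across mesh dims, mirroring the DTensorSpec build
assert [tuple(p) for p in zip(*combos[1])] == [("S", "R"), ("R", "R"), ("S", "R")]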
402
+ @register_op_strategy(aten.stack.default, RuntimeSchemaInfo(1, needs_pytree=True))
403
+ def stack_strategy(mesh: DeviceMesh, op_schema: OpSchema) -> StrategyType:
404
+ args_schema = op_schema.args_schema
405
+ input_tuple_strategy = args_schema[0]
406
+ assert isinstance(input_tuple_strategy, TupleStrategy), f"{input_tuple_strategy}"
407
+ dim = cast(int, args_schema[1]) if len(args_schema) > 1 else 0
408
+
409
+ # Follow the 1st child strategy's placement strategies
410
+ child_strategy = input_tuple_strategy.childs[0]
411
+ assert isinstance(child_strategy, OpStrategy), f"{child_strategy}"
412
+ strategies: List[PlacementStrategy] = []
413
+
414
+ # For each arg strategy of the child to follow, we check if every other
415
+ # child has an equal strategy. If so, then that is a valid strategy. If
416
+ # there are no such valid strategies, then we replicate.
417
+ for arg_strategy in child_strategy.strategies:
418
+ arg_spec = arg_strategy.output_spec
419
+ # For each arg strategy (whether the one to follow or other), we
420
+ # replicate the stack dim since we cannot stack on a sharded dim
421
+ if is_tensor_dim_sharded(arg_spec, dim):
422
+ arg_spec = DTensorSpec(
423
+ mesh, unshard_tensor_dim(arg_spec.placements, dim=dim)
424
+ )
425
+ all_compatible = True
426
+ for other_child_strategy in input_tuple_strategy.childs[1:]:
427
+ has_compatible_strategy = False
428
+ assert isinstance(
429
+ other_child_strategy, OpStrategy
430
+ ), f"{other_child_strategy}"
431
+ for other_arg_strategy in other_child_strategy.strategies:
432
+ other_arg_spec = other_arg_strategy.output_spec
433
+ if is_tensor_dim_sharded(other_arg_spec, dim):
434
+ other_arg_spec = DTensorSpec(
435
+ mesh, unshard_tensor_dim(other_arg_spec.placements, dim=dim)
436
+ )
437
+ if other_arg_spec.placements == arg_spec.placements:
438
+ has_compatible_strategy = True
439
+ break
440
+ if not has_compatible_strategy:
441
+ all_compatible = False
442
+ break
443
+ if all_compatible:
444
+ input_specs = tuple(
445
+ arg_spec for _ in range(len(input_tuple_strategy.childs))
446
+ )
447
+ strategies.append(
448
+ PlacementStrategy(
449
+ output_specs=DTensorSpec(mesh, arg_spec.placements),
450
+ input_specs=input_specs,
451
+ )
452
+ )
453
+ if not strategies:
454
+ # Arbitrarily use each child strategy's 0th strategy's output spec
455
+ input_specs = tuple(
456
+ cast(OpStrategy, child_strategy).strategies[0].output_spec
457
+ for child_strategy in input_tuple_strategy.childs
458
+ )
459
+ replicate_spec = DTensorSpec(mesh, tuple(Replicate() for _ in range(mesh.ndim)))
460
+ strategies.append(PlacementStrategy(output_specs=replicate_spec))
461
+ return OpStrategy(strategies)
462
+
463
+
464
+ @register_prop_rule(aten.index_select.default, schema_info=RuntimeSchemaInfo(1))
465
+ def prop_index_select(op_schema: OpSchema) -> OutputSharding:
466
+ values_spec, dim, indices_spec = op_schema.args_schema
467
+
468
+ assert isinstance(values_spec, DTensorSpec)
469
+ assert isinstance(dim, int)
470
+ assert isinstance(indices_spec, DTensorSpec)
471
+
472
+ all_indices_spec: List[Optional[DTensorSpec]] = [
473
+ indices_spec if dim == i else None for i in range(values_spec.ndim)
474
+ ]
475
+
476
+ result = prop_index(
477
+ OpSchema(
478
+ op=op_schema.op,
479
+ args_schema=(values_spec, all_indices_spec),
480
+ kwargs_schema=op_schema.kwargs_schema,
481
+ )
482
+ )
483
+ if result.schema_suggestions:
484
+ result.schema_suggestions = [
485
+ OpSchema(
486
+ op=op_schema.op,
487
+ args_schema=(s.args_schema[0], dim, s.args_schema[1][dim]),
488
+ kwargs_schema=op_schema.kwargs_schema,
489
+ )
490
+ for s in result.schema_suggestions
491
+ ]
492
+ return result
493
+
494
+
495
+ @register_prop_rule(aten.index.Tensor, schema_info=RuntimeSchemaInfo(needs_pytree=True))
496
+ def prop_index(op_schema: OpSchema) -> OutputSharding:
497
+ """
498
+ Expect replicated on the first input; _mostly_ pointwise on the second input.
499
+
500
+ TODO: exception: when the dtype of second input is "bool", then a torch.nonzero needs to be triggered first.
501
+ """
502
+ # Current sharding constraints:
503
+ # For values:
504
+ # 1. We currently require that the dimension of values_spec be replicated or partial
505
+ # if they are being indexed on.
506
+ # 2. Other dimensions of values_spec can remain sharded if they are so.
507
+ # For indices:
508
+ # Indices can be either sharded or replicated. All index tensors need to be sharded
509
+ # in a compatible way, following the pointwise rule (including resolving _Partial
510
+ # into either sharded or replicated)
511
+
512
+ values_spec, multi_indices_spec = op_schema.args_schema
513
+ assert isinstance(values_spec, DTensorSpec)
514
+ assert isinstance(multi_indices_spec, list)
515
+ multi_indices_spec = cast(List[Optional[DTensorSpec]], multi_indices_spec)
516
+ valid_indices_spec: List[Tuple[int, DTensorSpec]] = [
517
+ (i, a) for i, a in enumerate(multi_indices_spec) if a is not None
518
+ ]
519
+
520
+ # 1. All indices have to be sharded equally. Moreover, indices can be broadcast.
521
+ # Here, we piggyback on the pointwise sharding rule for indices.
522
+ indices_out = pointwise_rule(
523
+ OpSchema(
524
+ op=op_schema.op,
525
+ args_schema=tuple(v[1] for v in valid_indices_spec),
526
+ kwargs_schema={},
527
+ )
528
+ )
529
+ need_reshard_on_indices = indices_out.output_spec is None
530
+
531
+ if not need_reshard_on_indices:
532
+ # this means that our inputs are already sharded properly and we will use that as our indices_spec
533
+ assert isinstance(indices_out.output_spec, DTensorSpec)
534
+ indices_spec: DTensorSpec = indices_out.output_spec
535
+ else:
536
+ assert indices_out.schema_suggestions is not None
537
+ valid_indices_suggestion = indices_out.schema_suggestions[0]
538
+ for i, v in enumerate(valid_indices_suggestion.args_spec):
539
+ multi_indices_spec[valid_indices_spec[i][0]] = v
540
+ # we'll need to call pointwise_rule again to see what's our ideal indices_spec and then
541
+ # use that to compute our ideal values_spec
542
+ indices_output_spec = pointwise_rule(valid_indices_suggestion).output_spec
543
+ assert isinstance(indices_output_spec, DTensorSpec)
544
+ indices_spec = indices_output_spec
545
+
546
+ lookup_dims = {v[0] for v in valid_indices_spec}
547
+
548
+ need_reshard_on_values = tuple(
549
+ (isinstance(vp, Shard) and (vp.dim in lookup_dims or isinstance(ip, Shard)))
550
+ for vp, ip in zip(values_spec.placements, indices_spec.placements)
551
+ )
552
+
553
+ if not need_reshard_on_indices and not any(need_reshard_on_values):
554
+ value_placements = values_spec.placements
555
+
556
+ all_dims_consecutive = all(
557
+ b[0] - a[0] == 1
558
+ for b, a in zip(valid_indices_spec[1:], valid_indices_spec[:-1])
559
+ )
560
+ if all_dims_consecutive:
561
+ # if all index vectors are consecutives, insert at the dimension of the first index
562
+ insert_dim: int = valid_indices_spec[0][0]
563
+ else:
564
+ # else, insert on the first dimension
565
+ insert_dim = 0
566
+
567
+ def place(vp: Placement, ip: Placement) -> Placement:
568
+ if isinstance(vp, Shard):
569
+ return Shard(
570
+ vp.dim
571
+ if vp.dim < insert_dim
572
+ # accounts for the offset in output dimensions
573
+ else vp.dim
574
+ + indices_spec.ndim
575
+ - sum(1 if vp.dim > v[0] else 0 for v in valid_indices_spec)
576
+ )
577
+ if isinstance(ip, Shard):
578
+ return Shard(ip.dim + insert_dim)
579
+ # _Partial or Replicated
580
+ return vp
581
+
582
+ value_placements = tuple(
583
+ place(vp, ip)
584
+ for vp, ip in zip(values_spec.placements, indices_spec.placements)
585
+ )
586
+ result = OutputSharding(
587
+ output_spec=DTensorSpec(
588
+ mesh=values_spec.mesh,
589
+ placements=value_placements,
590
+ )
591
+ )
592
+ return result
593
+ else:
594
+ result = OutputSharding(
595
+ output_spec=None,
596
+ schema_suggestions=[
597
+ OpSchema(
598
+ op=op_schema.op,
599
+ args_schema=(
600
+ DTensorSpec(
601
+ mesh=values_spec.mesh,
602
+ placements=tuple(
603
+ [
604
+ Replicate() if need_reshard_on_values[i] else v
605
+ for i, v in enumerate(values_spec.placements)
606
+ ]
607
+ ),
608
+ tensor_meta=values_spec.tensor_meta,
609
+ ),
610
+ multi_indices_spec,
611
+ ),
612
+ kwargs_schema=op_schema.kwargs_schema,
613
+ )
614
+ ],
615
+ )
616
+ return result
617
+
618
+
619
+ @register_prop_rule(
620
+ aten.cat.default, schema_info=RuntimeSchemaInfo(1, needs_pytree=True)
621
+ )
622
+ def cat_rule(op_schema: OpSchema) -> OutputSharding:
623
+ # torch.cat requires that all tensors either have the same shape (except
624
+ # in the concatenating dimension) or be "empty". "Empty" here strictly means
625
+ # tensor.shape is torch.Size([0]). When tensor.ndim > 1, it will be treated
626
+ # as a non-empty tensor and the shape must match on non-cat dimensions.
627
+ def is_empty(spec: DTensorSpec) -> bool:
628
+ return list(spec.shape) == [0]
629
+
630
+ # the first arg is a list of input tensor specs
631
+ tensor_list_specs = cast(List[DTensorSpec], op_schema.args_schema[0])
632
+ assert len(tensor_list_specs) > 0, "torch.cat expects a non-empty list of tensors"
633
+ non_empty_specs = [spec for spec in tensor_list_specs if not is_empty(spec)]
634
+
635
+ if len(non_empty_specs) == 0:
636
+ # all tensors are empty, we can return any output sharding
637
+ return OutputSharding(
638
+ output_spec=DTensorSpec(
639
+ mesh=tensor_list_specs[0].mesh,
640
+ placements=tensor_list_specs[0].placements,
641
+ )
642
+ )
643
+
644
+ assert all(
645
+ spec.ndim == non_empty_specs[0].ndim for spec in non_empty_specs
646
+ ), f"Expect all tensors to have same shape or empty, but got {tensor_list_specs}"
647
+ assert all(
648
+ spec.mesh == tensor_list_specs[0].mesh for spec in tensor_list_specs
649
+ ), f"Expect all tensors to have same mesh, but got {tensor_list_specs}"
650
+
651
+ # ndim will also be the result's ndim
652
+ ndim = 1
653
+ for spec in tensor_list_specs:
654
+ ndim = max(ndim, spec.ndim)
655
+
656
+ dim = 0 # default dim = 0
657
+ if len(op_schema.args_schema) > 1:
658
+ dim = cast(int, op_schema.args_schema[1])
659
+ dim = normalize_dim(dim, ndim)
660
+
661
+ # Make sure all tensors are replicated on cat dimension
662
+ need_reshard = False
663
+ tensor_list_specs_after: List[DTensorSpec] = []
664
+ for spec in tensor_list_specs:
665
+ if not is_empty(spec) and (
666
+ is_tensor_dim_sharded(spec, dim=dim) or is_tensor_partial(spec)
667
+ ):
668
+ need_reshard = True
669
+ tensor_list_specs_after.append(
670
+ DTensorSpec(
671
+ mesh=spec.mesh,
672
+ placements=replicate_tensor_dim(spec.placements, dim=dim),
673
+ tensor_meta=spec.tensor_meta,
674
+ )
675
+ )
676
+ else:
677
+ tensor_list_specs_after.append(spec)
678
+
679
+ tensor_list_specs = tensor_list_specs_after
680
+
681
+ # align non-cat dimensions placements based on reshard cost
682
+ non_empty_specs = [spec for spec in tensor_list_specs if not is_empty(spec)]
683
+ mesh = non_empty_specs[0].mesh
684
+ ndim = non_empty_specs[0].ndim
685
+ new_placements: List[Placement] = []
686
+ for mesh_dim in range(mesh.ndim):
687
+ # compute the minimum cost of resharding on this mesh_dim
688
+ if any(
689
+ spec.placements[mesh_dim] != non_empty_specs[0].placements[mesh_dim]
690
+ for spec in non_empty_specs
691
+ ):
692
+ # only reshard if there is a mismatch
693
+ need_reshard = True
694
+ reshard_cost = []
695
+ for shard_dim in range(ndim):
696
+ # compute the cost of resharding on this shard_dim
697
+ cost: float = 0.0
698
+ for spec in non_empty_specs:
699
+ global_shape = spec.shape
700
+ if global_shape[shard_dim] < mesh.size(mesh_dim):
701
+ # found one tensor where the shard_dim is smaller than
702
+ # mesh_dim. In this case, we cannot shard on this shard_dim,
703
+ # and hence set cost to infinity.
704
+ cost = +float("inf")
705
+ elif (
706
+ is_tensor_dim_sharded(spec, dim=shard_dim)
707
+ or prod(global_shape) == 0
708
+ ):
709
+ continue
710
+ else:
711
+ local_shape = compute_local_shape(
712
+ global_shape, spec.mesh, spec.placements
713
+ )
714
+ cost += prod(local_shape) * spec.mesh.size(mesh_dim)
715
+ reshard_cost.append(cost)
716
+ best_dim = reshard_cost.index(min(reshard_cost))
717
+ new_placements.append(Shard(best_dim))
718
+ else:
719
+ # no mismatch, keep the original placement
720
+ new_placements.append(non_empty_specs[0].placements[mesh_dim])
721
+
722
+ if need_reshard:
723
+ tensor_list_specs_after = []
724
+ for spec in tensor_list_specs:
725
+ if is_empty(spec):
726
+ tensor_list_specs_after.append(spec)
727
+ else:
728
+ tensor_list_specs_after.append(
729
+ DTensorSpec(
730
+ mesh=spec.mesh,
731
+ placements=tuple(new_placements),
732
+ tensor_meta=spec.tensor_meta,
733
+ )
734
+ )
735
+
736
+ return OutputSharding(
737
+ output_spec=None,
738
+ schema_suggestions=[
739
+ OpSchema(
740
+ op=op_schema.op,
741
+ args_schema=(
742
+ tuple(tensor_list_specs_after),
743
+ *op_schema.args_schema[1:],
744
+ ),
745
+ kwargs_schema=op_schema.kwargs_schema,
746
+ ),
747
+ ],
748
+ )
749
+ else:
750
+ # at this point, the cat dim is not sharded,
751
+ return OutputSharding(
752
+ output_spec=DTensorSpec(
753
+ mesh=non_empty_specs[0].mesh,
754
+ placements=non_empty_specs[0].placements,
755
+ ),
756
+ )
757
+
758
+
759
+ @register_prop_rule(
760
+ [
761
+ aten.split.Tensor,
762
+ aten.split_with_sizes.default,
763
+ aten.split_with_sizes_copy.default,
764
+ ],
765
+ schema_info=RuntimeSchemaInfo(1),
766
+ )
767
+ def split_rule(op_schema: OpSchema) -> OutputSharding:
768
+ output_spec_list: List[DTensorSpec] = []
769
+ input_spec = cast(DTensorSpec, op_schema.args_schema[0])
770
+ ndim = input_spec.ndim
771
+ split_size_or_sections = op_schema.args_schema[1]
772
+ dim = cast(int, op_schema.args_schema[2]) if len(op_schema.args_schema) > 2 else 0
773
+ dim = normalize_dim(dim, ndim)
774
+
775
+ # TODO: tensor to split cannot have _Partial
776
+ # in its placements for now. Will need to
777
+ # support in future.
778
+ if input_spec.sums:
779
+ raise NotImplementedError(
780
+ f"splitting distributed tensor with "
781
+ f"_Partial placement is not implemented!\n"
782
+ f"DTensorSpec={input_spec}"
783
+ )
784
+
785
+ # TODO: just like slice op, split replicates before
786
+ # splitting on a sharded dimension
787
+ need_reshard = False
788
+ if is_tensor_dim_sharded(input_spec, dim=dim):
789
+ need_reshard = True
790
+ input_spec = DTensorSpec(
791
+ mesh=input_spec.mesh,
792
+ placements=unshard_tensor_dim(input_spec.placements, dim=dim),
793
+ tensor_meta=input_spec.tensor_meta,
794
+ )
795
+
796
+ if need_reshard:
797
+ return OutputSharding(
798
+ None,
799
+ schema_suggestions=[
800
+ OpSchema(
801
+ op=op_schema.op,
802
+ args_schema=(input_spec,) + op_schema.args_schema[1:],
803
+ kwargs_schema=op_schema.kwargs_schema,
804
+ ),
805
+ ],
806
+ )
807
+
808
+ def size_split(N, i):
809
+ # Last chunk will be smaller if the tensor size N
810
+ # along the given dimension dim is not divisible by i.
811
+ assert i > 0
812
+ return [i] * (N // i) + ([N % i] if N % i != 0 else [])
813
+
814
+ output_size_list = (
815
+ size_split(input_spec.shape[dim], split_size_or_sections)
816
+ if isinstance(split_size_or_sections, int)
817
+ else split_size_or_sections
818
+ )
819
+ output_spec_list = [
820
+ DTensorSpec(
821
+ mesh=input_spec.mesh,
822
+ placements=input_spec.placements,
823
+ )
824
+ for _ in range(len(output_size_list))
825
+ ]
826
+ return OutputSharding(output_spec_list)
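A quick worked example of the nested `size_split` helper in `split_rule` above, re-stated here because the original is nested inside the rule and not importable on its own:

def size_split(N, i):
    # last chunk is smaller when N is not divisible by i, matching torch.split semantics
    assert i > 0
    return [i] * (N // i) + ([N % i] if N % i != 0 else [])


assert size_split(10, 4) == [4, 4, 2]
assert size_split(8, 4) == [4, 4]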
llmeval-env/lib/python3.10/site-packages/torch/distributed/_tensor/ops/utils.py ADDED
@@ -0,0 +1,226 @@
1
+ # Copyright (c) Meta Platforms, Inc. and affiliates
2
+ import functools
3
+ import operator
4
+ from typing import cast, Iterable, List, Sequence, Tuple, Union
5
+
6
+ import torch
7
+ from torch.distributed._tensor._collective_utils import redistribute_cost
8
+ from torch.distributed._tensor.api import DTensor
9
+ from torch.distributed._tensor.op_schema import OpStrategy, RuntimeSchemaInfo
10
+ from torch.distributed._tensor.placement_types import (
11
+ _Partial,
12
+ DTensorSpec,
13
+ Placement,
14
+ Replicate,
15
+ Shard,
16
+ )
17
+
18
+
19
+ # convenient wrapper to register sharding propagation rules
20
+ # pyre-fixme[3]: Return type must be annotated.
21
+ # pyre-fixme[2]: Parameter must be annotated.
22
+ def register_prop_rule(op, schema_info=None):
23
+ # pyre-fixme[53]: Captured variable `func` is not annotated.
24
+ # pyre-fixme[3]: Return type must be annotated.
25
+ # pyre-fixme[2]: Parameter must be annotated.
26
+ def wrapper(impl):
27
+ overloads = op if isinstance(op, list) else [op]
28
+ for overload in overloads:
29
+ DTensor._op_dispatcher.sharding_propagator.register_sharding_prop_rule(
30
+ overload, impl, schema_info
31
+ )
32
+ return impl
33
+
34
+ return wrapper
35
+
36
+
37
+ def register_op_strategy(op, schema_info=None):
38
+ # pyre-fixme[53]: Captured variable `func` is not annotated.
39
+ # pyre-fixme[3]: Return type must be annotated.
40
+ # pyre-fixme[2]: Parameter must be annotated.
41
+
42
+ # For every ATen op that accepts any args in this list,
43
+ # the arg itself can impact the strides (and potentially the sharding strategy)
44
+ # of the output tensor.
45
+ # thus, we will detect ATen schemas with any of these args and ensure
46
+ # that they get specialized here.
47
+ arg_names_that_require_specializing_cache_strategy = [
48
+ "memory_format",
49
+ ]
50
+
51
+ def wrapper(impl):
52
+ if isinstance(op, list):
53
+ overloads = op
54
+ else:
55
+ overloads = [op]
56
+
57
+ for overload in overloads:
58
+ curr_schema_info = None
59
+ if schema_info is None:
60
+ specialized_args = [
61
+ a.name
62
+ for a in overload._schema.arguments
63
+ if a.name in arg_names_that_require_specializing_cache_strategy
64
+ ]
65
+ if any(specialized_args):
66
+ curr_schema_info = RuntimeSchemaInfo(
67
+ static_kwargkey=specialized_args
68
+ )
69
+ else:
70
+ curr_schema_info = schema_info
71
+ DTensor._op_dispatcher.sharding_propagator.register_op_strategy(
72
+ overload, impl, curr_schema_info
73
+ )
74
+ return impl
75
+
76
+ return wrapper
77
+
78
+
79
+ def as_list(
80
+ x: Union[List[object], object]
81
+ # pyre-fixme[11]: Annotation `immutable_list` is not defined as a type.
82
+ ) -> Union[List[object], torch.fx.immutable_collections.immutable_list]: # type: ignore[valid-type]
83
+ # During tracing, `aten.sum.dim_IntList` uses `immutable_list` for its args,
84
+ # which is an object but treated as a list by the tracer. Therefore, keep
85
+ # `immutable_list` intact here as well.
86
+ if type(x) is list or isinstance(x, torch.fx.immutable_collections.immutable_list):
87
+ return x
88
+ else:
89
+ return [x]
90
+
91
+
92
+ def normalize_dim(dim: int, ndim: int) -> int:
93
+ return dim if dim >= 0 else dim + ndim
94
+
95
+
96
+ def normalize_dims(dims: Union[int, Sequence[int]], ndim: int) -> Sequence[int]:
97
+ """Normalize a dim or a sequence of dims, so that they are all positive."""
98
+ if isinstance(dims, int):
99
+ dims = (normalize_dim(dims, ndim),)
100
+ elif isinstance(dims, list):
101
+ dims = [normalize_dim(dim, ndim) for dim in dims]
102
+ elif isinstance(dims, tuple):
103
+ dims = tuple([normalize_dim(dim, ndim) for dim in dims])
104
+ return dims
105
+
106
+
107
+ def normalize_to_torch_size(size) -> torch.Size:
108
+ """
109
+ Unify variable types of size argument to torch.Size
110
+ Acceptable types include:
111
+ int, Sequence[int], Tuple[int], Tuple[Sequence[int]],
112
+ or torch.Size
113
+ """
114
+ if isinstance(size, torch.Size):
115
+ return size
116
+
117
+ if isinstance(size, int):
118
+ torch_size = [size]
119
+ elif len(size) == 1 and isinstance(size[0], Sequence):
120
+ torch_size = list(size[0])
121
+ else:
122
+ torch_size = list(size)
123
+ return torch.Size(torch_size)
124
+
125
+
126
+ def prod(xs: Iterable[int]) -> int:
127
+ return functools.reduce(operator.mul, xs, 1)
128
+
129
+
130
+ def is_tensor_shardable(shape: Sequence[int], spec: DTensorSpec) -> bool:
131
+ """Check if the shape is shardable according to the spec."""
132
+ # number of shards in each tensor dimension
133
+ shards_map = [1] * len(shape)
134
+ for i, placement in enumerate(spec.placements):
135
+ if placement.is_shard():
136
+ shard_dim = cast(Shard, placement).dim
137
+ shards_map[shard_dim] *= spec.mesh.size(i)
138
+
139
+ for i, dim_size in enumerate(shape):
140
+ # TODO: maybe we should determine is_shardable based on
141
+ # whether it's evenly sharded or not
142
+ if shards_map[i] > 1 and dim_size < shards_map[i]:
143
+ return False
144
+
145
+ return True
146
+
147
+
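The check in `is_tensor_shardable` above reduces to comparing each dim size against the number of shards requested on it; a pure-Python sketch with made-up values:

shape = (2, 8)
shards_map = [4, 1]  # 4 shards requested on dim 0, none on dim 1
shardable = all(dim_size >= n_shards for dim_size, n_shards in zip(shape, shards_map) if n_shards > 1)
assert shardable is False  # dim 0 has only 2 elements but 4 shards are requested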
148
+ def is_tensor_evenly_shardable(shape: Sequence[int], spec: DTensorSpec) -> bool:
149
+ """Check if the shape is evenly shardable according to the spec."""
150
+ # number of shards in each tensor dimension
151
+ shards_map = [1] * len(shape)
152
+ for i, placement in enumerate(spec.placements):
153
+ if placement.is_shard():
154
+ shard_dim = cast(Shard, placement).dim
155
+ shards_map[shard_dim] *= spec.mesh.size(i)
156
+
157
+ for i, dim_size in enumerate(shape):
158
+ if shards_map[i] > 1 and (dim_size % shards_map[i] != 0):
159
+ return False
160
+
161
+ return True
162
+
163
+
164
+ def is_tensor_dim_sharded(spec: DTensorSpec, dim: int) -> bool:
165
+ """Return True if tensor dim is sharded."""
166
+ return any(p.is_shard(dim) for p in spec.placements)
167
+
168
+
169
+ def is_tensor_partial(spec: DTensorSpec) -> bool:
170
+ """Return True if tensor is partial on the mesh."""
171
+ return any(p.is_partial() for p in spec.placements)
172
+
173
+
174
+ def infer_broadcast_dims_map(
175
+ common_shape: torch.Size, input_shape: torch.Size
176
+ ) -> List[int]:
177
+ # infer the broadcast dims map, where it maps from the common shape dim to the input shape dim
178
+ # this is aligned with the broadcast semantics
179
+ common_ndim = len(common_shape)
180
+ input_ndim = len(input_shape)
181
+ broadcast_dims_map = [-1] * common_ndim
182
+ for idx in range(-1, -1 - input_ndim, -1):
183
+ if input_shape[idx] == common_shape[idx]:
184
+ broadcast_dims_map[common_ndim + idx] = input_ndim + idx
185
+ return broadcast_dims_map
186
+
187
+
188
+ def map_placements_after_broadcast(
189
+ placements: Tuple[Placement, ...],
190
+ shape: torch.Size,
191
+ broadcast_dims_map: List[int],
192
+ ) -> Tuple[Placement, ...]:
193
+ """Map each placement based on the output shape after broadcast."""
194
+ new_placements: List[Placement] = []
195
+ for placement in placements:
196
+ if isinstance(placement, (Replicate, _Partial)):
197
+ new_placements.append(placement)
198
+ else:
199
+ assert isinstance(placement, Shard)
200
+ shard_dim = normalize_dim(placement.dim, len(shape))
201
+ new_shard_dim = broadcast_dims_map[shard_dim]
202
+ if new_shard_dim != -1:
203
+ # there's a map from the common shape shard dim to
204
+ # the input shape shard dim before broadcasting,
205
+ # use that instead
206
+ new_placements.append(Shard(new_shard_dim))
207
+ else:
208
+ # there's no map between common shape shard dim and
209
+ # the input shape shard dim before broadcasting,
210
+ # in this case it means implicit broadcasting happened
211
+ # in this dim, so we can just mark it as replicate
212
+ # and implicit broadcast will broadcast automatically
213
+ # to the sharded shape
214
+ new_placements.append(Replicate())
215
+
216
+ return tuple(new_placements)
217
+
218
+
219
+ def generate_redistribute_costs(
220
+ src_strategy: OpStrategy, dst_spec: DTensorSpec
221
+ ) -> List[float]:
222
+ redistribute_costs: List[float] = []
223
+ for strat in src_strategy.strategies:
224
+ redistribute_costs.append(redistribute_cost(strat.output_spec, dst_spec))
225
+
226
+ return redistribute_costs
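
The broadcast and dim-normalization helpers in this file are easiest to follow with concrete shapes. Below is a minimal sketch exercising them; it assumes the module is importable as torch.distributed._tensor.ops.utils (the path this diff adds), and the shapes are illustrative only:

    import torch
    from torch.distributed._tensor.ops.utils import (
        infer_broadcast_dims_map,
        normalize_dims,
        prod,
    )

    # dims are normalized against the tensor rank, so -1 becomes ndim - 1
    assert normalize_dims(-1, 3) == (2,)
    assert normalize_dims([0, -2], 4) == [0, 2]

    # prod is a plain product over an iterable of ints (empty iterable -> 1)
    assert prod([2, 3, 4]) == 24

    # broadcasting (2, 1, 4) up to (5, 2, 3, 4): dims whose sizes match map back
    # to the input dims; missing leading dims and size-1 (broadcast) dims map to -1
    common, inp = torch.Size([5, 2, 3, 4]), torch.Size([2, 1, 4])
    assert infer_broadcast_dims_map(common, inp) == [-1, 0, -1, 2]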
llmeval-env/lib/python3.10/site-packages/torch/distributed/elastic/multiprocessing/errors/__init__.py ADDED
@@ -0,0 +1,375 @@
1
+ #!/usr/bin/env python3
2
+
3
+ # Copyright (c) Facebook, Inc. and its affiliates.
4
+ # All rights reserved.
5
+ #
6
+ # This source code is licensed under the BSD-style license found in the
7
+ # LICENSE file in the root directory of this source tree.
8
+
9
+ """
10
+ Each host in a distributed PyTorch job runs with a single TorchElastic agent,
11
+ and multiple workers (as children processes of the TorchElastic agent).
12
+ Since the workers are user-provided (your PyTorch script/job), TorchElastic
13
+ has a way to propagate errors on the trainers through the agent and up to the
14
+ scheduler, which ultimately informs the end-user about the state of the job
15
+ and applies any retry policies.
16
+
17
+ TorchElastic categorizes errors into 3 categories:
18
+
19
+ +----------------+----------------+--------------------------------------------------------------+
20
+ | Category | Sub-Category | Description |
21
+ +================+================+==============================================================+
22
+ | User Error | Input Error | invalid inputs to TorchElastic APIs (e.g. min > max nodes) |
23
+ | +----------------+--------------------------------------------------------------+
24
+ | | Worker Failure | any failures on the worker child process |
25
+ +----------------+----------------+--------------------------------------------------------------+
26
+ | Platform Error | n/a | failures caused by the agent |
27
+ +----------------+----------------+--------------------------------------------------------------+
28
+ | Infra Error | n/a | failures outside the domain of the agent and workers |
29
+ | | | (e.g. host failures) |
30
+ +----------------+----------------+--------------------------------------------------------------+
31
+
32
+ All errors other than "Worker Failure" are either raised canonically from the
33
+ agent process or implicitly or explicitly crash the agent process. So the
34
+ standard language (python) provided exception handling strategies apply.
35
+
36
+ Worker Failures are special because the exception/failure originates on a different
37
+ process from the agent so the error needs to be propagated inter-process
38
+ (e.g. the agent cannot simply ``try-catch`` an exception raised on the worker process).
39
+
40
+ TorchElastic agents use :func:`torch.distributed.elastic.multiprocessing.start_processes`
41
+ to launch the workers, which has a simple file-based inter-process error propagation
42
+ built-in.
43
+
44
+ Any function or binary entrypoint decorated with :func:`record`
45
+ will write uncaught exceptions (with the trace information) to a file specified by the
46
+ environment variable ``TORCHELASTIC_ERROR_FILE``. The parent process (e.g. agent)
47
+ sets this env var on each child it launches, then aggregates the error files for all
48
+ children, and propagates the one with the **smallest** timestamp (e.g. the **first** error).
49
+ """
50
+
51
+ import json
52
+ import os
53
+ import signal
54
+ import socket
55
+ import time
56
+ import warnings
57
+ from dataclasses import dataclass, field
58
+ from datetime import datetime
59
+ from functools import wraps
60
+ from string import Template
61
+ from typing import Any, Callable, Dict, List, Optional, Tuple, TypeVar
62
+
63
+ from torch.distributed.elastic.utils.logging import get_logger
64
+
65
+ from .error_handler import ErrorHandler # noqa: F401
66
+ from .handlers import get_error_handler # noqa: F401
67
+
68
+ __all__ = ["ProcessFailure", "ChildFailedError", "record", "ErrorHandler", "get_error_handler"]
69
+
70
+ log = get_logger(__name__)
71
+
72
+
73
+ JSON = Dict
74
+
75
+ _EMPTY_ERROR_DATA = {"message": "<NONE>"}
76
+ _NOT_AVAILABLE = "<N/A>"
77
+
78
+ T = TypeVar("T")
79
+
80
+
81
+ @dataclass
82
+ class ProcessFailure:
83
+ """
84
+ Represent the failed process result. When the worker process fails, it may record the failure's root cause into the file.
85
+
86
+ Tries to read the failure timestamp from the provided ``error_file``,
87
+ if the ``error_file`` does not exist, the timestamp is the current
88
+ timestamp (seconds since epoch).
89
+
90
+ The ``message`` field is a concise explanation of the failure. If
91
+ the error file exists then the message is obtained from the error file.
92
+ Otherwise one is generated based on the failure signature.
93
+
94
+ .. note:: It is assumed that the ``error_file`` is written by
95
+ ``torch.distributed.elastic.multiprocessing.errors.error_handler.ErrorHandler``.
96
+ Otherwise the behavior is undefined.
97
+
98
+ """
99
+
100
+ local_rank: int
101
+ pid: int
102
+ exitcode: int
103
+ error_file: str
104
+ error_file_data: JSON = field(init=False)
105
+ message: str = field(init=False)
106
+ timestamp: int = field(init=False)
107
+
108
+ def __post_init__(self):
109
+ self.error_file_data = _EMPTY_ERROR_DATA
110
+ if os.path.isfile(self.error_file):
111
+ try:
112
+ with open(self.error_file) as fp:
113
+ self.error_file_data = json.load(fp)
114
+ log.debug(
115
+ "User process failed with error data: %s", json.dumps(self.error_file_data, indent=2)
116
+ )
117
+ self.message, self.timestamp = self._get_error_data(
118
+ self.error_file_data
119
+ )
120
+ except Exception:
121
+ log.exception("Failed to parse reply file: %s", self.error_file)
122
+ raise
123
+ else:
124
+ self._set_no_reply_file()
125
+
126
+ # make up an informative message if not already present
127
+ if not self.message:
128
+ # signals typically do not generate an error file message
129
+ if self.exitcode < 0:
130
+ self.message = (
131
+ f"Signal {-self.exitcode} ({self.signal_name()})"
132
+ f" received by PID {self.pid}"
133
+ )
134
+ else:
135
+ self.message = "To enable traceback see: https://pytorch.org/docs/stable/elastic/errors.html"
136
+
137
+ def _get_error_data(self, error_file_data: Dict[str, Any]) -> Tuple[str, int]:
138
+ message = error_file_data["message"]
139
+ if isinstance(message, str):
140
+ timestamp = int(error_file_data.get("timestamp", 0))
141
+ else:
142
+ timestamp = int(message["extraInfo"]["timestamp"])
143
+ return (message, timestamp)
144
+
145
+ def _set_no_reply_file(self):
146
+ self.error_file = _NOT_AVAILABLE
147
+ self.error_file_data = _EMPTY_ERROR_DATA
148
+ self.message = ""
149
+ self.timestamp = int(time.time())
150
+
151
+ def signal_name(self) -> str:
152
+ if self.exitcode < 0:
153
+ # We don't want to kill the parent process trying to find the signal name.
154
+ # if the signal doesn't map to a known name, use not available.
155
+ try:
156
+ return signal.Signals(-self.exitcode).name
157
+ except Exception:
158
+ return _NOT_AVAILABLE
159
+ else:
160
+ return _NOT_AVAILABLE
161
+
162
+ def timestamp_isoformat(self):
163
+ """Return timestamp in ISO format (YYYY-MM-DD_HH:MM:SS)."""
164
+ return datetime.fromtimestamp(self.timestamp).isoformat(sep="_")
165
+
166
+
167
+ GlobalRank = int
168
+
169
+ _FAILURE_FORMAT_TEMPLATE = """[${idx}]:
170
+ time : ${time}
171
+ host : ${hostname}
172
+ rank : ${rank} (local_rank: ${local_rank})
173
+ exitcode : ${exitcode} (pid: ${pid})
174
+ error_file: ${error_file}
175
+ traceback : ${message}"""
176
+
177
+ # extra new lines before and after are intentional
178
+ _MSG_FORMAT_TEMPLATE = """
179
+ ${boarder}
180
+ ${title}
181
+ ${section}
182
+ Failures:
183
+ ${other_failures}
184
+ ${section}
185
+ Root Cause (first observed failure):
186
+ ${root_failure}
187
+ ${boarder}"""
188
+
189
+
190
+ class ChildFailedError(Exception):
191
+ """
192
+ Special exception type that can be raised from a function annotated with the
193
+ ``@record`` decorator to have the child process' (root exception) propagate
194
+ up the stack as-is (e.g. without being wrapped in the parent's traceback).
195
+
196
+ Useful in cases where the parent is a simple nanny process
197
+ and the child (worker) processes are actually doing meaningful compute.
198
+ In this case, errors typically occur on the child process as the parent
199
+ is not doing anything non-trivial, and child errors should be propagated
200
+ to the scheduler for accurate root cause diagnostics.
201
+
202
+ .. note:: The propagation relies on error files rather than exception handling to
203
+ support both function and binary launches.
204
+
205
+ Example:
206
+ ::
207
+
208
+ # process tree on a host (container)
209
+ 0: scheduler-init-process:
210
+ |- 1: torchelastic_agent:
211
+ |- 2: trainer_0 (ok)
212
+ |- 3: trainer_1 (fail) -> error.json
213
+ |- ...
214
+ |- n+2: trainer_n (ok)
215
+ |- n+3: other processes
216
+ |- ...
217
+
218
+ In the example above, trainer 1's failure (written into error.json) is
219
+ the root cause and should be reported to the scheduler's init process.
220
+ The torchelastic agent raises a ``ChildFailedError("trainer", {1: "trainer_1/error.json"})``
221
+ upon detecting trainer 1's failure which would propagate the contents
222
+ of trainer 1's error file to the scheduler's init process.
223
+ """
224
+
225
+ def __init__(self, name: str, failures: Dict[GlobalRank, ProcessFailure]):
226
+ self.name = name
227
+ self.failures = failures
228
+ assert (
229
+ self.failures
230
+ ) # does not make sense to create a ChildFailedError with no failures
231
+ super().__init__(self.format_msg())
232
+
233
+ def get_first_failure(self) -> Tuple[GlobalRank, ProcessFailure]:
234
+ rank = min(self.failures.keys(), key=lambda r: self.failures[r].timestamp)
235
+ return rank, self.failures[rank]
236
+
237
+ def format_msg(self, boarder_delim="=", section_delim="-"):
238
+ title = f"{self.name} FAILED"
239
+ root_rank, root_failure = self.get_first_failure()
240
+
241
+ root_failure_fmt: str = ""
242
+ other_failures_fmt: List[str] = []
243
+ width = len(title)
244
+ for idx, (rank, failure) in enumerate(self.failures.items()):
245
+ fmt, w = self._format_failure(idx, rank, failure)
246
+ width = max(width, w)
247
+ if rank == root_rank:
248
+ root_failure_fmt = fmt
249
+ else:
250
+ other_failures_fmt.append(fmt)
251
+
252
+ # upper boundary on width
253
+ width = min(width, 60)
254
+
255
+ return Template(_MSG_FORMAT_TEMPLATE).substitute(
256
+ boarder=boarder_delim * width,
257
+ title=title,
258
+ section=section_delim * width,
259
+ root_failure=root_failure_fmt,
260
+ other_failures="\n".join(other_failures_fmt or [" <NO_OTHER_FAILURES>"]),
261
+ )
262
+
263
+ def _format_failure(
264
+ self, idx: int, rank: int, failure: ProcessFailure
265
+ ) -> Tuple[str, int]:
266
+
267
+ # failure.message is either a str (when the failure does not generate a traceback - e.g. signals)
268
+ # or a dict (json) of the form
269
+ # {"message": $ERROR_MSG, "extraInfo": {"py_callstack": $TRACEBACK, timestamp: $TS}}
270
+ # so the display logic is:
271
+ # 1. if failure.message is not a dict (it is a str) just show it as is
272
+ # 2. else try to get the traceback (py_callstack)
273
+ # 3. if the traceback is not there, use the message
274
+ # 4. if the message is not there show <N/A>
275
+ msg = failure.message
276
+ if isinstance(failure.message, dict):
277
+ msg = (
278
+ failure.message.get("extraInfo", {})
279
+ .get("py_callstack", failure.message.get("message", "<N/A>"))
280
+ .replace("\n", "\n ") # to properly indent the traceback
281
+ )
282
+
283
+ fmt = Template(_FAILURE_FORMAT_TEMPLATE).substitute(
284
+ idx=idx,
285
+ time=failure.timestamp_isoformat(),
286
+ hostname=socket.getfqdn(),
287
+ rank=rank,
288
+ local_rank=failure.local_rank,
289
+ exitcode=failure.exitcode,
290
+ pid=failure.pid,
291
+ error_file=failure.error_file,
292
+ message=msg,
293
+ )
294
+ width = 0
295
+ for line in fmt.split("\n"):
296
+ width = max(width, len(line))
297
+ return fmt, width
298
+
299
+
300
+ def record(
301
+ fn: Callable[..., T], error_handler: Optional[ErrorHandler] = None
302
+ ) -> Callable[..., T]:
303
+ """
304
+ Syntactic sugar to record errors/exceptions that happened in the decorated
305
+ function using the provided ``error_handler``.
306
+
307
+ Using this decorator is equivalent to:
308
+
309
+ ::
310
+
311
+ error_handler = get_error_handler()
312
+ error_handler.initialize()
313
+ try:
314
+ foobar()
315
+ except ChildFailedError as e:
316
+ _, failure = e.get_first_failure()
317
+ error_handler.dump_error_file(failure.error_file, failure.exitcode)
318
+ raise
319
+ except Exception as e:
320
+ error_handler.record(e)
321
+ raise
322
+
323
+ .. important:: use this decorator once per process at the top level method,
324
+ typically this is the main method.
325
+
326
+ Example
327
+
328
+ ::
329
+
330
+ @record
331
+ def main():
332
+ pass
333
+
334
+ if __name__=="__main__":
335
+ main()
336
+
337
+ """
338
+ if not error_handler:
339
+ error_handler = get_error_handler()
340
+
341
+ def wrap(f):
342
+ @wraps(f)
343
+ def wrapper(*args, **kwargs):
344
+ assert error_handler is not None # assertion for mypy type checker
345
+ error_handler.initialize()
346
+ try:
347
+ return f(*args, **kwargs)
348
+ except SystemExit as se:
349
+ # For run_path based entrypoints, SystemExit with code = 0 will never exit.
350
+ # Handling it here by returning a value:
351
+ if se.code == 0:
352
+ return None
353
+ else:
354
+ raise
355
+ except ChildFailedError as e:
356
+ rank, failure = e.get_first_failure()
357
+ if failure.error_file != _NOT_AVAILABLE:
358
+ error_handler.dump_error_file(failure.error_file, failure.exitcode)
359
+ else:
360
+ log.info(
361
+ (
362
+ "local_rank %s FAILED with no error file."
363
+ " Decorate your entrypoint fn with @record for traceback info."
364
+ " See: https://pytorch.org/docs/stable/elastic/errors.html",
365
+ rank
366
+ )
367
+ )
368
+ raise
369
+ except Exception as e:
370
+ error_handler.record_exception(e)
371
+ raise
372
+
373
+ return wrapper
374
+
375
+ return wrap(fn)
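
A small hedged sketch of the round trip described in the module docstring and implemented by ``record``: the temporary directory, the error file name, and the RuntimeError below are made up for illustration; normally the agent sets TORCHELASTIC_ERROR_FILE on each child it launches.

    import json
    import os
    import tempfile

    from torch.distributed.elastic.multiprocessing.errors import record

    error_file = os.path.join(tempfile.mkdtemp(), "error.json")
    os.environ["TORCHELASTIC_ERROR_FILE"] = error_file  # normally set by the agent

    @record
    def main():
        raise RuntimeError("boom")

    try:
        main()
    except RuntimeError:
        pass  # @record re-raises after recording

    # ErrorHandler.record_exception wrote the structured error for the agent to pick up
    with open(error_file) as fp:
        data = json.load(fp)
    print(data["message"]["message"])            # RuntimeError: boom
    print(sorted(data["message"]["extraInfo"]))  # ['py_callstack', 'timestamp']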
llmeval-env/lib/python3.10/site-packages/torch/distributed/elastic/multiprocessing/errors/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (12.5 kB). View file
 
llmeval-env/lib/python3.10/site-packages/torch/distributed/elastic/multiprocessing/errors/__pycache__/error_handler.cpython-310.pyc ADDED
Binary file (5.21 kB). View file
 
llmeval-env/lib/python3.10/site-packages/torch/distributed/elastic/multiprocessing/errors/__pycache__/handlers.cpython-310.pyc ADDED
Binary file (451 Bytes). View file
 
llmeval-env/lib/python3.10/site-packages/torch/distributed/elastic/multiprocessing/errors/error_handler.py ADDED
@@ -0,0 +1,159 @@
1
+ #!/usr/bin/env python3
2
+
3
+ # Copyright (c) Facebook, Inc. and its affiliates.
4
+ # All rights reserved.
5
+ #
6
+ # This source code is licensed under the BSD-style license found in the
7
+ # LICENSE file in the root directory of this source tree.
8
+ import faulthandler
9
+ import json
10
+ import logging
11
+ import os
12
+ import time
13
+ import traceback
14
+ import warnings
15
+ from typing import Any, Dict, Optional
16
+
17
+ __all__ = ['ErrorHandler']
18
+
19
+ log = logging.getLogger(__name__)
20
+
21
+
22
+ class ErrorHandler:
23
+ """
24
+ Write the provided exception object along with some other metadata about
25
+ the error in a structured way in JSON format to an error file specified by the
26
+ environment variable: ``TORCHELASTIC_ERROR_FILE``. If this environment
27
+ variable is not set, then simply logs the contents of what would have been
28
+ written to the error file.
29
+
30
+ This handler may be subclassed to customize the handling of the error.
31
+ Subclasses should override ``initialize()`` and ``record_exception()``.
32
+ """
33
+
34
+ def _get_error_file_path(self) -> Optional[str]:
35
+ """
36
+ Return the error file path.
37
+
38
+ May return ``None`` to have the structured error be logged only.
39
+ """
40
+ return os.environ.get("TORCHELASTIC_ERROR_FILE", None)
41
+
42
+ def initialize(self) -> None:
43
+ """
44
+ Call prior to running code that we wish to capture errors/exceptions.
45
+
46
+ Typically registers signal/fault handlers. Users can override this
47
+ function to add custom initialization/registrations that aid in
48
+ propagation/information of errors/signals/exceptions/faults.
49
+ """
50
+ try:
51
+ faulthandler.enable(all_threads=True)
52
+ except Exception as e:
53
+ warnings.warn(f"Unable to enable fault handler. {type(e).__name__}: {e}")
54
+
55
+ def _write_error_file(self, file_path: str, error_msg: str) -> None:
56
+ """Write error message to the file."""
57
+ try:
58
+ with open(file_path, "w") as fp:
59
+ fp.write(error_msg)
60
+ except Exception as e:
61
+ warnings.warn(f"Unable to write error to file. {type(e).__name__}: {e}")
62
+
63
+ def record_exception(self, e: BaseException) -> None:
64
+ """
65
+ Write a structured information about the exception into an error file in JSON format.
66
+
67
+ If the error file cannot be determined, then logs the content
68
+ that would have been written to the error file.
69
+ """
70
+ file = self._get_error_file_path()
71
+ if file:
72
+ data = {
73
+ "message": {
74
+ "message": f"{type(e).__name__}: {e}",
75
+ "extraInfo": {
76
+ "py_callstack": traceback.format_exc(),
77
+ "timestamp": str(int(time.time())),
78
+ },
79
+ }
80
+ }
81
+ with open(file, "w") as fp:
82
+ json.dump(data, fp)
83
+
84
+ def override_error_code_in_rootcause_data(
85
+ self,
86
+ rootcause_error_file: str,
87
+ rootcause_error: Dict[str, Any],
88
+ error_code: int = 0,
89
+ ):
90
+ """Modify the rootcause_error read from the file, to correctly set the exit code."""
91
+ if "message" not in rootcause_error:
92
+ log.warning(
93
+ "child error file (%s) does not have field `message`. \n"
94
+ "cannot override error code: %s",
95
+ rootcause_error_file, error_code
96
+ )
97
+ elif isinstance(rootcause_error["message"], str):
98
+ log.warning(
99
+ "child error file (%s) has a new message format. \n"
100
+ "skipping error code override",
101
+ rootcause_error_file
102
+ )
103
+ else:
104
+ rootcause_error["message"]["errorCode"] = error_code
105
+
106
+ def dump_error_file(self, rootcause_error_file: str, error_code: int = 0):
107
+ """Dump parent error file from child process's root cause error and error code."""
108
+ with open(rootcause_error_file) as fp:
109
+ rootcause_error = json.load(fp)
110
+ # Override error code since the child process cannot capture the error code if it
111
+ # is terminated by signals like SIGSEGV.
112
+ if error_code:
113
+ self.override_error_code_in_rootcause_data(rootcause_error_file, rootcause_error, error_code)
114
+ log.debug(
115
+ "child error file (%s) contents:\n"
116
+ "%s",
117
+ rootcause_error_file, json.dumps(rootcause_error, indent=2)
118
+ )
119
+
120
+ my_error_file = self._get_error_file_path()
121
+ if my_error_file:
122
+ # Guard against existing error files
123
+ # This can happen when the child is created using multiprocessing
124
+ # and the same env var (TORCHELASTIC_ERROR_FILE) is used on the
125
+ # parent and child to specify the error files (respectively)
126
+ # because the env vars on the child is set in the wrapper function
127
+ # and by default the child inherits the parent's env vars, if the child
128
+ # process receives a signal before the wrapper function kicks in
129
+ # and the signal handler writes to the error file, then the child
130
+ # will write to the parent's error file. In this case just log the
131
+ # original error file contents and overwrite the error file.
132
+ self._rm(my_error_file)
133
+ self._write_error_file(my_error_file, json.dumps(rootcause_error))
134
+ log.info("dumped error file to parent's %s", my_error_file)
135
+ else:
136
+ log.error(
137
+ "no error file defined for parent, to copy child error file (%s)", rootcause_error_file
138
+ )
139
+
140
+ def _rm(self, my_error_file):
141
+ if os.path.isfile(my_error_file):
142
+ # Log the contents of the original file.
143
+ with open(my_error_file) as fp:
144
+ try:
145
+ original = json.dumps(json.load(fp), indent=2)
146
+ log.warning(
147
+ "%s already exists"
148
+ " and will be overwritten."
149
+ " Original contents:\n%s",
150
+ my_error_file, original
151
+ )
152
+ except json.decoder.JSONDecodeError as err:
153
+ log.warning(
154
+ "%s already exists"
155
+ " and will be overwritten."
156
+ " Unable to load original contents:\n",
157
+ my_error_file
158
+ )
159
+ os.remove(my_error_file)
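
As the class docstring notes, ``ErrorHandler`` can be subclassed to customize error handling. A hedged sketch (the subclass name and the extra logging are illustrative only):

    import logging

    from torch.distributed.elastic.multiprocessing.errors.error_handler import ErrorHandler

    logger = logging.getLogger(__name__)

    class LoggingErrorHandler(ErrorHandler):
        def initialize(self) -> None:
            super().initialize()  # keep the faulthandler registration
            logger.info("error handler initialized")

        def record_exception(self, e: BaseException) -> None:
            logger.error("recording %s", type(e).__name__)
            super().record_exception(e)  # still writes TORCHELASTIC_ERROR_FILE

    # pass it explicitly, since @record only takes the bare-function form:
    # main = record(main, error_handler=LoggingErrorHandler())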
llmeval-env/lib/python3.10/site-packages/torch/distributed/elastic/multiprocessing/errors/handlers.py ADDED
@@ -0,0 +1,16 @@
1
+ #!/usr/bin/env python3
2
+
3
+ # Copyright (c) Facebook, Inc. and its affiliates.
4
+ # All rights reserved.
5
+ #
6
+ # This source code is licensed under the BSD-style license found in the
7
+ # LICENSE file in the root directory of this source tree.
8
+ # Multiprocessing error-reporting module
9
+
10
+
11
+ from torch.distributed.elastic.multiprocessing.errors.error_handler import ErrorHandler
12
+
13
+ __all__ = ['get_error_handler']
14
+
15
+ def get_error_handler():
16
+ return ErrorHandler()
llmeval-env/lib/python3.10/site-packages/torch/distributed/elastic/multiprocessing/redirects.py ADDED
@@ -0,0 +1,102 @@
1
+ #!/usr/bin/env python3
2
+
3
+ # Copyright (c) Facebook, Inc. and its affiliates.
4
+ # All rights reserved.
5
+ #
6
+ # This source code is licensed under the BSD-style license found in the
7
+ # LICENSE file in the root directory of this source tree.
8
+
9
+ # Taken and modified from original source:
10
+ # https://eli.thegreenplace.net/2015/redirecting-all-kinds-of-stdout-in-python/
11
+ import ctypes
12
+ import logging
13
+ import os
14
+ import sys
15
+ from contextlib import contextmanager
16
+ from functools import partial
17
+
18
+ IS_WINDOWS = sys.platform == "win32"
19
+ IS_MACOS = sys.platform == "darwin"
20
+
21
+
22
+ logger = logging.getLogger(__name__)
23
+
24
+
25
+ def get_libc():
26
+ if IS_WINDOWS or IS_MACOS:
27
+ logger.warning(
28
+ "NOTE: Redirects are currently not supported in Windows or MacOs."
29
+ )
30
+ return None
31
+ else:
32
+ return ctypes.CDLL("libc.so.6")
33
+
34
+
35
+ libc = get_libc()
36
+
37
+
38
+ def _c_std(stream: str):
39
+ return ctypes.c_void_p.in_dll(libc, stream)
40
+
41
+
42
+ def _python_std(stream: str):
43
+ return {"stdout": sys.stdout, "stderr": sys.stderr}[stream]
44
+
45
+
46
+ _VALID_STD = {"stdout", "stderr"}
47
+
48
+
49
+ @contextmanager
50
+ def redirect(std: str, to_file: str):
51
+ """
52
+ Redirect ``std`` (one of ``"stdout"`` or ``"stderr"``) to a file in the path specified by ``to_file``.
53
+
54
+ This method redirects the underlying std file descriptor (not just python's ``sys.stdout|stderr``).
55
+ See usage for details.
56
+
57
+ Directory of ``dst_filename`` is assumed to exist and the destination file
58
+ is overwritten if it already exists.
59
+
60
+ .. note:: Due to buffering cross source writes are not guaranteed to
61
+ appear in wall-clock order. For instance in the example below
62
+ it is possible for the C-outputs to appear before the python
63
+ outputs in the log file.
64
+
65
+ Usage:
66
+
67
+ ::
68
+
69
+ # syntactic-sugar for redirect("stdout", "tmp/stdout.log")
70
+ with redirect_stdout("/tmp/stdout.log"):
71
+ print("python stdouts are redirected")
72
+ libc = ctypes.CDLL("libc.so.6")
73
+ libc.printf(b"c stdouts are also redirected")
74
+ os.system("echo system stdouts are also redirected")
75
+
76
+ print("stdout restored")
77
+
78
+ """
79
+ if std not in _VALID_STD:
80
+ raise ValueError(
81
+ f"unknown standard stream <{std}>, must be one of {_VALID_STD}"
82
+ )
83
+
84
+ c_std = _c_std(std)
85
+ python_std = _python_std(std)
86
+ std_fd = python_std.fileno()
87
+
88
+ def _redirect(dst):
89
+ libc.fflush(c_std)
90
+ python_std.flush()
91
+ os.dup2(dst.fileno(), std_fd)
92
+
93
+ with os.fdopen(os.dup(std_fd)) as orig_std, open(to_file, mode="w+b") as dst:
94
+ _redirect(dst)
95
+ try:
96
+ yield
97
+ finally:
98
+ _redirect(orig_std)
99
+
100
+
101
+ redirect_stdout = partial(redirect, "stdout")
102
+ redirect_stderr = partial(redirect, "stderr")
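
A minimal usage sketch of the redirect helpers above (Linux only, since ``get_libc()`` returns None on Windows/macOS); the log path is illustrative:

    import ctypes
    import os

    from torch.distributed.elastic.multiprocessing.redirects import redirect_stdout

    with redirect_stdout("/tmp/stdout.log"):
        print("python-level stdout")                          # goes through sys.stdout
        os.system("echo subprocess stdout")                   # inherits the dup2'd fd
        ctypes.CDLL("libc.so.6").printf(b"c-level stdout\n")  # raw libc write

    with open("/tmp/stdout.log") as fp:
        print(fp.read())  # all three lines, though not necessarily in wall-clock order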
llmeval-env/lib/python3.10/site-packages/torch/distributed/elastic/multiprocessing/subprocess_handler/__init__.py ADDED
@@ -0,0 +1,15 @@
1
+ #!/usr/bin/env python3
2
+
3
+ # Copyright (c) Facebook, Inc. and its affiliates.
4
+ # All rights reserved.
5
+ #
6
+ # This source code is licensed under the BSD-style license found in the
7
+ # LICENSE file in the root directory of this source tree.
8
+ from torch.distributed.elastic.multiprocessing.subprocess_handler.handlers import (
9
+ get_subprocess_handler,
10
+ )
11
+ from torch.distributed.elastic.multiprocessing.subprocess_handler.subprocess_handler import (
12
+ SubprocessHandler,
13
+ )
14
+
15
+ __all__ = ["SubprocessHandler", "get_subprocess_handler"]
llmeval-env/lib/python3.10/site-packages/torch/distributed/elastic/multiprocessing/subprocess_handler/handlers.py ADDED
@@ -0,0 +1,32 @@
1
+ #!/usr/bin/env python3
2
+
3
+ # Copyright (c) Facebook, Inc. and its affiliates.
4
+ # All rights reserved.
5
+ #
6
+ # This source code is licensed under the BSD-style license found in the
7
+ # LICENSE file in the root directory of this source tree.
8
+ from typing import Dict, Tuple
9
+
10
+ from torch.distributed.elastic.multiprocessing.subprocess_handler.subprocess_handler import (
11
+ SubprocessHandler,
12
+ )
13
+
14
+ __all__ = ["get_subprocess_handler"]
15
+
16
+
17
+ def get_subprocess_handler(
18
+ entrypoint: str,
19
+ args: Tuple,
20
+ env: Dict[str, str],
21
+ stdout: str,
22
+ stderr: str,
23
+ local_rank_id: int,
24
+ ):
25
+ return SubprocessHandler(
26
+ entrypoint=entrypoint,
27
+ args=args,
28
+ env=env,
29
+ stdout=stdout,
30
+ stderr=stderr,
31
+ local_rank_id=local_rank_id,
32
+ )
llmeval-env/lib/python3.10/site-packages/torch/distributed/elastic/multiprocessing/subprocess_handler/subprocess_handler.py ADDED
@@ -0,0 +1,78 @@
1
+ #!/usr/bin/env python3
2
+
3
+ # Copyright (c) Facebook, Inc. and its affiliates.
4
+ # All rights reserved.
5
+ #
6
+ # This source code is licensed under the BSD-style license found in the
7
+ # LICENSE file in the root directory of this source tree.
8
+ import os
9
+ import signal
10
+ import subprocess
11
+ import sys
12
+
13
+ from typing import Any, Dict, Optional, Tuple
14
+
15
+ __all__ = ["SubprocessHandler"]
16
+
17
+ IS_WINDOWS = sys.platform == "win32"
18
+
19
+
20
+ def _get_default_signal() -> signal.Signals:
21
+ """Get the default termination signal. SIGTERM for unix, CTRL_C_EVENT for windows."""
22
+ if IS_WINDOWS:
23
+ return signal.CTRL_C_EVENT # type: ignore[attr-defined] # noqa: F821
24
+ else:
25
+ return signal.SIGTERM
26
+
27
+
28
+ class SubprocessHandler:
29
+ """
30
+ Convenience wrapper around python's ``subprocess.Popen``. Keeps track of
31
+ meta-objects associated to the process (e.g. stdout and stderr redirect fds).
32
+ """
33
+
34
+ def __init__(
35
+ self,
36
+ entrypoint: str,
37
+ args: Tuple,
38
+ env: Dict[str, str],
39
+ stdout: str,
40
+ stderr: str,
41
+ local_rank_id: int,
42
+ ):
43
+ self._stdout = open(stdout, "w") if stdout else None
44
+ self._stderr = open(stderr, "w") if stderr else None
45
+ # inherit parent environment vars
46
+ env_vars = os.environ.copy()
47
+ env_vars.update(env)
48
+
49
+ args_str = (entrypoint, *[str(e) for e in args])
50
+ self.local_rank_id = local_rank_id
51
+ self.proc: subprocess.Popen = self._popen(args_str, env_vars)
52
+
53
+ def _popen(self, args: Tuple, env: Dict[str, str]) -> subprocess.Popen:
54
+ kwargs: Dict[str, Any] = {}
55
+ if not IS_WINDOWS:
56
+ kwargs["start_new_session"] = True
57
+ return subprocess.Popen(
58
+ # pyre-fixme[6]: Expected `Union[typing.Sequence[Union[_PathLike[bytes],
59
+ # _PathLike[str], bytes, str]], bytes, str]` for 1st param but got
60
+ # `Tuple[str, *Tuple[Any, ...]]`.
61
+ args=args,
62
+ env=env,
63
+ stdout=self._stdout,
64
+ stderr=self._stderr,
65
+ **kwargs,
66
+ )
67
+
68
+ def close(self, death_sig: Optional[signal.Signals] = None) -> None:
69
+ if not death_sig:
70
+ death_sig = _get_default_signal()
71
+ if IS_WINDOWS:
72
+ self.proc.send_signal(death_sig)
73
+ else:
74
+ os.killpg(self.proc.pid, death_sig)
75
+ if self._stdout:
76
+ self._stdout.close()
77
+ if self._stderr:
78
+ self._stderr.close()
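
A hedged sketch of ``SubprocessHandler`` launching a trivial command through ``get_subprocess_handler``; the ``sleep`` entrypoint and the log paths are illustrative only:

    from torch.distributed.elastic.multiprocessing.subprocess_handler import (
        get_subprocess_handler,
    )

    handler = get_subprocess_handler(
        entrypoint="sleep",
        args=("30",),
        env={"EXAMPLE_VAR": "1"},        # merged on top of the inherited os.environ
        stdout="/tmp/rank0_stdout.log",  # opened in "w" mode by the handler
        stderr="/tmp/rank0_stderr.log",
        local_rank_id=0,
    )

    # handler.proc is the underlying subprocess.Popen
    handler.close()      # sends SIGTERM (default) to the process group, closes the logs
    handler.proc.wait()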
llmeval-env/lib/python3.10/site-packages/torch/distributed/launcher/__init__.py ADDED
@@ -0,0 +1,14 @@
1
+ #!/usr/bin/env python3
2
+
3
+ # Copyright (c) Facebook, Inc. and its affiliates.
4
+ # All rights reserved.
5
+ #
6
+ # This source code is licensed under the BSD-style license found in the
7
+ # LICENSE file in the root directory of this source tree.
8
+
9
+
10
+ from torch.distributed.launcher.api import ( # noqa: F401
11
+ LaunchConfig,
12
+ elastic_launch,
13
+ launch_agent,
14
+ )
llmeval-env/lib/python3.10/site-packages/torch/distributed/launcher/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (314 Bytes). View file
 
llmeval-env/lib/python3.10/site-packages/torch/distributed/launcher/__pycache__/api.cpython-310.pyc ADDED
Binary file (9.33 kB). View file
 
llmeval-env/lib/python3.10/site-packages/torch/distributed/launcher/api.py ADDED
@@ -0,0 +1,283 @@
1
+ #!/usr/bin/env python3
2
+
3
+ # Copyright (c) Facebook, Inc. and its affiliates.
4
+ # All rights reserved.
5
+ #
6
+ # This source code is licensed under the BSD-style license found in the
7
+ # LICENSE file in the root directory of this source tree.
8
+ import sys
9
+ import uuid
10
+ from dataclasses import dataclass, field
11
+ from typing import Any, Callable, Dict, List, Optional, Tuple, Union
12
+
13
+ import torch.distributed.elastic.rendezvous.registry as rdzv_registry
14
+ from torch.distributed.elastic import events, metrics
15
+ from torch.distributed.elastic.agent.server.api import WorkerSpec
16
+ from torch.distributed.elastic.agent.server.local_elastic_agent import LocalElasticAgent
17
+ from torch.distributed.elastic.multiprocessing import DefaultLogsSpecs, LogsSpecs, SignalException
18
+ from torch.distributed.elastic.multiprocessing.errors import ChildFailedError
19
+ from torch.distributed.elastic.rendezvous import RendezvousParameters
20
+ from torch.distributed.elastic.rendezvous.utils import parse_rendezvous_endpoint
21
+ from torch.distributed.elastic.utils.logging import get_logger
22
+
23
+ __all__ = ['LaunchConfig', 'elastic_launch', 'launch_agent']
24
+
25
+ logger = get_logger(__name__)
26
+
27
+
28
+ @dataclass
29
+ class LaunchConfig:
30
+ """
31
+ Creates a rendezvous config.
32
+
33
+ Args:
34
+ min_nodes: Minimum number of nodes that the user function will
35
+ be launched on. Elastic agent ensures that the user
36
+ function starts only when at least min_nodes nodes have joined
37
+ the rendezvous.
38
+ max_nodes: Maximum number of nodes that the user function
39
+ will be launched on.
40
+ nproc_per_node: On each node the elastic agent will launch
41
+ this number of workers that will execute the user-
42
+ defined function.
43
+ rdzv_backend: rdzv_backend to use in the rendezvous (zeus-adapter, etcd).
44
+ rdzv_endpoint: The endpoint of the rdzv sync. storage.
45
+ rdzv_configs: Key, value pair that specifies rendezvous specific configuration.
46
+ rdzv_timeout: Legacy argument that specifies timeout for the rendezvous. It is going
47
+ to be removed in future versions, see the note below. The default timeout is 900 seconds.
48
+ run_id: The unique run id of the job (if not passed a unique one will be
49
+ deduced from run environment - flow workflow id in flow - or auto generated).
50
+ role: User defined role of the worker (defaults to "trainer").
51
+ max_restarts: The maximum number of restarts that the elastic agent will conduct
52
+ on workers before failure.
53
+ monitor_interval: The interval in seconds that is used by the elastic_agent
54
+ as a period of monitoring workers.
55
+ start_method: The method used by the elastic agent to start the
56
+ workers (spawn, fork, forkserver).
57
+ metrics_cfg: configuration to initialize metrics.
58
+ local_addr: address of the local node if any. If not set, a lookup on the local
59
+ machine's FQDN will be performed.
60
+ local_ranks_filter: ranks for which to show logs in console. If not set, show from all.
61
+ .. note::
62
+ `rdzv_timeout` is a legacy argument that will be removed in a future release.
63
+ Set the timeout via `rdzv_configs['timeout']`
64
+
65
+ """
66
+
67
+ min_nodes: int
68
+ max_nodes: int
69
+ nproc_per_node: int
70
+ logs_specs: Optional[LogsSpecs] = None
71
+ run_id: str = ""
72
+ role: str = "default_role"
73
+ rdzv_endpoint: str = ""
74
+ rdzv_backend: str = "etcd"
75
+ rdzv_configs: Dict[str, Any] = field(default_factory=dict)
76
+ rdzv_timeout: int = -1
77
+ max_restarts: int = 3
78
+ monitor_interval: float = 30
79
+ start_method: str = "spawn"
80
+ log_line_prefix_template: Optional[str] = None
81
+ metrics_cfg: Dict[str, str] = field(default_factory=dict)
82
+ local_addr: Optional[str] = None
83
+
84
+ def __post_init__(self):
85
+ default_timeout = 900
86
+ if self.rdzv_timeout != -1:
87
+ self.rdzv_configs["timeout"] = self.rdzv_timeout
88
+ elif "timeout" not in self.rdzv_configs:
89
+ self.rdzv_configs["timeout"] = default_timeout
90
+
91
+ # Post-processing to enable refactoring to introduce logs_specs due to non-torchrun API usage
92
+ if self.logs_specs is None:
93
+ self.logs_specs = DefaultLogsSpecs()
94
+
95
+
96
+ class elastic_launch:
97
+ """
98
+ Launches a torchelastic agent on the container that invoked the entrypoint.
99
+
100
+ 1. Pass the ``entrypoint`` arguments as non ``kwargs`` (e.g. no named parameters).
101
+ ``entrypoint`` can be a function or a command.
102
+ 2. The return value is a map of each worker's output mapped
103
+ by their respective global rank.
104
+
105
+ Usage
106
+
107
+ ::
108
+
109
+ def worker_fn(foo):
110
+ # ...
111
+
112
+ def main():
113
+ # entrypoint is a function.
114
+ outputs = elastic_launch(LaunchConfig, worker_fn)(foo)
115
+ # return rank 0's output
116
+ return outputs[0]
117
+
118
+ # entrypoint is a command and ``script.py`` is the python module.
119
+ outputs = elastic_launch(LaunchConfig, "script.py")(args)
120
+ outputs = elastic_launch(LaunchConfig, "python")("script.py")
121
+ """
122
+
123
+ def __init__(
124
+ self,
125
+ config: LaunchConfig,
126
+ entrypoint: Union[Callable, str, None],
127
+ ):
128
+ self._config = config
129
+ self._entrypoint = entrypoint
130
+
131
+ def __call__(self, *args):
132
+ return launch_agent(self._config, self._entrypoint, list(args))
133
+
134
+
135
+ def _get_entrypoint_name(
136
+ entrypoint: Union[Callable, str, None], args: List[Any]
137
+ ) -> str:
138
+ """Retrieve entrypoint name with the rule:
139
+ 1. If entrypoint is a function, use ``entrypoint.__name__``.
140
+ 2. If entrypoint is a string, check its value:
141
+ 2.1 if entrypoint equals to ``sys.executable`` (like "python"), use the first element from ``args``
142
+ which does not start with a hyphen (for example, "-u" will be skipped).
143
+ 2.2 otherwise, use ``entrypoint`` value.
144
+ 3. Otherwise, return empty string.
145
+ """
146
+ if isinstance(entrypoint, Callable): # type: ignore[arg-type]
147
+ return entrypoint.__name__ # type: ignore[union-attr]
148
+ elif isinstance(entrypoint, str):
149
+ if entrypoint == sys.executable:
150
+ return next((arg for arg in args if arg[0] != "-"), "")
151
+ else:
152
+ return entrypoint
153
+ else:
154
+ return ""
155
+
156
+
157
+ def _get_addr_and_port(
158
+ rdzv_parameters: RendezvousParameters,
159
+ ) -> Tuple[Optional[str], Optional[int]]:
160
+ if rdzv_parameters.backend != "static":
161
+ return (None, None)
162
+ endpoint = rdzv_parameters.endpoint
163
+ endpoint = endpoint.strip()
164
+ if not endpoint:
165
+ raise ValueError(
166
+ "Endpoint is missing in endpoint. Try to add --master-addr and --master-port"
167
+ )
168
+ master_addr, master_port = parse_rendezvous_endpoint(endpoint, default_port=-1)
169
+ if master_port == -1:
170
+ raise ValueError(
171
+ f"port is missing in endpoint: {endpoint}. Try to specify --master-port"
172
+ )
173
+ return (master_addr, master_port)
174
+
175
+
176
+ def launch_agent(
177
+ config: LaunchConfig,
178
+ entrypoint: Union[Callable, str, None],
179
+ args: List[Any],
180
+ ) -> Dict[int, Any]:
181
+ if not config.run_id:
182
+ run_id = str(uuid.uuid4().int)
183
+ logger.warning("config has no run_id, generated a random run_id: %s", run_id)
184
+ config.run_id = run_id
185
+
186
+ entrypoint_name = _get_entrypoint_name(entrypoint, args)
187
+
188
+ logger.info(
189
+ "Starting elastic_operator with launch configs:\n"
190
+ " entrypoint : %(entrypoint)s\n"
191
+ " min_nodes : %(min_nodes)s\n"
192
+ " max_nodes : %(max_nodes)s\n"
193
+ " nproc_per_node : %(nproc_per_node)s\n"
194
+ " run_id : %(run_id)s\n"
195
+ " rdzv_backend : %(rdzv_backend)s\n"
196
+ " rdzv_endpoint : %(rdzv_endpoint)s\n"
197
+ " rdzv_configs : %(rdzv_configs)s\n"
198
+ " max_restarts : %(max_restarts)s\n"
199
+ " monitor_interval : %(monitor_interval)s\n"
200
+ " log_dir : %(log_dir)s\n"
201
+ " metrics_cfg : %(metrics_cfg)s\n",
202
+ {
203
+ "entrypoint": entrypoint_name,
204
+ "min_nodes": config.min_nodes,
205
+ "max_nodes": config.max_nodes,
206
+ "nproc_per_node": config.nproc_per_node,
207
+ "run_id": config.run_id,
208
+ "rdzv_backend": config.rdzv_backend,
209
+ "rdzv_endpoint": config.rdzv_endpoint,
210
+ "rdzv_configs": config.rdzv_configs,
211
+ "max_restarts": config.max_restarts,
212
+ "monitor_interval": config.monitor_interval,
213
+ "log_dir": config.logs_specs.root_log_dir, # type: ignore[union-attr]
214
+ "metrics_cfg": config.metrics_cfg
215
+ }
216
+ )
217
+
218
+ rdzv_parameters = RendezvousParameters(
219
+ backend=config.rdzv_backend,
220
+ endpoint=config.rdzv_endpoint,
221
+ run_id=config.run_id,
222
+ min_nodes=config.min_nodes,
223
+ max_nodes=config.max_nodes,
224
+ local_addr=config.local_addr,
225
+ **config.rdzv_configs,
226
+ )
227
+
228
+ master_addr, master_port = _get_addr_and_port(rdzv_parameters)
229
+
230
+ spec = WorkerSpec(
231
+ role=config.role,
232
+ local_world_size=config.nproc_per_node,
233
+ entrypoint=entrypoint,
234
+ args=tuple(args),
235
+ rdzv_handler=rdzv_registry.get_rendezvous_handler(rdzv_parameters),
236
+ max_restarts=config.max_restarts,
237
+ monitor_interval=config.monitor_interval,
238
+ master_addr=master_addr,
239
+ master_port=master_port,
240
+ local_addr=config.local_addr,
241
+ )
242
+
243
+ agent = LocalElasticAgent(
244
+ spec=spec,
245
+ logs_specs=config.logs_specs, # type: ignore[arg-type]
246
+ start_method=config.start_method,
247
+ log_line_prefix_template=config.log_line_prefix_template,
248
+ )
249
+
250
+ shutdown_rdzv = True
251
+ try:
252
+ metrics.initialize_metrics(metrics.MetricsConfig(config.metrics_cfg))
253
+
254
+ result = agent.run()
255
+ # records that agent.run() has succeeded NOT that workers have succeeded
256
+ events.record(agent.get_event_succeeded())
257
+
258
+ if result.is_failed():
259
+ # ChildFailedError is treated specially by @record
260
+ # if the error files for the failed children exist
261
+ # @record will copy the first error (root cause)
262
+ # to the error file of the launcher process.
263
+ raise ChildFailedError(
264
+ name=entrypoint_name,
265
+ failures=result.failures,
266
+ )
267
+
268
+ return result.return_values
269
+ except ChildFailedError:
270
+ raise
271
+ except SignalException:
272
+ # when the agent dies with a signal do NOT shutdown the rdzv_handler
273
+ # since this closes the rendezvous on this rdzv_id permanently and
274
+ # prevents any additional scaling events
275
+ shutdown_rdzv = False
276
+ events.record(agent.get_event_failed())
277
+ raise
278
+ except Exception:
279
+ events.record(agent.get_event_failed())
280
+ raise
281
+ finally:
282
+ if shutdown_rdzv:
283
+ spec.rdzv_handler.shutdown()
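
A hedged single-node sketch of ``elastic_launch``; the worker function, rendezvous backend, and endpoint below are illustrative, not a recommended configuration:

    from torch.distributed.launcher.api import LaunchConfig, elastic_launch

    def trainer(scale: int) -> int:
        # real workers would initialize process groups and train here
        return scale * 2

    config = LaunchConfig(
        min_nodes=1,
        max_nodes=1,
        nproc_per_node=2,
        rdzv_backend="c10d",              # assumes the c10d rendezvous backend is available
        rdzv_endpoint="localhost:29400",
        run_id="example-run",
        max_restarts=0,
    )

    if __name__ == "__main__":
        # returns {global_rank: return_value}, as the class docstring describes
        outputs = elastic_launch(config, trainer)(10)
        print(outputs)  # e.g. {0: 20, 1: 20}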
llmeval-env/lib/python3.10/site-packages/torch/distributed/optim/__init__.py ADDED
@@ -0,0 +1,34 @@
1
+ """
2
+ :mod:`torch.distributed.optim` exposes DistributedOptimizer, which takes a list
3
+ of remote parameters (:class:`~torch.distributed.rpc.RRef`) and runs the
4
+ optimizer locally on the workers where the parameters live. The distributed
5
+ optimizer can use any of the local optimizer :ref:`optimizer-algorithms` to
6
+ apply the gradients on each worker.
7
+ """
8
+ import torch
9
+ from torch import optim
10
+
11
+ from .apply_optimizer_in_backward import (
12
+ _apply_optimizer_in_backward,
13
+ _get_in_backward_optimizers,
14
+ )
15
+ from .functional_adadelta import _FunctionalAdadelta
16
+
17
+ from .functional_adagrad import _FunctionalAdagrad
18
+ from .functional_adam import _FunctionalAdam
19
+ from .functional_adamax import _FunctionalAdamax
20
+ from .functional_adamw import _FunctionalAdamW
21
+ from .functional_rmsprop import _FunctionalRMSprop
22
+ from .functional_rprop import _FunctionalRprop
23
+ from .functional_sgd import _FunctionalSGD
24
+ from .named_optimizer import _NamedOptimizer
25
+ from .utils import as_functional_optim
26
+
27
+
28
+ # DistributedOptimizer imports torch.distributed.rpc names, so gate availability
29
+ # based on RPC being available.
30
+ if hasattr(torch._C, "_rpc_init"):
31
+ from .optimizer import DistributedOptimizer
32
+
33
+ from .post_localSGD_optimizer import PostLocalSGDOptimizer
34
+ from .zero_redundancy_optimizer import ZeroRedundancyOptimizer
llmeval-env/lib/python3.10/site-packages/torch/distributed/optim/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (1.53 kB). View file
 
llmeval-env/lib/python3.10/site-packages/torch/distributed/optim/__pycache__/functional_adadelta.cpython-310.pyc ADDED
Binary file (2.45 kB). View file
 
llmeval-env/lib/python3.10/site-packages/torch/distributed/optim/__pycache__/functional_adagrad.cpython-310.pyc ADDED
Binary file (2.6 kB). View file
 
llmeval-env/lib/python3.10/site-packages/torch/distributed/optim/__pycache__/functional_adam.cpython-310.pyc ADDED
Binary file (4.02 kB). View file
 
llmeval-env/lib/python3.10/site-packages/torch/distributed/optim/__pycache__/functional_adamw.cpython-310.pyc ADDED
Binary file (3.92 kB). View file
 
llmeval-env/lib/python3.10/site-packages/torch/distributed/optim/__pycache__/functional_rprop.cpython-310.pyc ADDED
Binary file (2.48 kB). View file
 
llmeval-env/lib/python3.10/site-packages/torch/distributed/optim/__pycache__/named_optimizer.cpython-310.pyc ADDED
Binary file (10.9 kB). View file
 
llmeval-env/lib/python3.10/site-packages/torch/distributed/optim/__pycache__/post_localSGD_optimizer.cpython-310.pyc ADDED
Binary file (5.23 kB). View file
 
llmeval-env/lib/python3.10/site-packages/torch/distributed/optim/__pycache__/utils.cpython-310.pyc ADDED
Binary file (2.18 kB). View file
 
llmeval-env/lib/python3.10/site-packages/torch/distributed/optim/apply_optimizer_in_backward.py ADDED
@@ -0,0 +1,118 @@
1
+ from typing import Any, Dict, Iterable, List, no_type_check, Type
2
+
3
+ import torch
4
+
5
+ __all__: List[str] = []
6
+
7
+ # WeakTensorKeyDictionary to store relevant meta-data for the Tensor/Parameter
8
+ # without changing its lifetime.
9
+ # NOTE: Alternative is to add the meta-data as an attribute to the tensor,
10
+ # but that will serialize the meta-data if Tensor is serialized.
11
+ param_to_optim_hook_handle_map = torch.utils.weak.WeakTensorKeyDictionary()
12
+ param_to_acc_grad_map = torch.utils.weak.WeakTensorKeyDictionary()
13
+
14
+ @no_type_check
15
+ def _apply_optimizer_in_backward(
16
+ optimizer_class: Type[torch.optim.Optimizer],
17
+ params: Iterable[torch.nn.Parameter],
18
+ optimizer_kwargs: Dict[str, Any],
19
+ register_hook: bool = True,
20
+ ) -> None:
21
+ """
22
+ Upon ``backward()``, the optimizer specified for each parameter will fire after
23
+ the gradient has been accumulated into the parameter.
24
+
25
+ Note - gradients for these parameters will be set to None after ``backward()``.
26
+ This means that any other optimizer not specified via `_apply_optimizer_in_backward`
27
+ over this parameter will be a no-op.
28
+
29
+ Args:
30
+ optimizer_class: (Type[torch.optim.Optimizer]): Optimizer to apply to parameter
31
+ params: (Iterator[nn.Parameter]): parameters to apply optimizer state to
32
+ optimizer_kwargs: (Dict[str, Any]): kwargs to pass to optimizer constructor
33
+ register_hook: (bool): whether to register a hook that runs the optimizer
34
+ after gradient for this parameter is accumulated. This is the default
35
+ way that optimizer in backward is implemented, but specific use cases
36
+ (such as DDP) may wish to override this to implement custom behavior.
37
+ (Default = True)
38
+
39
+ Example::
40
+ params_generator = model.parameters()
41
+ param_1 = next(params_generator)
42
+ remainder_params = list(params_generator)
43
+
44
+ apply_optimizer_in_backward(torch.optim.SGD, [param_1], {"lr": .02})
45
+ apply_optimizer_in_backward(torch.optim.Adam, remainder_params, {"lr": .04})
46
+
47
+ model(...).sum().backward() # after backward, parameters will already
48
+ # have their registered optimizer(s) applied.
49
+
50
+ """
51
+ torch._C._log_api_usage_once(
52
+ "torch.distributed.optim.apply_optimizer_in_backward"
53
+ )
54
+
55
+ @no_type_check
56
+ def _apply_optimizer_in_backward_to_param(param: torch.nn.Parameter) -> None:
57
+ # view_as creates a node in autograd graph that allows us access to the
58
+ # parameter's AccumulateGrad autograd function object. We register a
59
+ # hook on this object to fire the optimizer when the gradient for
60
+ # this parameter is ready (has been accumulated into .grad field)
61
+
62
+ # Don't create a new acc_grad if we already have one
63
+ # i.e. for shared parameters or attaching multiple optimizers to a param.
64
+ if param not in param_to_acc_grad_map:
65
+ param_to_acc_grad_map[param] = param.view_as(param).grad_fn.next_functions[0][0]
66
+
67
+ optimizer = optimizer_class([param], **optimizer_kwargs)
68
+
69
+ if not hasattr(param, "_in_backward_optimizers"):
70
+ param._in_backward_optimizers = [] # type: ignore[attr-defined]
71
+ # TODO: Remove these attributes once we have a better way of accessing
72
+ # optimizer classes and kwargs for a parameter.
73
+ param._optimizer_classes = [] # type: ignore[attr-defined]
74
+ param._optimizer_kwargs = [] # type: ignore[attr-defined]
75
+
76
+ param._in_backward_optimizers.append(optimizer) # type: ignore[attr-defined]
77
+ param._optimizer_classes.append(optimizer_class) # type: ignore[attr-defined]
78
+ param._optimizer_kwargs.append(optimizer_kwargs) # type: ignore[attr-defined]
79
+
80
+ if not register_hook:
81
+ return
82
+
83
+ def optimizer_hook(*_unused) -> None:
84
+ for opt in param._in_backward_optimizers: # type: ignore[attr-defined]
85
+ opt.step()
86
+
87
+ param.grad = None
88
+
89
+ handle = param_to_acc_grad_map[param].register_hook(optimizer_hook) # type: ignore[attr-defined]
90
+ if param not in param_to_optim_hook_handle_map:
91
+ param_to_optim_hook_handle_map[param] = []
92
+ param_to_optim_hook_handle_map[param].append(handle)
93
+
94
+ for param in params:
95
+ _apply_optimizer_in_backward_to_param(param)
96
+
97
+
98
+ def _get_in_backward_optimizers(module: torch.nn.Module) -> List[torch.optim.Optimizer]:
99
+ """
100
+ Return a list of in-backward optimizers applied to ``module``'s parameters. Note that these
101
+ optimizers are not intended to directly have their ``step`` or ``zero_grad`` methods called
102
+ by the user and are intended to be used for things like checkpointing.
103
+
104
+ Args:
105
+ module: (torch.nn.Module): model to retrieve in-backward optimizers for
106
+
107
+ Returns:
108
+ List[torch.optim.Optimizer]: the in-backward optimizers.
109
+
110
+ Example::
111
+ _apply_optimizer_in_backward(torch.optim.SGD, model.parameters(), {'lr': 0.01})
112
+ optims = _get_optimizers_in_backward(model)
113
+ """
114
+ optims: List[torch.optim.Optimizer] = []
115
+ for param in module.parameters():
116
+ optims.extend(getattr(param, "_in_backward_optimizers", []))
117
+
118
+ return optims
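
A minimal sketch of the in-backward optimizer helpers, following the docstring examples above; the toy model, shapes, and learning rate are illustrative only:

    import torch
    from torch.distributed.optim import (
        _apply_optimizer_in_backward,
        _get_in_backward_optimizers,
    )

    model = torch.nn.Linear(4, 2)
    _apply_optimizer_in_backward(torch.optim.SGD, model.parameters(), {"lr": 0.01})

    model(torch.randn(8, 4)).sum().backward()  # hooks step SGD and reset .grad to None

    assert all(p.grad is None for p in model.parameters())
    print(_get_in_backward_optimizers(model))  # one SGD instance per parameter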
llmeval-env/lib/python3.10/site-packages/torch/distributed/optim/functional_adadelta.py ADDED
@@ -0,0 +1,102 @@
1
+ from typing import Dict, List, Optional
2
+
3
+ import torch
4
+ import torch.optim._functional as F
5
+
6
+ from torch import Tensor
7
+
8
+ __all__: List[str] = []
9
+
10
+ # Define a TorchScript compatible Functional Adadelta Optimizer
11
+ # where we use this optimizer in a functional way.
12
+ # Instead of using the `param.grad` when updating parameters,
13
+ # we explicitly allow the distributed optimizer to pass gradients to
14
+ # the `step` function. In this way, we could separate the gradients
15
+ # and parameters and allow multithreaded trainers to update the
16
+ # parameters without data races when accumulating into the same .grad.
17
+ # NOTE: This should be only used by distributed optimizer internals
18
+ # and is not meant to be exposed to the user.
19
+ @torch.jit.script
20
+ class _FunctionalAdadelta:
21
+ def __init__(
22
+ self,
23
+ params: List[Tensor],
24
+ lr: float = 1.0,
25
+ rho: float = 0.9,
26
+ eps: float = 1e-6,
27
+ weight_decay: float = 0.0,
28
+ foreach: bool = False,
29
+ maximize: bool = False,
30
+ _allow_empty_param_list: bool = False,
31
+ ):
32
+ self.defaults = {
33
+ "lr": lr,
34
+ "rho": rho,
35
+ "eps": eps,
36
+ "weight_decay": weight_decay,
37
+ }
38
+ self.foreach = foreach
39
+ self.maximize = maximize
40
+
41
+ if len(params) == 0 and not _allow_empty_param_list:
42
+ raise ValueError("optimizer got an empty parameter list")
43
+
44
+ # NOTE: we only have one param_group and don't allow user to add additional
45
+ # param group as it's not a common use case.
46
+ self.param_group = {"params": params}
47
+
48
+ self.state = torch.jit.annotate(Dict[torch.Tensor, Dict[str, torch.Tensor]], {})
49
+
50
+ def step(self, gradients: List[Optional[Tensor]]):
51
+ params = self.param_group["params"]
52
+ params_with_grad = []
53
+ grads = []
54
+ square_avgs = []
55
+ acc_deltas = []
56
+ lr = self.defaults["lr"]
57
+ rho = self.defaults["rho"]
58
+ eps = self.defaults["eps"]
59
+ weight_decay = self.defaults["weight_decay"]
60
+
61
+ if len(params) != len(gradients):
62
+ raise ValueError(
63
+ "the gradients passed in does not equal to the size of the parameters!"
64
+ + f"Params length: {len(params)}. "
65
+ + f"Gradients length: {len(gradients)}"
66
+ )
67
+ has_complex = False
68
+ for param, gradient in zip(params, gradients):
69
+ if gradient is not None:
70
+ has_complex |= torch.is_complex(param)
71
+ params_with_grad.append(param)
72
+ grads.append(gradient)
73
+ # Lazy state initialization
74
+ if param not in self.state:
75
+ self.state[param] = {}
76
+ state = self.state[param]
77
+ state["step"] = torch.tensor(0.0)
78
+ state["square_avg"] = torch.zeros_like(
79
+ param, memory_format=torch.preserve_format
80
+ )
81
+ state["acc_delta"] = torch.zeros_like(
82
+ param, memory_format=torch.preserve_format
83
+ )
84
+
85
+ state = self.state[param]
86
+ square_avgs.append(state["square_avg"])
87
+ acc_deltas.append(state["acc_delta"])
88
+
89
+ with torch.no_grad():
90
+ F.adadelta(
91
+ params_with_grad,
92
+ grads,
93
+ square_avgs,
94
+ acc_deltas,
95
+ lr=lr,
96
+ rho=rho,
97
+ eps=eps,
98
+ weight_decay=weight_decay,
99
+ foreach=self.foreach,
100
+ maximize=self.maximize,
101
+ has_complex=has_complex
102
+ )
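
A hedged sketch of driving the functional optimizer directly; in practice it is constructed by the distributed optimizer machinery, and the shapes and gradients below are illustrative only:

    import torch
    from torch.distributed.optim.functional_adadelta import _FunctionalAdadelta

    params = [torch.randn(3), torch.randn(2)]
    opt = _FunctionalAdadelta(params)  # defaults: lr=1.0, rho=0.9, eps=1e-6

    # gradients are passed explicitly instead of being read from .grad;
    # a None entry means "no gradient for this parameter on this step"
    opt.step([torch.ones(3), None])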
llmeval-env/lib/python3.10/site-packages/torch/distributed/optim/functional_adagrad.py ADDED
@@ -0,0 +1,104 @@
+from typing import Dict, List, Optional
+
+import torch
+import torch.optim._functional as F
+
+from torch import Tensor
+
+__all__: List[str] = []
+
+# Define a TorchScript compatible Functional Adagrad Optimizer
+# that we use in a functional way.
+# Instead of reading `param.grad` when updating parameters,
+# we explicitly let the user pass gradients to the `step` function
+# so that we can separate the gradients and parameters
+# and allow a multithreaded trainer to update the parameters
+# without data races when accumulating into the same .grad.
+# NOTE: This should only be used by distributed optimizer internals
+# and is not meant to be exposed to the user.
+@torch.jit.script
+class _FunctionalAdagrad:
+    def __init__(
+        self,
+        params: List[Tensor],
+        lr: float = 1e-2,
+        lr_decay: float = 0.0,
+        weight_decay: float = 0.0,
+        initial_accumulator_value: float = 0.0,
+        warmup_lr_multiplier: float = 1.0,
+        warmup_num_iters: float = 0.0,
+        eps: float = 1e-10,
+        coalesce_grad: bool = True,
+        foreach: bool = False,
+        maximize: bool = False,
+        _allow_empty_param_list: bool = False,
+    ):
+        self.defaults = {
+            "lr": lr,
+            "lr_decay": lr_decay,
+            "eps": eps,
+            "weight_decay": weight_decay,
+            "initial_accumulator_value": initial_accumulator_value,
+            "warmup_lr_multiplier": warmup_lr_multiplier,
+            "warmup_num_iters": warmup_num_iters,
+        }
+        self.coalesce_grad = coalesce_grad
+        self.foreach = foreach
+        self.maximize = maximize
+        self.state = torch.jit.annotate(Dict[torch.Tensor, Dict[str, torch.Tensor]], {})
+
+        if len(params) == 0 and not _allow_empty_param_list:
+            raise ValueError("optimizer got an empty parameter list")
+
+        # NOTE: we only have one param_group and don't allow the user to add additional
+        # param groups, as it's not a common use case.
+        self.param_group = {"params": params}
+
+        # TODO: no union or any types in TorchScript, make step a scalar tensor instead
+        # This is also needed if we want to share_memory on the step across processes
+        for p in self.param_group["params"]:
+            self.state[p] = {
+                "sum": torch.full_like(p.data, initial_accumulator_value),
+                "step": torch.tensor(0.0),
+            }
+
+    def step(self, gradients: List[Optional[Tensor]]):
+        params = self.param_group["params"]
+        params_with_grad = []
+        grads = []
+        state_sums = []
+        state_steps: List[Tensor] = []
+
+        if len(params) != len(gradients):
+            raise ValueError(
+                "the number of gradients passed in does not equal the number of parameters! "
+                + f"Params length: {len(params)}. "
+                + f"Gradients length: {len(gradients)}"
+            )
+
+        has_sparse_grad, has_complex = False, False
+        for param, gradient in zip(self.param_group["params"], gradients):
+            if gradient is not None:
+                has_sparse_grad |= gradient.is_sparse
+                has_complex |= torch.is_complex(param)
+                params_with_grad.append(param)
+                grads.append(gradient)
+                state = self.state[param]
+                state_sums.append(state["sum"])
+                state_steps.append(state["step"])
+
+        with torch.no_grad():
+            F.adagrad(
+                params,
+                grads,
+                state_sums,
+                state_steps,
+                lr=self.defaults["lr"],
+                weight_decay=self.defaults["weight_decay"],
+                lr_decay=self.defaults["lr_decay"],
+                eps=self.defaults["eps"],
+                has_sparse_grad=has_sparse_grad,
+                foreach=self.foreach,
+                maximize=self.maximize,
+                has_complex=has_complex,
+            )
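Unlike `_FunctionalAdadelta` above, this class builds its per-parameter state (`sum` and `step`) eagerly in `__init__`, seeded with `initial_accumulator_value`. A short sketch of driving it directly, again with illustrative tensors and a direct import that is for demonstration only, since the class is internal to the distributed optimizer:

    import torch
    from torch.distributed.optim.functional_adagrad import _FunctionalAdagrad

    w = torch.randn(5, requires_grad=True)
    b = torch.randn(3, requires_grad=True)
    opt = _FunctionalAdagrad([w, b], lr=1e-2, initial_accumulator_value=0.1)

    # "sum" and "step" state already exist for both parameters at this point.
    loss = (w.sum() + b.sum()) * 2.0
    loss.backward()
    opt.step([w.grad, b.grad])  # one gradient slot per parameter, in order

Note that `step` forwards the full `params` list to `F.adagrad` while the gradient and state lists only contain entries for parameters whose gradient was not None, which is why this sketch supplies a gradient for every parameter.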