diff --git a/llmeval-env/lib/python3.10/site-packages/torch/_C/_autograd.pyi b/llmeval-env/lib/python3.10/site-packages/torch/_C/_autograd.pyi new file mode 100644 index 0000000000000000000000000000000000000000..4b62950fe85c131f0faf83562d5822ea68133298 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/_C/_autograd.pyi @@ -0,0 +1,123 @@ +from enum import Enum +from typing import Any, Callable, List, Optional, Set + +import torch + +from ._profiler import ( + _ProfilerEvent, + ActiveProfilerType, + ProfilerActivity, + ProfilerConfig, +) + +# Defined in tools/autograd/init.cpp + +class DeviceType(Enum): + CPU = ... + CUDA = ... + MKLDNN = ... + OPENGL = ... + OPENCL = ... + IDEEP = ... + HIP = ... + FPGA = ... + ORT = ... + XLA = ... + MPS = ... + HPU = ... + Meta = ... + Vulkan = ... + Metal = ... + PrivateUse1 = ... + +class ProfilerEvent: + def cpu_elapsed_us(self, other: ProfilerEvent) -> float: ... + def cpu_memory_usage(self) -> int: ... + def cuda_elapsed_us(self, other: ProfilerEvent) -> float: ... + def privateuse1_elapsed_us(self, other: ProfilerEvent) -> float: ... + def cuda_memory_usage(self) -> int: ... + def device(self) -> int: ... + def handle(self) -> int: ... + def has_cuda(self) -> bool: ... + def is_remote(self) -> bool: ... + def kind(self) -> int: ... + def name(self) -> str: ... + def node_id(self) -> int: ... + def sequence_nr(self) -> int: ... + def shapes(self) -> List[List[int]]: ... + def thread_id(self) -> int: ... + def flops(self) -> float: ... + def is_async(self) -> bool: ... + +class _KinetoEvent: + def name(self) -> str: ... + def device_index(self) -> int: ... + def start_us(self) -> int: ... + def duration_us(self) -> int: ... + def is_async(self) -> bool: ... + def linked_correlation_id(self) -> int: ... + def shapes(self) -> List[List[int]]: ... + def dtypes(self) -> List[str]: ... + def concrete_inputs(self) -> List[Any]: ... + def device_type(self) -> DeviceType: ... + def start_thread_id(self) -> int: ... + def end_thread_id(self) -> int: ... + def correlation_id(self) -> int: ... + def fwd_thread_id(self) -> int: ... + def stack(self) -> List[str]: ... + def scope(self) -> int: ... + def sequence_nr(self) -> int: ... + def flops(self) -> int: ... + def cuda_elapsed_us(self) -> int: ... + def privateuse1_elapsed_us(self) -> int: ... + +class _ProfilerResult: + def events(self) -> List[_KinetoEvent]: ... + def legacy_events(self) -> List[List[ProfilerEvent]]: ... + def save(self, path: str) -> None: ... + def experimental_event_tree(self) -> List[_ProfilerEvent]: ... + def trace_start_us(self) -> int: ... + +class SavedTensor: ... + +def _enable_profiler( + config: ProfilerConfig, + activities: Set[ProfilerActivity], +) -> None: ... +def _prepare_profiler( + config: ProfilerConfig, + activities: Set[ProfilerActivity], +) -> None: ... +def _disable_profiler() -> _ProfilerResult: ... +def _profiler_enabled() -> bool: ... +def _add_metadata_json(key: str, value: str) -> None: ... +def _kineto_step() -> None: ... +def _get_sequence_nr() -> int: ... +def kineto_available() -> bool: ... +def _record_function_with_args_enter(name: str, *args) -> torch.Tensor: ... +def _record_function_with_args_exit(handle: torch.Tensor) -> None: ... +def _supported_activities() -> Set[ProfilerActivity]: ... +def _enable_record_function(enable: bool) -> None: ... +def _set_empty_test_observer(is_global: bool, sampling_prob: float) -> None: ... 
+def _push_saved_tensors_default_hooks( + pack_hook: Callable[[torch.Tensor], Any], + unpack_hook: Callable[[Any], torch.Tensor], +) -> None: ... +def _pop_saved_tensors_default_hooks() -> None: ... +def _unsafe_set_version_counter(t: torch.Tensor, prev_version: int) -> None: ... +def _enable_profiler_legacy(config: ProfilerConfig) -> None: ... +def _disable_profiler_legacy() -> List[List[ProfilerEvent]]: ... +def _profiler_type() -> ActiveProfilerType: ... +def _saved_tensors_hooks_enable() -> None: ... +def _saved_tensors_hooks_disable(message: str) -> None: ... +def _saved_tensors_hooks_get_disabled_error_message() -> Optional[str]: ... + +class CreationMeta(Enum): + DEFAULT = ... + IN_CUSTOM_FUNCTION = ... + MULTI_OUTPUT_NODE = ... + NO_GRAD_MODE = ... + INFERENCE_MODE = ... + +def _set_creation_meta(t: torch.Tensor, creation_meta: CreationMeta) -> None: ... +def _get_creation_meta(t: torch.Tensor) -> CreationMeta: ... diff --git a/llmeval-env/lib/python3.10/site-packages/torch/_C/_cpu.pyi b/llmeval-env/lib/python3.10/site-packages/torch/_C/_cpu.pyi new file mode 100644 index 0000000000000000000000000000000000000000..075fecf45d5a239849dd276a5ca79b4d30ed6120 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/_C/_cpu.pyi @@ -0,0 +1,5 @@ +from torch.types import _bool + +# Defined in torch/csrc/cpu/Module.cpp + +def _is_cpu_support_vnni() -> _bool: ... diff --git a/llmeval-env/lib/python3.10/site-packages/torch/_C/_cudnn.pyi b/llmeval-env/lib/python3.10/site-packages/torch/_C/_cudnn.pyi new file mode 100644 index 0000000000000000000000000000000000000000..689c984b9d7de1ca98329495223dcb0a13a54f4e --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/_C/_cudnn.pyi @@ -0,0 +1,17 @@ +from enum import Enum + +from torch.types import _bool, Tuple + +# Defined in torch/csrc/cuda/shared/cudnn.cpp +is_cuda: _bool + +def getRuntimeVersion() -> Tuple[int, int, int]: ... +def getCompileVersion() -> Tuple[int, int, int]: ... +def getVersionInt() -> int: ... + +class RNNMode(int, Enum): + value: int + rnn_relu = ... + rnn_tanh = ... + lstm = ... + gru = ... diff --git a/llmeval-env/lib/python3.10/site-packages/torch/_C/_distributed_autograd.pyi b/llmeval-env/lib/python3.10/site-packages/torch/_C/_distributed_autograd.pyi new file mode 100644 index 0000000000000000000000000000000000000000..f4c91304a1b1a83c24bfbfd108dc234aaa3615bc --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/_C/_distributed_autograd.pyi @@ -0,0 +1,26 @@ +from typing import Any, Dict, List, Set + +import torch + +# This module is defined in torch/csrc/distributed/autograd/init.cpp + +class DistAutogradContext: + def _context_id(self) -> int: ... + def _recv_functions(self) -> Dict[int, Any]: ... + def _send_functions(self) -> Dict[int, Any]: ... + def _known_worker_ids(self) -> Set[int]: ... + +def _new_context() -> DistAutogradContext: ... +def _release_context(context_id: int) -> None: ... +def _get_max_id() -> int: ... +def _is_valid_context(worker_id: int) -> bool: ... +def _retrieve_context(context_id: int) -> DistAutogradContext: ... +def _current_context() -> DistAutogradContext: ... +def _init(worker_id: int) -> None: ... +def _get_debug_info() -> Dict[str, str]: ... +def backward( + context_id: int, + roots: List[torch.Tensor], + retain_graph=False, +) -> None: ... +def get_gradients(context_id: int) -> Dict[torch.Tensor, torch.Tensor]: ... 
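The `_distributed_autograd` stubs above are the private bindings behind the public `torch.distributed.autograd` package. A minimal sketch of that public API follows; it assumes an RPC worker group has already been initialized with `torch.distributed.rpc.init_rpc`, and the local computation stands in for any real distributed forward pass.

    import torch
    import torch.distributed.autograd as dist_autograd

    # Assumes rpc.init_rpc(...) has already been called on every worker.
    with dist_autograd.context() as context_id:
        t = torch.ones(2, requires_grad=True)
        loss = (t * 2).sum()  # placeholder for a distributed computation
        # Mirrors `backward(context_id, roots, retain_graph=False)` above.
        dist_autograd.backward(context_id, [loss])
        # Mirrors `get_gradients(context_id)`; returns a Dict[Tensor, Tensor].
        grads = dist_autograd.get_gradients(context_id)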
diff --git a/llmeval-env/lib/python3.10/site-packages/torch/_C/_distributed_c10d.pyi b/llmeval-env/lib/python3.10/site-packages/torch/_C/_distributed_c10d.pyi new file mode 100644 index 0000000000000000000000000000000000000000..9a6aaa23023cb49ab1c49691ef8dffc03d1403c1 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/_C/_distributed_c10d.pyi @@ -0,0 +1,590 @@ +# mypy: disable-error-code="type-arg" +from datetime import timedelta +from enum import Enum +from typing import Any, Dict, List, Optional, overload, Tuple, Union + +import torch +from torch import Tensor +from torch._C import ScriptObject +from torch.futures import Future + +# This module is defined in torch/csrc/distributed/c10d/init.cpp + +_DEFAULT_FIRST_BUCKET_BYTES: int +_DEFAULT_NO_TIMEOUT: timedelta +_DEFAULT_PG_TIMEOUT: timedelta +_DEFAULT_PG_NCCL_TIMEOUT: timedelta + +class BuiltinCommHookType(Enum): + ALLREDUCE = ... + FP16_COMPRESS = ... + +def _register_comm_hook(reducer: Reducer, state: Any, comm_hook: Any): ... +def _register_builtin_comm_hook( + reducer: Reducer, + comm_hook_type: BuiltinCommHookType, +): ... +def _set_global_rank(rank: int) -> None: ... +def _hash_tensors(tensors: List[Tensor]) -> int: ... + +class GradBucket: + def index(self) -> int: ... + def buffer(self) -> Tensor: ... + def gradients(self) -> List[Tensor]: ... + def is_last(self) -> bool: ... + def set_buffer(self, tensor: Tensor) -> None: ... + def parameters(self) -> List[Tensor]: ... + +class Reducer: + def __init__( + self, + params: List[Tensor], + bucket_indices: List[List[int]], + per_bucket_size_limits: List[int], + process_group: ProcessGroup, + expect_sparse_gradients: List[bool] = ..., + bucket_bytes_cap: int = ..., # kDefaultBucketBytesCap in reducer.hpp + find_unused_parameters: bool = ..., + gradient_as_bucket_view: bool = ..., + param_to_name_mapping: Dict[int, str] = ..., + first_bucket_types_cap: int = ..., # kDefaultFirstBucketBytes in reducer.hpp + ): ... + def prepare_for_forward(self) -> None: ... + def prepare_for_backward(self, output: List[Tensor]) -> None: ... + def get_backward_stats(self) -> List[int]: ... + def _install_post_backward_futures(self, futures: List[Future]) -> None: ... + def _rebuild_buckets(self) -> bool: ... + def _get_zeros_like_grad_buckets(self) -> List[GradBucket]: ... + def _push_all_rebuilt_params(self) -> None: ... + def _set_forward_pass_work_handle( + self, + work: Work, + use_static_world_size: bool, + ): ... + def _get_local_used_map(self) -> Tensor: ... + def _set_ddp_runtime_logging_sample_rate(self, sample_rate: int) -> None: ... + def _set_static_graph(self) -> None: ... + def _run_comm_hook(self, bucket: GradBucket) -> Future: ... + def set_logger(self, logger: Logger) -> None: ... + def _remove_autograd_hooks(self) -> None: ... + def _check_reducer_finalized(self) -> None: ... + def _set_sparse_metadata(self, global_unique_ids: Dict[str, Tensor]) -> None: ... + def _reset_state(self) -> None: ... + def _update_process_group(self, new_process_group: ProcessGroup) -> None: ... + +class DDPLoggingData: + strs_map: Dict[str, str] + ints_map: Dict[str, int] + +class Logger: + def __init__(self, reducer: Reducer): ... + def set_construction_data_and_log( + self, + module_name: str, + device_ids: List[int], + output_device: int, + broadcast_buffers: bool, + has_sync_bn: bool, + static_graph: bool, + ): ... + def set_runtime_stats_and_log(self) -> None: ... + def set_error_and_log(self, error: str) -> None: ... + def _get_ddp_logging_data(self) -> DDPLoggingData: ... 
+ def _set_comm_hook_name(self, comm_hook: str) -> None: ... + def _set_uneven_input_join(self) -> None: ... + def _set_static_graph(self) -> None: ... + +def get_debug_level(): ... +def set_debug_level(): ... +def set_debug_level_from_env(): ... + +class DebugLevel(Enum): + OFF = ... + INFO = ... + DETAIL = ... + +class ReduceOp: + def __init__(self, op: RedOpType): ... + + SUM: RedOpType = ... + AVG: RedOpType = ... + PRODUCT: RedOpType = ... + MIN: RedOpType = ... + MAX: RedOpType = ... + BAND: RedOpType = ... + BOR: RedOpType = ... + BXOR: RedOpType = ... + PREMUL_SUM: RedOpType = ... + UNUSED: RedOpType = ... + + class RedOpType(Enum): ... + +class BroadcastOptions: + rootRank: int + rootTensor: int + timeout: timedelta + asyncOp: bool + +class AllreduceOptions: + reduceOp: ReduceOp + timeout: timedelta + +class AllreduceCoalescedOptions(AllreduceOptions): ... + +class ReduceOptions: + reduceOp: ReduceOp + rootRank: int + rootTensor: int + timeout: timedelta + +class AllgatherOptions: + timeout: timedelta + asyncOp: bool + +class GatherOptions: + rootRank: int + timeout: timedelta + +class ScatterOptions: + rootRank: int + timeout: timedelta + asyncOp: bool + +class ReduceScatterOptions: + reduceOp: ReduceOp + timeout: timedelta + asyncOp: bool + +class BarrierOptions: + device_ids: List[int] + device: torch.device + timeout: timedelta + +class AllToAllOptions: + timeout: timedelta + +class Store: + def set(self, key: str, value: str): ... + def get(self, key: str) -> bytes: ... + def add(self, key: str, value: int) -> int: ... + def compare_set( + self, + key: str, + expected_value: str, + desired_value: str, + ) -> bytes: ... + def delete_key(self, key: str) -> bool: ... + def num_keys(self) -> int: ... + def set_timeout(self, timeout: timedelta): ... + @overload + def wait(self, keys: List[str]): ... + @overload + def wait(self, keys: List[str], timeout: timedelta): ... + +class FileStore(Store): + def __init__(self, path: str, numWorkers: int = ...): ... + +class HashStore(Store): + def __init__(self): ... + +class TCPStore(Store): + def __init__( + self, + host_name: str, + port: int, + world_size: Optional[int] = ..., + is_master: bool = ..., + timeout: timedelta = ..., + wait_for_workers: bool = ..., + multi_tenant: bool = ..., + master_listen_fd: Optional[int] = ..., + use_libuv: Optional[bool] = ..., + ): ... + @property + def host(self) -> str: ... + @property + def port(self) -> int: ... + +class PrefixStore(Store): + def __init__(self, prefix: str, store: Store): ... + @property + def underlying_store(self) -> Store: ... + +class _DistributedBackendOptions: + def __init__(self): ... + @property + def store(self) -> Store: ... + @store.setter + def store(self, store: Store) -> None: ... + @property + def group_rank(self) -> int: ... + @group_rank.setter + def group_rank(self, rank: int) -> None: ... + @property + def group_size(self) -> int: ... + @group_size.setter + def group_size(self, size: int) -> None: ... + @property + def timeout(self) -> timedelta: ... + @timeout.setter + def timeout(self, timeout: timedelta) -> None: ... + @property + def group_id(self) -> str: ... + @group_id.setter + def group_id(self, group_id: str) -> None: ... + @property + def global_ranks_in_group(self) -> List[int]: ... + @global_ranks_in_group.setter + def global_ranks_in_group(self, ranks: List[int]) -> None: ... + +class Work: + def is_completed(self) -> bool: ... + def is_success(self) -> bool: ... + def exception(self) -> Any: ... + def wait(self, timeout: timedelta = ...) 
-> bool: ... + def get_future(self) -> Future: ... + def source_rank(self) -> int: ... + def _source_rank(self) -> int: ... + def result(self) -> List[Tensor]: ... + def synchronize(self): ... + def boxed(self) -> ScriptObject: ... + @staticmethod + def unbox(obj: ScriptObject) -> Work: ... + +class Backend: + def __init__( + self, + rank: int, + size: int, + ): ... + @property + def supports_splitting(self) -> bool: ... + def rank(self) -> int: ... + def size(self) -> int: ... + def eager_connect_single_device(self, device: Optional[torch.device]) -> None: ... + def _set_sequence_number_for_group(self) -> None: ... + +class ProcessGroup: + class Options: + def __init__(self, backend: str, timeout: timedelta = ...): ... + @property + def backend(self) -> str: ... + @property + def _timeout(self) -> timedelta: ... + @_timeout.setter + def _timeout(self, val: timedelta) -> None: ... + + class BackendType(Enum): + UNDEFINED = ... + GLOO = ... + NCCL = ... + UCC = ... + MPI = ... + CUSTOM = ... + def __init__(self, store: Store, rank: int, size: int, options: Options): ... + def rank(self) -> int: ... + def size(self) -> int: ... + @overload + def broadcast( + self, + tensors: List[Tensor], + opts=..., + ) -> Work: ... + @overload + def broadcast( + self, + tensor: Tensor, + root: int, + ) -> Work: ... + @overload + def allreduce( + self, + tensors: List[Tensor], + opts: AllreduceOptions = ..., + ) -> Work: ... + @overload + def allreduce( + self, + tensors: List[Tensor], + op=..., + ) -> Work: ... + @overload + def allreduce( + self, + tensor: Tensor, + op=..., + ) -> Work: ... + def allreduce_coalesced( + self, + tensors: List[Tensor], + opts=..., + ) -> Work: ... + def reduce_scatter_tensor_coalesced( + self, + outputTensors: List[Tensor], + inputTensors: List[Tensor], + opts: Optional[ReduceScatterOptions] = None, + ) -> Work: ... + @overload + def reduce( + self, + tensors: List[Tensor], + opts=..., + ) -> Work: ... + @overload + def reduce( + self, + tensor: Tensor, + root: int, + op=..., + ) -> Work: ... + @overload + def allgather( + self, + output_tensors: List[List[Tensor]], + input_tensors: List[Tensor], + opts=..., + ) -> Work: ... + @overload + def allgather( + self, + output_tensors: List[Tensor], + input_tensor: Tensor, + ) -> Work: ... + def _allgather_base( + self, + output: Tensor, + input: Tensor, + opts=..., + ) -> Work: ... + def allgather_coalesced( + self, + output_lists: List[List[Tensor]], + input_list: List[Tensor], + opts=..., + ) -> Work: ... + def allgather_into_tensor_coalesced( + self, + output_lists: List[Tensor], + input_list: List[Tensor], + opts=..., + ) -> Work: ... + @overload + def gather( + self, + output_tensors: List[List[Tensor]], + input_tensors: List[Tensor], + opts=..., + ) -> Work: ... + @overload + def gather( + self, + output_tensors: List[Tensor], + input_tensor: Tensor, + root: int, + ) -> Work: ... + @overload + def scatter( + self, + output_tensors: List[Tensor], + input_tensors: List[List[Tensor]], + opts=..., + ) -> Work: ... + @overload + def scatter( + self, + output_tensor: Tensor, + input_tensors: List[Tensor], + root: int, + ) -> Work: ... + @overload + def reduce_scatter( + self, + output_tensors: List[Tensor], + input_tensors: List[List[Tensor]], + opts=..., + ) -> Work: ... + @overload + def reduce_scatter( + self, + output_tensors: Tensor, + input_tensor: List[Tensor], + ) -> Work: ... + def _reduce_scatter_base( + self, + outputTensor: Tensor, + inputTensor: Tensor, + opts: Optional[ReduceScatterOptions], + ) -> Work: ... 
+ @overload + def alltoall_base( + self, + output_tensor: Tensor, + input_tensor: Tensor, + output_split_sizes: List[int], + input_split_sizes: List[int], + opts=..., + ) -> Work: ... + @overload + def alltoall_base( + self, + output: Tensor, + input: Tensor, + output_split_sizes: List[int], + input_split_sizes: List[int], + ) -> Work: ... + @overload + def alltoall( + self, + output_tensor: List[Tensor], + input_tensor: List[Tensor], + opts=..., + ) -> Work: ... + @overload + def alltoall( + self, + output: List[Tensor], + input: List[Tensor], + ) -> Work: ... + def send( + self, + tensors: List[Tensor], + dstRank: int, + tag: int, + ) -> Work: ... + def recv( + self, + tensors: List[Tensor], + srcRank: int, + tag: int, + ) -> Work: ... + def recv_anysource(self, tensors: List[Tensor], tag: int) -> Work: ... + def barrier(self, opts=...) -> Work: ... + def boxed(self) -> ScriptObject: ... + @staticmethod + def unbox(obj: ScriptObject) -> ProcessGroup: ... + def _start_coalescing(self, device: torch.device) -> None: ... + def _end_coalescing(self, device: torch.device) -> Work: ... + def _get_backend_name(self) -> str: ... + def _backend_id(self, backend_type: BackendType) -> int: ... + @property + def _device_types(self) -> List[torch.device]: ... + def _get_backend(self, device: torch.device) -> Backend: ... + def _register_backend( + self, + device: torch.device, + backend_type: BackendType, + backend: Optional[Backend], + ) -> None: ... + def _set_group_name(self, name: str) -> None: ... + def name(self) -> str: ... + def _has_hooks(self) -> bool: ... + def _wait_for_pending_works(self) -> None: ... + def _set_sequence_number_for_group(self) -> None: ... + @property + def bound_device_id(self) -> Optional[torch.device]: ... + @bound_device_id.setter + def bound_device_id(self, device: Optional[torch.device]) -> None: ... + @property + def group_name(self) -> str: ... + +class ProcessGroupRoundRobin(ProcessGroup): ... + +def _round_robin_process_groups( + process_groups: List[ProcessGroup], +) -> ProcessGroupRoundRobin: ... + +class ProcessGroupGloo(Backend): + class Device: ... + class Options: ... + + def __init__( + self, + store: Store, + rank: int, + size: int, + timeout: timedelta, + ): ... + @staticmethod + def create_device(hostname="", interface="") -> Device: ... + @staticmethod + def create_default_device() -> Device: ... + def _set_default_timeout(self, timeout) -> None: ... + +class _ProcessGroupWrapper(Backend): + def __init__(self, pg: Backend, gloo_pg: ProcessGroupGloo): ... + wrapped_pg: Backend + +class ProcessGroupNCCL(Backend): + class Options: + def __init__(self, timeout: Optional[timedelta] = None): ... + @property + def backend(self) -> str: ... + @property + def _timeout(self) -> timedelta: ... + @_timeout.setter + def _timeout(self, val: timedelta) -> None: ... + @property + def _is_high_priority_stream(self) -> bool: ... + @_is_high_priority_stream.setter + def _is_high_priority_stream(self, val: bool) -> None: ... + + def __init__( + self, + store: Store, + rank: int, + size: int, + timeout: timedelta, + ): ... + def _group_start(self) -> None: ... + def _group_end(self) -> None: ... + def _set_default_timeout(self, timeout) -> None: ... + def _shutdown(self) -> None: ... + @property + def uid(self) -> int: ... + +class ProcessGroupUCC(Backend): + def __init__( + self, + store: Store, + rank: int, + size: int, + timeout: timedelta, + ): ... + +class ProcessGroupMPI(Backend): + def __init__( + self, + rank: int, + size: int, + pgComm: int, + ): ... 
+ @staticmethod + def create(ranks: List[int]) -> ProcessGroupMPI: ... + +def _compute_bucket_assignment_by_size( + tensors: List[Tensor], + bucket_size_limits: List[int], + expect_sparse_gradient: List[bool] = ..., + tensor_indices: List[int] = ..., +) -> Tuple[List[List[int]], List[int]]: ... +def _broadcast_coalesced( + process_group: ProcessGroup, + tensors: List[Tensor], + buffer_size: int, + src: int, +): ... +def _test_python_store(store: Store): ... +def _verify_params_across_processes( + process_group: ProcessGroup, + params: List[Tensor], + logger: Optional[Logger], +): ... +def _make_nccl_premul_sum(factor: Union[float, List[Tensor]]) -> ReduceOp: ... +def _register_process_group( + group_name: str, + process_group: ProcessGroup, +) -> None: ... +def _resolve_process_group(group_name: str) -> ProcessGroup: ... +def _unregister_all_process_groups() -> None: ... +def _unregister_process_group(group_name: str) -> None: ... diff --git a/llmeval-env/lib/python3.10/site-packages/torch/_C/_distributed_rpc.pyi b/llmeval-env/lib/python3.10/site-packages/torch/_C/_distributed_rpc.pyi new file mode 100644 index 0000000000000000000000000000000000000000..7909e0b8e33c6a6e3ee72f2bbbac40b914ddce93 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/_C/_distributed_rpc.pyi @@ -0,0 +1,188 @@ +# mypy: disable-error-code="type-arg" +from datetime import timedelta +from typing import Any, Dict, Generic, List, Optional, overload, Tuple, Type, TypeVar + +import torch + +from . import Future +from ._autograd import ProfilerEvent +from ._distributed_c10d import Store +from ._profiler import ProfilerConfig + +# This module is defined in torch/csrc/distributed/rpc/init.cpp + +_DEFAULT_INIT_METHOD: str +_DEFAULT_NUM_WORKER_THREADS: int +_UNSET_RPC_TIMEOUT: float +_DEFAULT_RPC_TIMEOUT_SEC: float + +_T = TypeVar("_T") + +class RpcBackendOptions: + rpc_timeout: float + init_method: str + def __init__( + self, + rpc_timeout: float = ..., + init_method: str = ..., + ): ... + +class WorkerInfo: + def __init__(self, name: str, worker_id: int): ... + @property + def name(self) -> str: ... + @property + def id(self) -> int: ... + def __eq__(self, other: object) -> bool: ... + +class RpcAgent: + def join(self, shutdown: bool = False, timeout: float = 0): ... + def sync(self): ... + def shutdown(self): ... + @overload + def get_worker_info(self) -> WorkerInfo: ... + @overload + def get_worker_info(self, workerName: str) -> WorkerInfo: ... + def get_worker_infos(self) -> List[WorkerInfo]: ... + def _get_device_map(self, dst: WorkerInfo) -> Dict[torch.device, torch.device]: ... + def get_debug_info(self) -> Dict[str, str]: ... + def get_metrics(self) -> Dict[str, str]: ... + +class PyRRef(Generic[_T]): + def __init__(self, value: _T, type_hint: Any = None) -> None: ... + def is_owner(self) -> bool: ... + def confirmed_by_owner(self) -> bool: ... + def owner(self) -> WorkerInfo: ... + def owner_name(self) -> str: ... + def to_here(self, timeout: float = ...) -> _T: ... + def local_value(self) -> Any: ... + def rpc_sync(self, timeout: float = ...) -> Any: ... + def rpc_async(self, timeout: float = ...) -> Any: ... + def remote(self, timeout: float = ...) -> Any: ... + def _serialize(self) -> Tuple: ... + @staticmethod + def _deserialize(tp: Tuple) -> PyRRef: ... + def _get_type(self) -> Type[_T]: ... + def _get_future(self) -> Future[_T]: ... + def _get_profiling_future(self) -> Future[_T]: ... + def _set_profiling_future(self, profilingFuture: Future[_T]): ... 
+ +class _TensorPipeRpcBackendOptionsBase(RpcBackendOptions): + num_worker_threads: int + device_maps: Dict[str, Dict[torch.device, torch.device]] + devices: List[torch.device] + def __init__( + self, + num_worker_threads: int, + _transports: Optional[List], + _channels: Optional[List], + rpc_timeout: float = ..., + init_method: str = ..., + device_maps: Dict[str, Dict[torch.device, torch.device]] = {}, # noqa: B006 + devices: List[torch.device] = [], # noqa: B006 + ): ... + def _set_device_map( + self, + to: str, + device_map: Dict[torch.device, torch.device], + ): ... + +class TensorPipeAgent(RpcAgent): + def __init__( + self, + store: Store, + name: str, + worker_id: int, + world_size: Optional[int], + opts: _TensorPipeRpcBackendOptionsBase, + reverse_device_maps: Dict[str, Dict[torch.device, torch.device]], + devices: List[torch.device], + ): ... + def join(self, shutdown: bool = False, timeout: float = 0): ... + def shutdown(self): ... + @overload + def get_worker_info(self) -> WorkerInfo: ... + @overload + def get_worker_info(self, workerName: str) -> WorkerInfo: ... + @overload + def get_worker_info(self, id: int) -> WorkerInfo: ... + def get_worker_infos(self) -> List[WorkerInfo]: ... + def _get_device_map(self, dst: WorkerInfo) -> Dict[torch.device, torch.device]: ... + def _update_group_membership( + self, + worker_info: WorkerInfo, + my_devices: List[torch.device], + reverse_device_map: Dict[str, Dict[torch.device, torch.device]], + is_join: bool, + ): ... + def _get_backend_options(self) -> _TensorPipeRpcBackendOptionsBase: ... + @property + def is_static_group(self) -> bool: ... + @property + def store(self) -> Store: ... + +def _is_current_rpc_agent_set() -> bool: ... +def _get_current_rpc_agent() -> RpcAgent: ... +def _set_and_start_rpc_agent(agent: RpcAgent): ... +def _reset_current_rpc_agent(): ... +def _delete_all_user_and_unforked_owner_rrefs(timeout: timedelta = ...): ... +def _destroy_rref_context(ignoreRRefLeak: bool): ... +def _rref_context_get_debug_info() -> Dict[str, str]: ... +def _cleanup_python_rpc_handler(): ... +def _invoke_rpc_builtin( + dst: WorkerInfo, + opName: str, + rpcTimeoutSeconds: float, + *args: Any, + **kwargs: Any, +): ... +def _invoke_rpc_python_udf( + dst: WorkerInfo, + pickledPythonUDF: str, + tensors: List[torch.Tensor], + rpcTimeoutSeconds: float, + isAsyncExecution: bool, +): ... +def _invoke_rpc_torchscript( + dstWorkerName: str, + qualifiedNameStr: str, + argsTuple: Tuple, + kwargsDict: Dict, + rpcTimeoutSeconds: float, + isAsyncExecution: bool, +): ... +def _invoke_remote_builtin( + dst: WorkerInfo, + opName: str, + rpcTimeoutSeconds: float, + *args: Any, + **kwargs: Any, +): ... +def _invoke_remote_python_udf( + dst: WorkerInfo, + pickledPythonUDF: str, + tensors: List[torch.Tensor], + rpcTimeoutSeconds: float, + isAsyncExecution: bool, +): ... +def _invoke_remote_torchscript( + dstWorkerName: WorkerInfo, + qualifiedNameStr: str, + rpcTimeoutSeconds: float, + isAsyncExecution: bool, + *args: Any, + **kwargs: Any, +): ... +def get_rpc_timeout() -> float: ... +def enable_gil_profiling(flag: bool): ... +def _set_rpc_timeout(rpcTimeoutSeconds: float): ... + +class RemoteProfilerManager: + @staticmethod + def set_current_profiling_key(key: str): ... + +def _enable_server_process_global_profiler(new_config: ProfilerConfig): ... +def _disable_server_process_global_profiler() -> List[List[List[ProfilerEvent]]]: ... +def _set_profiler_node_id(default_node_id: int): ... +def _enable_jit_rref_pickle(): ... +def _disable_jit_rref_pickle(): ... 
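The `_distributed_rpc` bindings above surface through the public `torch.distributed.rpc` package. A minimal two-worker sketch, assuming rank and world size come from the launcher and that a peer named "worker1" runs the same initialization:

    import torch
    import torch.distributed.rpc as rpc

    # Assumes this process is rank 0 of a two-process job.
    rpc.init_rpc("worker0", rank=0, world_size=2)
    # Synchronous, asynchronous, and remote-reference variants of a call.
    ret = rpc.rpc_sync("worker1", torch.add, args=(torch.ones(2), 3))
    fut = rpc.rpc_async("worker1", torch.add, args=(torch.ones(2), 1))
    rref = rpc.remote("worker1", torch.add, args=(torch.ones(2), 1))
    print(ret, fut.wait(), rref.to_here())
    rpc.shutdown()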
diff --git a/llmeval-env/lib/python3.10/site-packages/torch/_C/_distributed_rpc_testing.pyi b/llmeval-env/lib/python3.10/site-packages/torch/_C/_distributed_rpc_testing.pyi new file mode 100644 index 0000000000000000000000000000000000000000..1ed8304bc6378bb1fbd5e2d15369fe66969acae4 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/_C/_distributed_rpc_testing.pyi @@ -0,0 +1,35 @@ +from typing import Dict, List + +import torch + +from ._distributed_c10d import Store +from ._distributed_rpc import _TensorPipeRpcBackendOptionsBase, TensorPipeAgent + +# This module is defined in torch/csrc/distributed/rpc/testing/init.cpp + +class FaultyTensorPipeRpcBackendOptions(_TensorPipeRpcBackendOptionsBase): + def __init__( + self, + num_worker_threads: int, + rpc_timeout: float, + init_method: str, + messages_to_fail: List[str], + messages_to_delay: Dict[str, float], + num_fail_sends: int, + ): ... + num_send_recv_threads: int + messages_to_fail: List[str] + messages_to_delay: Dict[str, float] + num_fail_sends: int + +class FaultyTensorPipeAgent(TensorPipeAgent): + def __init__( + self, + store: Store, + name: str, + rank: int, + world_size: int, + options: FaultyTensorPipeRpcBackendOptions, + reverse_device_maps: Dict[str, Dict[torch.device, torch.device]], + devices: List[torch.device], + ): ... diff --git a/llmeval-env/lib/python3.10/site-packages/torch/_C/_functorch.pyi b/llmeval-env/lib/python3.10/site-packages/torch/_C/_functorch.pyi new file mode 100644 index 0000000000000000000000000000000000000000..d36d0efae2d92d6276dd70b0dcd1d0dda4dfc3ce --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/_C/_functorch.pyi @@ -0,0 +1,77 @@ +from enum import Enum +from typing import Optional, Tuple + +from torch import Tensor + +# Defined in torch/csrc/functorch/init.cpp + +def _set_dynamic_layer_keys_included(included: bool) -> None: ... +def get_unwrapped(tensor: Tensor) -> Tensor: ... +def is_batchedtensor(tensor: Tensor) -> bool: ... +def is_functionaltensor(tensor: Tensor) -> bool: ... +def is_functorch_wrapped_tensor(tensor: Tensor) -> bool: ... +def is_gradtrackingtensor(tensor: Tensor) -> bool: ... +def maybe_get_bdim(tensor: Tensor) -> int: ... +def maybe_get_level(tensor: Tensor) -> int: ... +def maybe_current_level() -> Optional[int]: ... +def unwrap_if_dead(tensor: Tensor) -> Tensor: ... +def _unwrap_for_grad(tensor: Tensor, level: int) -> Tensor: ... +def _wrap_for_grad(tensor: Tensor, level: int) -> Tensor: ... +def _unwrap_batched(tensor: Tensor, level: int) -> Tuple[Tensor, Optional[int]]: ... +def current_level() -> int: ... +def _add_batch_dim(tensor: Tensor, bdim: int, level: int) -> Tensor: ... +def set_single_level_autograd_function_allowed(allowed: bool) -> None: ... +def get_single_level_autograd_function_allowed() -> bool: ... +def _unwrap_functional_tensor(tensor: Tensor, reapply_views: bool) -> Tensor: ... +def _wrap_functional_tensor(tensor: Tensor, level: int) -> Tensor: ... +def _vmap_increment_nesting(batch_size: int, randomness: str) -> int: ... +def _vmap_decrement_nesting() -> int: ... +def _grad_increment_nesting() -> int: ... +def _grad_decrement_nesting() -> int: ... + +# Defined in aten/src/ATen/functorch/Interpreter.h +class TransformType(Enum): + Torch: TransformType = ... + Vmap: TransformType = ... + Grad: TransformType = ... + Jvp: TransformType = ... + Functionalize: TransformType = ... + +class RandomnessType(Enum): + Error: TransformType = ... + Same: TransformType = ... + Different: TransformType = ... 
+ +class CInterpreter: + def key(self) -> TransformType: ... + def level(self) -> int: ... + +class CGradInterpreterPtr: + def __init__(self, interpreter: CInterpreter): ... + def lift(self, Tensor) -> Tensor: ... + def prevGradMode(self) -> bool: ... + +class CJvpInterpreterPtr: + def __init__(self, interpreter: CInterpreter): ... + def lift(self, Tensor) -> Tensor: ... + def prevFwdGradMode(self) -> bool: ... + +class CFunctionalizeInterpreterPtr: + def __init__(self, interpreter: CInterpreter): ... + def key(self) -> TransformType: ... + def level(self) -> int: ... + def functionalizeAddBackViews(self) -> bool: ... + +class CVmapInterpreterPtr: + def __init__(self, interpreter: CInterpreter): ... + def key(self) -> TransformType: ... + def level(self) -> int: ... + def batchSize(self) -> int: ... + def randomness(self) -> RandomnessType: ... + +class DynamicLayer: ... + +def get_interpreter_stack() -> list[CInterpreter]: ... +def peek_interpreter_stack() -> CInterpreter: ... +def pop_dynamic_layer_stack() -> DynamicLayer: ... +def push_dynamic_layer_stack(dl: DynamicLayer) -> int: ... diff --git a/llmeval-env/lib/python3.10/site-packages/torch/_C/_nn.pyi b/llmeval-env/lib/python3.10/site-packages/torch/_C/_nn.pyi new file mode 100644 index 0000000000000000000000000000000000000000..68b90de2ae891466b8b30110877976d9468320cc --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/_C/_nn.pyi @@ -0,0 +1,86 @@ +# mypy: disable-error-code="type-arg" +from typing import List, Optional, overload, Sequence, Tuple, Union + +from torch import memory_format, Tensor +from torch.types import _bool, _device, _dtype, _int, _size + +# Defined in tools/autograd/templates/python_nn_functions.cpp + +def adaptive_max_pool2d(input: Tensor, output_size: Union[_int, _size]) -> Tuple[Tensor, Tensor]: ... +def adaptive_max_pool3d(input: Tensor, output_size: Union[_int, _size]) -> Tuple[Tensor, Tensor]: ... +def avg_pool2d(input: Tensor, kernel_size: Union[_int, _size], stride: Optional[Union[_int, _size]] = None, padding: Union[_int, _size] = 0, ceil_mode: bool = False, count_include_pad: bool = True, divisor_override: Optional[int] = None) -> Tensor: ... +def avg_pool3d(input: Tensor, kernel_size: Union[_int, _size], stride: Optional[Union[_int, _size]] = None, padding: Union[_int, _size] = 0, ceil_mode: bool = False, count_include_pad: bool = True, divisor_override: Optional[int] = None) -> Tensor: ... +def elu_(input: Tensor, alpha: float = ...) -> Tensor: ... +def fractional_max_pool2d(input: Tensor, kernel_size: Union[_int, _size], output_size: Union[_int, _size], _random_samples: Tensor) -> Tuple[Tensor, Tensor]: ... +def fractional_max_pool3d(input: Tensor, kernel_size: Union[_int, _size], output_size: Union[_int, _size], _random_samples: Tensor) -> Tuple[Tensor, Tensor]: ... +def gelu(input: Tensor, approximate: str = ...) -> Tensor: ... +def hardsigmoid(input: Tensor, *, out: Optional[Tensor] = None) -> Tensor: ... +def hardtanh(input: Tensor, min_val: float = ..., max_val: float = ..., *, out: Optional[Tensor] = None) -> Tensor: ... +def hardtanh_(input: Tensor, min_val: float = ..., max_val: float = ...) -> Tensor: ... +def leaky_relu(input: Tensor, negative_slope: float = ..., *, out: Optional[Tensor] = None) -> Tensor: ... +def leaky_relu_(input: Tensor, negative_slope: float = ...) -> Tensor: ... +def linear(input: Tensor, weight: Tensor, bias: Optional[Tensor] = None) -> Tensor: ... +def log_sigmoid(input: Tensor) -> Tensor: ... +def one_hot(tensor: Tensor, num_classes: int = ...) 
-> Tensor: ... +def pad(input: Tensor, pad: Sequence[int], mode: str = ..., value: Optional[float] = None) -> Tensor: ... +def scaled_dot_product_attention(query: Tensor, key: Tensor, value: Tensor, attn_mask: Optional[Tensor] = None, dropout_p: float = 0.0, is_causal: bool = False, scale: Optional[float] = None) -> Tensor: ... +def softplus(input: Tensor, beta: float = ..., threshold: float = ...) -> Tensor: ... +def softshrink(input: Tensor, lambd: float = ...) -> Tensor: ... + +# Defined in aten/src/ATen/native/mkldnn/Linear.cpp +def mkldnn_linear(input: Tensor, weight: Tensor, bias: Optional[Tensor]) -> Tensor: ... + +# Defined at aten/src/ATen/native/mkldnn/MKLDNNConversions.cpp +def mkldnn_reorder_conv2d_weight( + self: Tensor, + padding: List, + stride: List, + dilatation: List, + groups: int, +) -> Tensor: ... +def mkldnn_reorder_conv3d_weight( + self: Tensor, + padding: List, + stride: List, + dilatation: List, + groups: int, +) -> Tensor: ... + +# Defined in aten/src/ATen/native/mkldnn/Prelu.cpp +def mkldnn_prelu(input: Tensor, weight: Tensor) -> Tensor: ... + +# Defined at tools/autograd/templates/python_nn_functions.cpp +@overload +def _parse_to( + device: _device, + dtype: _dtype, + non_blocking: _bool, + copy: _bool, + *, + memory_format: memory_format, +) -> Tuple[_device, _dtype, _bool, memory_format]: ... +@overload +def _parse_to( + dtype: _dtype, + non_blocking: _bool, + copy: _bool, + *, + memory_format: memory_format, +) -> Tuple[_device, _dtype, _bool, memory_format]: ... +@overload +def _parse_to( + tensor: Tensor, + non_blocking: _bool, + copy: _bool, + *, + memory_format: memory_format, +) -> Tuple[_device, _dtype, _bool, memory_format]: ... + +# Defined in aten/src/ATen/native/PadSequence.cpp +def pad_sequence( + sequences: List[Tensor], + batch_first: bool = False, + padding_value: float = ..., +) -> Tensor: ... +def flatten_dense_tensors(tensors: List[Tensor]) -> Tensor: ... +def unflatten_dense_tensors(flat: Tensor, tensors: List[Tensor]) -> List[Tensor]: ... diff --git a/llmeval-env/lib/python3.10/site-packages/torch/_C/_onnx.pyi b/llmeval-env/lib/python3.10/site-packages/torch/_C/_onnx.pyi new file mode 100644 index 0000000000000000000000000000000000000000..2e8e5a0c6611720f7cf755be86a68a83dec08476 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/_C/_onnx.pyi @@ -0,0 +1,40 @@ +# Defined in torch/csrc/onnx/init.cpp + +from enum import Enum + +_CAFFE2_ATEN_FALLBACK: bool +PRODUCER_VERSION: str + +class TensorProtoDataType(Enum): + UNDEFINED = ... + FLOAT = ... + UINT8 = ... + INT8 = ... + UINT16 = ... + INT16 = ... + INT32 = ... + INT64 = ... + STRING = ... + BOOL = ... + FLOAT16 = ... + DOUBLE = ... + UINT32 = ... + UINT64 = ... + COMPLEX64 = ... + COMPLEX128 = ... + BFLOAT16 = ... + FLOAT8E5M2 = ... + FLOAT8E4M3FN = ... + FLOAT8E5M2FNUZ = ... + FLOAT8E4M3FNUZ = ... + +class OperatorExportTypes(Enum): + ONNX = ... + ONNX_ATEN = ... + ONNX_ATEN_FALLBACK = ... + ONNX_FALLTHROUGH = ... + +class TrainingMode(Enum): + EVAL = ... + PRESERVE = ... + TRAINING = ... 
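The `_onnx` enums above are re-exported as `torch.onnx.TrainingMode` and `torch.onnx.OperatorExportTypes` and consumed by the TorchScript-based exporter. A small sketch, with the model, input shape, and output filename chosen only for illustration:

    import torch
    import torch.nn as nn

    model = nn.Linear(4, 2)
    dummy = torch.randn(1, 4)
    # Export an inference-mode graph using the default ONNX operator export type.
    torch.onnx.export(
        model,
        dummy,
        "linear.onnx",
        training=torch.onnx.TrainingMode.EVAL,
        operator_export_type=torch.onnx.OperatorExportTypes.ONNX,
    )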
diff --git a/llmeval-env/lib/python3.10/site-packages/torch/distributed/_composable/__init__.py b/llmeval-env/lib/python3.10/site-packages/torch/distributed/_composable/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..0f6f9ccfa27ee04f0138daa65df48470fe24d770 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/distributed/_composable/__init__.py @@ -0,0 +1,4 @@ +from .checkpoint_activation import checkpoint +from .contract import _get_registry, contract +from .fully_shard import fully_shard +from .replicate import replicate diff --git a/llmeval-env/lib/python3.10/site-packages/torch/distributed/_composable/fsdp/__pycache__/__init__.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/torch/distributed/_composable/fsdp/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..6d9e0ef1a78e8c10d8812cdb4ec1a30515e71cc5 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/torch/distributed/_composable/fsdp/__pycache__/__init__.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/torch/distributed/_composable/fsdp/__pycache__/_fsdp_api.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/torch/distributed/_composable/fsdp/__pycache__/_fsdp_api.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..64eae8cf45a507b94f1a974bf360d57dc1d0bee4 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/torch/distributed/_composable/fsdp/__pycache__/_fsdp_api.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/torch/distributed/_composable/fsdp/__pycache__/_fsdp_collectives.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/torch/distributed/_composable/fsdp/__pycache__/_fsdp_collectives.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..6b9658af9299e98f1885c71530cf4cebc0caec4c Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/torch/distributed/_composable/fsdp/__pycache__/_fsdp_collectives.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/torch/distributed/_composable/fsdp/__pycache__/_fsdp_common.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/torch/distributed/_composable/fsdp/__pycache__/_fsdp_common.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..22c59912c7d75e00b2ef268a08a8206961c0a581 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/torch/distributed/_composable/fsdp/__pycache__/_fsdp_common.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/torch/distributed/_composable/fsdp/__pycache__/_fsdp_init.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/torch/distributed/_composable/fsdp/__pycache__/_fsdp_init.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..d376d5de3aca25de83e0a76c1a5088b8977b12a9 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/torch/distributed/_composable/fsdp/__pycache__/_fsdp_init.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/torch/distributed/_composable/fsdp/__pycache__/_fsdp_param.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/torch/distributed/_composable/fsdp/__pycache__/_fsdp_param.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..c2de18afe6bc9b68f5faf5e42ebdbde13bf8abff Binary files /dev/null and 
b/llmeval-env/lib/python3.10/site-packages/torch/distributed/_composable/fsdp/__pycache__/_fsdp_param.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/torch/distributed/_composable/fsdp/__pycache__/_fsdp_state.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/torch/distributed/_composable/fsdp/__pycache__/_fsdp_state.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..5eb62fc134e3f7a90684d327873b1db4ef2fc426 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/torch/distributed/_composable/fsdp/__pycache__/_fsdp_state.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/torch/distributed/_composable/fsdp/__pycache__/fully_shard.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/torch/distributed/_composable/fsdp/__pycache__/fully_shard.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..9cb7b8ffbae3be2565ab034a33bcb8c8db9018c8 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/torch/distributed/_composable/fsdp/__pycache__/fully_shard.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/torch/distributed/_composable/replicate.py b/llmeval-env/lib/python3.10/site-packages/torch/distributed/_composable/replicate.py new file mode 100644 index 0000000000000000000000000000000000000000..b3205f9aff0352a390c1acbe503165c5cf07d60b --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/distributed/_composable/replicate.py @@ -0,0 +1,154 @@ +import weakref +from typing import Any, Dict, Iterable, List, Optional, Set, Tuple + +import torch +import torch.nn as nn +from torch.distributed._composable_state import _State +from torch.nn.parallel import DistributedDataParallel + +from .contract import _get_registry, contract + +_ROOT_MODULE_PREFIX = "" + + +class _ReplicateState(_State): + def __init__(self) -> None: + super().__init__() + self.module: nn.Module = nn.ParameterList() + self.has_initialized: bool = False + self._param_list: nn.ParameterList = nn.ParameterList() + # TODO(@fegin): this variable is originally create for testing, we + # should remove this if possible. + self._param_names: List[str] = [] + + def _collect_params( + self, + module: nn.Module, + ignored_modules: Set[nn.Module], + ignored_params: Set[nn.Parameter], + prefix: str = _ROOT_MODULE_PREFIX, + ) -> None: + # skip if managed by fully_sharded API + if _is_fully_sharded(module): + return + + # if a module is ignored, all descendants of the module are ignored. + if module in ignored_modules: + return + + recurse_prefix = ( + f"{prefix}." 
if prefix != _ROOT_MODULE_PREFIX else _ROOT_MODULE_PREFIX + ) + + for n, p in module.named_parameters(recurse=False): + if p not in ignored_params: + self._param_list.append(p) + self._param_names.append(f"{recurse_prefix}{n}") + + for name, child_module in module.named_children(): + self._collect_params( + child_module, + ignored_modules, + ignored_params, + prefix=f"{recurse_prefix}{name}", + ) + + def init( + self, + module: nn.Module, + ignored_modules: Set[nn.Module], + **kwargs, + ) -> None: + if _is_fully_sharded(module): + raise RuntimeError( + "Cannot apply `replicate()` on a Module already managed by `fully_shard`" + ) + + if self.has_initialized: + return + + self.has_initialized = True + self.module = module + ignored_params = {p for m in ignored_modules for p in m.parameters()} + self._collect_params(module, ignored_modules, ignored_params) + module.register_forward_pre_hook(self.forward_pre_hook, with_kwargs=True) + module.register_forward_hook(self.forward_post_hook) # type: ignore[arg-type] + + if "device_id" in kwargs: + # replicate() supports a small usability enhancement where + # user can pass in device_id as a Union[int, torch.device] even for + # CPU devices so users don't have to change code for CPU/GPU runs. + # We derive the right device_ids to feed into DDP to support this. + if kwargs["device_id"] is not None: + device_id = kwargs["device_id"] + # Convert to device_ids that DDP expects. + if isinstance(device_id, torch.device) and device_id.type == "cpu": + # CPU modules receive device_ids None + kwargs["device_ids"] = None + else: + # GPU modules expect device_ids=[cuda_device] + kwargs["device_ids"] = [device_id] + else: + kwargs["device_ids"] = None + kwargs.pop("device_id") + + self._ddp = DistributedDataParallel(self._param_list, **kwargs) + # Weakref to the DDP instance is currently only used for testing. + replicate.state(self.module)._ddp_weakref = weakref.ref(self._ddp) + + def forward_pre_hook( + self, module: nn.Module, args: Tuple[Any, ...], kwargs: Dict[str, Any] + ) -> Any: + return self._ddp._pre_forward(*args, **kwargs) + + def forward_post_hook( + self, + module: nn.Module, + input: Tuple[torch.Tensor], + output: torch.Tensor, + ) -> torch.Tensor: + return self._ddp._post_forward(output) + + +@contract(state_cls=_ReplicateState) +def replicate( + module: nn.Module, + ignored_modules: Optional[Iterable[torch.nn.Module]] = None, + **kwargs, +) -> nn.Module: + r"""Replicates a module + + Args: + module (torch.nn.Module): module to replicate + + Example:: + >>> # xdoctest: +REQUIRES(module:torch._C._distributed_c10d) + >>> module = nn.Linear(3, 3) + >>> replicate(module) + """ + torch._C._log_api_usage_once("torch.distributed.replicate") + + # TODO(fegin): using kwargs is not a good idea if we would like to make + # replicate a formal API to replace DDP. 
+ if "device_id" in kwargs: + if not isinstance(kwargs["device_id"], (int, torch.device)): + raise RuntimeError( + "Expected device_id to be int or torch.device, " + f"but got {type(kwargs['device_id'])}" + ) + + if ignored_modules is None: + ignored_modules = {} + else: + ignored_modules = set(ignored_modules) + replicate.state(module).init(module, ignored_modules, **kwargs) + + return module + + +def _is_fully_sharded(module: nn.Module) -> bool: + r"""Check if module is marked with fully_shard.""" + registry = _get_registry(module) + if registry is None: + return False + return "fully_shard" in registry diff --git a/llmeval-env/lib/python3.10/site-packages/torch/distributed/_tensor/ops/pointwise_ops.py b/llmeval-env/lib/python3.10/site-packages/torch/distributed/_tensor/ops/pointwise_ops.py new file mode 100644 index 0000000000000000000000000000000000000000..dd9ca8ef66812a72fc5be30fef8f4a964b271780 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/distributed/_tensor/ops/pointwise_ops.py @@ -0,0 +1,629 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates +from typing import List, Sequence, Tuple + +import torch + +from torch.distributed._tensor.op_schema import ( + _is_inplace_op, + _is_out_variant_op, + OpSchema, + OpStrategy, + PlacementStrategy, + RuntimeSchemaInfo, + StrategyType, + TupleStrategy, +) + +from torch.distributed._tensor.ops.utils import ( + generate_redistribute_costs, + infer_broadcast_dims_map, + map_placements_after_broadcast, + normalize_dim, + register_op_strategy, +) +from torch.distributed._tensor.placement_types import ( + _Partial, + DTensorSpec, + Placement, + Replicate, + Shard, +) +from torch.distributed.device_mesh import DeviceMesh + + +aten = torch.ops.aten +# leave the remaining pointwise_ops list here for convenience, +# Below ops are some pointwise ops that are yet to be supported, +# they might not be a complete list. +# pointwise_ops = [ +# "fake_quantize_per_channel_affine", +# "fake_quantize_per_tensor_affine", +# "floor_divide", # floor_divide is deprecated +# "frexp", # multiple output pointwise op, need to add support +# "gradient", # need investigation on this op +# "imag", # complex data type only +# "quantized_batch_norm", +# "quantized_max_pool1d", +# "quantized_max_pool2d", +# "real", # complex data type only +# ] + + +linear_pointwise_ops = [ + aten.div.Scalar, # this op is linear on the first argument, and the second argument is scalar, so it fits as a linear op. + aten.div_.Scalar, # this op is linear on the first argument, and the second argument is scalar, so it fits as a linear op. 
+ aten.to.dtype, + aten.add.Tensor, + aten.add_.Tensor, +] + + +pointwise_ops = [ + # please keep the entries below alphabetically sorted + aten.abs.default, + aten.abs.out, + aten.abs_.default, + aten.acos.default, + aten.acos.out, + aten.acos_.default, + aten.acosh.default, + aten.acosh.out, + aten.acosh_.default, + aten.add.Scalar, + aten.add.out, + aten.add_.Scalar, + aten.addcdiv.default, + aten.addcdiv.out, + aten.addcdiv_.default, + aten.addcmul.default, + aten.addcmul.out, + aten.addcmul_.default, + aten.angle.default, + aten.angle.out, + aten.asin.default, + aten.asin.out, + aten.asin_.default, + aten.asinh.default, + aten.asinh.out, + aten.asinh_.default, + aten.atan.default, + aten.atan.out, + aten.atan2.default, + aten.atan2.out, + aten.atan2_.default, + aten.atan_.default, + aten.atanh.default, + aten.atanh.out, + aten.atanh_.default, + aten.bitwise_and.Scalar, + aten.bitwise_and.Scalar_Tensor, + aten.bitwise_and.Scalar_out, + aten.bitwise_and.Tensor, + aten.bitwise_and.Tensor_out, + aten.bitwise_and_.Scalar, + aten.bitwise_and_.Tensor, + aten.bitwise_left_shift.Scalar_Tensor, + aten.bitwise_left_shift.Tensor, + aten.bitwise_left_shift.Tensor_Scalar, + aten.bitwise_left_shift.Tensor_Scalar_out, + aten.bitwise_left_shift.Tensor_out, + aten.bitwise_left_shift_.Tensor, + aten.bitwise_left_shift_.Tensor_Scalar, + aten.bitwise_not.default, + aten.bitwise_not.out, + aten.bitwise_not_.default, + aten.bitwise_or.Scalar, + aten.bitwise_or.Scalar_Tensor, + aten.bitwise_or.Scalar_out, + aten.bitwise_or.Tensor, + aten.bitwise_or.Tensor_out, + aten.bitwise_or_.Scalar, + aten.bitwise_or_.Tensor, + aten.bitwise_right_shift.Scalar_Tensor, + aten.bitwise_right_shift.Tensor, + aten.bitwise_right_shift.Tensor_Scalar, + aten.bitwise_right_shift.Tensor_Scalar_out, + aten.bitwise_right_shift.Tensor_out, + aten.bitwise_right_shift_.Tensor, + aten.bitwise_right_shift_.Tensor_Scalar, + aten.bitwise_xor.Scalar, + aten.bitwise_xor.Scalar_Tensor, + aten.bitwise_xor.Scalar_out, + aten.bitwise_xor.Tensor, + aten.bitwise_xor.Tensor_out, + aten.bitwise_xor_.Scalar, + aten.bitwise_xor_.Tensor, + aten.ceil.default, + aten.ceil.out, + aten.ceil_.default, + aten.clamp.default, + aten.clamp.out, + aten.clamp_.default, + aten.clip.default, + aten.clip.out, + aten.clip_.default, + aten.conj_physical.default, + aten.conj_physical.out, + aten.conj_physical_.default, + aten.copysign.Scalar, + aten.copysign.Scalar_out, + aten.copysign.Tensor, + aten.copysign.out, + aten.copysign_.Scalar, + aten.copysign_.Tensor, + aten.cos.default, + aten.cos.out, + aten.cos_.default, + aten.cosh.default, + aten.cosh.out, + aten.cosh_.default, + aten.deg2rad.default, + aten.deg2rad.out, + aten.deg2rad_.default, + aten.digamma.default, + aten.digamma.out, + aten.digamma_.default, + aten.div.Tensor, + aten.div.Tensor_mode, + aten.div.out, + aten.div.out_mode, + aten.div_.Tensor, + aten.div_.Tensor_mode, + aten.eq.Tensor, + aten.eq.Tensor_out, + aten.eq.Scalar, + aten.eq.Scalar_out, + aten.erf.default, + aten.erf.out, + aten.erf_.default, + aten.erfc.default, + aten.erfc.out, + aten.erfc_.default, + aten.erfinv.default, + aten.erfinv.out, + aten.erfinv_.default, + aten.exp.default, + aten.exp.out, + aten.exp2.default, + aten.exp2.out, + aten.exp2_.default, + aten.exp_.default, + aten.expm1.default, + aten.expm1.out, + aten.expm1_.default, + aten.float_power.Scalar, + aten.float_power.Scalar_out, + aten.float_power.Tensor_Scalar, + aten.float_power.Tensor_Scalar_out, + aten.float_power.Tensor_Tensor, + aten.float_power.Tensor_Tensor_out, + 
aten.float_power_.Scalar, + aten.float_power_.Tensor, + aten.floor.default, + aten.floor.out, + aten.floor_.default, + aten.fmod.Scalar, + aten.fmod.Scalar_out, + aten.fmod.Tensor, + aten.fmod.Tensor_out, + aten.fmod_.Scalar, + aten.fmod_.Tensor, + aten.frac.default, + aten.frac.out, + aten.frac_.default, + aten.ge.Scalar, + aten.ge.Tensor, + aten.gelu.default, + aten.gt.Tensor, + aten.gt.Tensor_out, + aten.gt.Scalar, + aten.gt.Scalar_out, + aten.gt.Scalar, + aten.gt.Tensor, + aten.hypot.default, + aten.hypot.out, + aten.hypot_.default, + aten.i0.default, + aten.i0.out, + aten.i0_.default, + aten.igamma.default, + aten.igamma.out, + aten.igamma_.default, + aten.igammac.default, + aten.igammac.out, + aten.igammac_.default, + aten.isnan.default, + aten.ldexp.default, + aten.ldexp.out, + aten.ldexp_.default, + aten.lt.Tensor, + aten.lt.Tensor_out, + aten.lt.Scalar, + aten.lt.Scalar_out, + aten.le.Scalar, + aten.le.Tensor, + aten.lerp.Scalar, + aten.lerp.Scalar_out, + aten.lerp.Tensor, + aten.lerp.Tensor_out, + aten.lerp_.Scalar, + aten.lerp_.Tensor, + aten.lgamma.default, + aten.lgamma.out, + aten.lgamma_.default, + aten.log.default, + aten.log.out, + aten.log10.default, + aten.log10.out, + aten.log10_.default, + aten.log1p.default, + aten.log1p.out, + aten.log1p_.default, + aten.log2.default, + aten.log2.out, + aten.log2_.default, + aten.log_.default, + aten.logaddexp.default, + aten.logaddexp.out, + aten.logaddexp2.default, + aten.logaddexp2.out, + aten.logical_and.default, + aten.logical_and.out, + aten.logical_and_.default, + aten.logical_not.default, + aten.logical_not.out, + aten.logical_not_.default, + aten.logical_or.default, + aten.logical_or.out, + aten.logical_or_.default, + aten.logical_xor.default, + aten.logical_xor.out, + aten.logical_xor_.default, + aten.logit.default, + aten.logit.out, + aten.logit_.default, + aten.masked_fill.Scalar, + aten.maximum.out, + aten.mul.Scalar, + aten.mul.Tensor, + aten.mul.out, + aten.mul_.Scalar, + aten.mul_.Tensor, + aten.mvlgamma.default, + aten.mvlgamma.out, + aten.mvlgamma_.default, + aten.native_dropout_backward.default, + aten.native_dropout_backward.out, + aten.nan_to_num.default, + aten.nan_to_num.out, + aten.nan_to_num_.default, + aten.ne.Scalar, + aten.neg.default, + aten.neg.out, + aten.neg_.default, + aten.nextafter.default, + aten.nextafter.out, + aten.nextafter_.default, + aten.polygamma.default, + aten.polygamma.out, + aten.polygamma_.default, + aten.positive.default, + aten.pow.Scalar, + aten.pow.Scalar_out, + aten.pow.Tensor_Scalar, + aten.pow.Tensor_Scalar_out, + aten.pow.Tensor_Tensor, + aten.pow.Tensor_Tensor_out, + aten.pow_.Scalar, + aten.pow_.Tensor, + aten.reciprocal.default, + aten.reciprocal.out, + aten.reciprocal_.default, + aten.rad2deg.default, + aten.rad2deg.out, + aten.rad2deg_.default, + aten.relu.default, + aten.relu_.default, + aten.remainder.Scalar, + aten.remainder.Scalar_Tensor, + aten.remainder.Scalar_out, + aten.remainder.Tensor, + aten.remainder.Tensor_out, + aten.remainder_.Scalar, + aten.remainder_.Tensor, + aten.round.decimals, + aten.round.decimals_out, + aten.round.default, + aten.round.out, + aten.round_.decimals, + aten.round_.default, + aten.rsqrt.default, + aten.rsqrt.out, + aten.rsqrt_.default, + aten.rsub.Scalar, + aten.sgn.default, + aten.sgn.out, + aten.sgn_.default, + aten.sigmoid.default, + aten.sigmoid.out, + aten.sigmoid_.default, + aten.sign.default, + aten.sign.out, + aten.sign_.default, + aten.signbit.default, + aten.signbit.out, + aten.silu.default, + aten.silu.out, + aten.sin.default, 
+ aten.sin.out, + aten.sin_.default, + aten.sinc.default, + aten.sinc.out, + aten.sinc_.default, + aten.sinh.default, + aten.sinh.out, + aten.sinh_.default, + aten.sqrt.default, + aten.sqrt.out, + aten.sqrt_.default, + aten.square.default, + aten.square.out, + aten.square_.default, + aten.sub.Scalar, + aten.sub.Tensor, + aten.sub.out, + aten.sub_.Scalar, + aten.sub_.Tensor, + aten.tan.default, + aten.tan.out, + aten.tan_.default, + aten.tanh.default, + aten.tanh.out, + aten.tanh_.default, + aten.true_divide.Tensor, + aten.trunc.default, + aten.trunc.out, + aten.trunc_.default, + aten.where.self, + aten.where.self_out, + aten.xlogy.OutScalar_Self, + aten.xlogy.OutScalar_Other, + aten.xlogy.OutTensor, + aten.xlogy.Scalar_Other, + aten.xlogy.Scalar_Self, + aten.xlogy.Tensor, + aten.xlogy_.Scalar_Other, + aten.xlogy_.Tensor, + # backward point-wise ops + # please keep the entries below alphabetically sorted + aten.gelu_backward.default, + aten.sigmoid_backward.default, + aten.silu_backward.default, + aten.tanh_backward.default, + aten.threshold_backward.default, +] + + +def pointwise_strategy( + mesh: DeviceMesh, op_schema: OpSchema, linearity: bool = False +) -> OpStrategy: + max_shards_strategy_index = -1 + max_shards = -1 + + if _is_inplace_op(op_schema.op): + # inplace op should follow the first arg strategy + followed_strategy = op_schema.args_schema[0] + elif _is_out_variant_op(op_schema.op): + # out variant op should follow the out kwarg strategy + followed_strategy = op_schema.kwargs_schema["out"] + else: + # normal pointwise op, we choose to follow the arg with + # the max shards in case operands needs reshard + for idx, arg_strategy in enumerate(op_schema.args_schema): + if not isinstance(arg_strategy, OpStrategy): + continue + + arg_max_shards = arg_strategy.max_num_shards() + if arg_max_shards > max_shards: + max_shards_strategy_index = idx + max_shards = arg_max_shards + + followed_strategy = op_schema.args_schema[max_shards_strategy_index] + + assert isinstance( + followed_strategy, OpStrategy + ), f"no strategy to follow for {op_schema}!" 
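    # e.g. for aten.add.Tensor(x, y) where x is sharded over more mesh dims than y,
    # x has the larger max_num_shards(), so its strategy is followed and y picks up
    # redistribute costs toward it in common_pointwise_strategy below.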
+ return common_pointwise_strategy( + mesh, op_schema.args_schema, followed_strategy, linearity + ) + + +def common_pointwise_strategy( + mesh: DeviceMesh, + args_schema: Sequence[object], + followed_strategy: OpStrategy, + linearity: bool, +) -> OpStrategy: + # handle broadcasting + common_shape = torch.broadcast_shapes( + *[arg.output_shape for arg in args_schema if isinstance(arg, OpStrategy)] + ) + pointwise_strategy = OpStrategy([]) + + for placement_strategy in followed_strategy.strategies: + spec_to_follow = placement_strategy.output_spec + out_placements: List[Placement] = [] + for placement in spec_to_follow.placements: + if isinstance(placement, Shard): + shard_dim = normalize_dim(placement.dim, len(spec_to_follow.shape)) + common_ndim = len(common_shape) + new_shard_dim = common_ndim - len(spec_to_follow.shape) + shard_dim + out_placements.append(Shard(new_shard_dim)) + elif isinstance(placement, _Partial) and not linearity: + # clear the partial placemnet if op does not support linearity + # by default we just replicate the partial, need to see if this + # is optimal for all cases + out_placements.append(Replicate()) + else: + out_placements.append(placement) + + input_specs: List[DTensorSpec] = [] + redistribute_costs: List[List[float]] = [] + for idx, input_arg in enumerate(args_schema): + if isinstance(input_arg, OpStrategy): + # every arg follow the out_placements, but need to handle broadcasting + input_arg_spec = input_arg.strategies[0].output_spec + input_arg_dims_map = infer_broadcast_dims_map( + common_shape, input_arg_spec.shape + ) + input_target_placements = map_placements_after_broadcast( + tuple(out_placements), + common_shape, + input_arg_dims_map, + ) + input_arg_target_spec = DTensorSpec( + mesh=mesh, + placements=input_target_placements, + tensor_meta=input_arg_spec.tensor_meta, + ) + input_specs.append(input_arg_target_spec) + redistribute_costs.append( + generate_redistribute_costs(input_arg, input_arg_target_spec) + ) + + pointwise_strategy.strategies.append( + PlacementStrategy( + output_specs=DTensorSpec( + mesh=mesh, + placements=tuple(out_placements), + ), + input_specs=input_specs, + redistribute_cost=redistribute_costs, + ) + ) + return pointwise_strategy + + +def linear_pointwise_strategy(mesh: DeviceMesh, op_schema: OpSchema) -> StrategyType: + """ + Linear pointwise operators can propagate pending reductions. + For example, c = add(a, b); if a is pending sum, then c will be + pending sum as well without any communication overhead. 
+ """ + return pointwise_strategy(mesh, op_schema, linearity=True) + + +for op in linear_pointwise_ops: + register_op_strategy(op, schema_info=RuntimeSchemaInfo(static_kwargkey=["out"]))( + linear_pointwise_strategy + ) + +for op in pointwise_ops: + register_op_strategy(op, schema_info=RuntimeSchemaInfo(static_kwargkey=["out"]))( + pointwise_strategy + ) + + +# TODO: add all for_each ops +for_each_ops = [ + aten._foreach_abs_.default, + aten._foreach_addcdiv_.Scalar, + aten._foreach_addcdiv_.ScalarList, + aten._foreach_addcdiv_.Tensor, + aten._foreach_addcmul.Scalar, + aten._foreach_addcmul_.Scalar, + aten._foreach_addcmul_.ScalarList, + aten._foreach_addcmul_.Tensor, + aten._foreach_div_.List, + aten._foreach_div_.ScalarList, + aten._foreach_lerp_.Scalar, + aten._foreach_maximum_.List, + aten._foreach_mul.Scalar, + aten._foreach_mul.List, + aten._foreach_mul_.Scalar, + aten._foreach_mul_.ScalarList, + aten._foreach_mul_.Tensor, + aten._foreach_mul_.List, + aten._foreach_neg.default, + aten._foreach_neg_.default, + aten._foreach_reciprocal_.default, + aten._foreach_sub_.Scalar, + aten._foreach_sqrt.default, + aten._foreach_sqrt_.default, + aten._foreach_zero_.default, +] + +for_each_linearity_ops = [ + aten._foreach_add.Scalar, + aten._foreach_add_.Scalar, + aten._foreach_add_.ScalarList, + aten._foreach_add.List, + aten._foreach_add_.List, +] + + +def foreach_list_pointwise_strategy( + mesh: DeviceMesh, op_schema: OpSchema, linearity: bool = False +) -> StrategyType: + """ + Apply the pointwise strategy to the zipped arguments. For example, if we + run a foreach add of two lists l1 and l2, then we apply the pointwise + strategy on each pair (l1[i], l2[i]). If the first argument is a list but + the second (or later) one is a tensor, then we broadcast the tensor by + replicating it into a list with the length of the first argument. + """ + + def args_tuple_strategies(args_schema: Tuple[object, ...]) -> List[TupleStrategy]: + first_arg = args_schema[0] + assert isinstance(first_arg, TupleStrategy) + strategy_len = len(first_arg.childs) + tuple_strategies: List[TupleStrategy] = [] + for arg_idx, arg in enumerate(args_schema): + if isinstance(arg, TupleStrategy): + # every tuple strategy should have the same length + assert len(arg.childs) == strategy_len + tuple_strategies.append(arg) + elif isinstance(arg, OpStrategy): + if arg_idx > 0: # implicitly broadcast + tuple_strategies.append( + TupleStrategy([arg for _ in range(strategy_len)]) + ) + else: + raise RuntimeError( + f"foreach list op only supports tuple strategy! 
{op_schema}" + ) + return tuple_strategies + + args_strategies = args_tuple_strategies(op_schema.args_schema) + follow_strategy: TupleStrategy = args_strategies[0] + foreach_strategy_list: List[OpStrategy] = [] + for child_idx, child_strtgy in enumerate(follow_strategy.childs): + assert isinstance(child_strtgy, OpStrategy) + args_schema: List[StrategyType] = [ + arg_strategy.childs[child_idx] for arg_strategy in args_strategies + ] + pointwise_strategy: OpStrategy = common_pointwise_strategy( + mesh, args_schema, child_strtgy, linearity + ) + foreach_strategy_list.append(pointwise_strategy) + return TupleStrategy(foreach_strategy_list) + + +def foreach_list_linear_pointwise_strategy( + mesh: DeviceMesh, op_schema: OpSchema +) -> StrategyType: + """ + for each list op stratgy that supports linearity + """ + return foreach_list_pointwise_strategy(mesh, op_schema, linearity=True) + + +for op in for_each_ops: + register_op_strategy(op, schema_info=RuntimeSchemaInfo(needs_pytree=True))( + foreach_list_pointwise_strategy + ) + +for op in for_each_linearity_ops: + register_op_strategy(op, schema_info=RuntimeSchemaInfo(needs_pytree=True))( + foreach_list_linear_pointwise_strategy + ) diff --git a/llmeval-env/lib/python3.10/site-packages/torch/distributed/_tensor/ops/tensor_ops.py b/llmeval-env/lib/python3.10/site-packages/torch/distributed/_tensor/ops/tensor_ops.py new file mode 100644 index 0000000000000000000000000000000000000000..4bfc4c60e91026f35bc87ba5ed791c3e1d6abd6b --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/distributed/_tensor/ops/tensor_ops.py @@ -0,0 +1,826 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates +import itertools +from typing import cast, List, Optional, Sequence, Tuple + +import torch + +from torch.distributed._tensor._utils import compute_local_shape +from torch.distributed._tensor.op_schema import ( + OpSchema, + OpStrategy, + OutputSharding, + PlacementStrategy, + RuntimeSchemaInfo, + StrategyType, + TupleStrategy, +) +from torch.distributed._tensor.ops.common_rules import pointwise_rule +from torch.distributed._tensor.ops.embedding_ops import _MaskPartial +from torch.distributed._tensor.ops.utils import ( + generate_redistribute_costs, + is_tensor_dim_sharded, + is_tensor_partial, + is_tensor_shardable, + normalize_dim, + prod, + register_op_strategy, + register_prop_rule, +) +from torch.distributed._tensor.placement_types import ( + _Partial, + DTensorSpec, + Placement, + Replicate, + Shard, +) +from torch.distributed.device_mesh import DeviceMesh + + +aten = torch.ops.aten + + +def default_strategy(mesh: DeviceMesh, op_schema: OpSchema) -> StrategyType: + # Default strategy by default just propagate the first input strategy + select_strategy = op_schema.args_schema[0] + assert isinstance(select_strategy, OpStrategy) + default_strategy = [] + for strategy in select_strategy.strategies: + # we create new DTensorSpecs even for default strategy to assure that + # the tensor metas are distinct between the arguments and outputs + default_strategy.append( + PlacementStrategy( + output_specs=DTensorSpec( + mesh=strategy.output_spec.mesh, + placements=strategy.output_spec.placements, + ) + ) + ) + return OpStrategy(default_strategy) + + +register_op_strategy( + [ + aten.clone.default, + aten.contiguous.default, + aten.copy_.default, + aten.detach.default, + aten.fill_.Scalar, + aten.zero_.default, + ] +)(default_strategy) + +register_op_strategy( + aten._to_copy.default, schema_info=RuntimeSchemaInfo(static_kwargkey=["dtype"]) +)(default_strategy) + 
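The registrations above all reduce to "follow the input sharding": pointwise ops follow the operand with the most shards, and clone/detach/_to_copy simply propagate placements. A minimal sketch of the observable behaviour, assuming a two-process gloo job launched with torchrun (shapes and the script name are illustrative):

    # torchrun --nproc_per_node=2 pointwise_demo.py
    import torch
    import torch.distributed as dist
    from torch.distributed._tensor import DeviceMesh, Replicate, Shard, distribute_tensor

    dist.init_process_group("gloo")
    mesh = DeviceMesh("cpu", list(range(dist.get_world_size())))

    torch.manual_seed(0)  # same global tensors on every rank
    x = distribute_tensor(torch.randn(8, 4), mesh, [Shard(0)])   # row-sharded
    b = distribute_tensor(torch.randn(4), mesh, [Replicate()])   # broadcast operand

    y = torch.add(x, b)     # pointwise_strategy: output stays Shard(0)
    z = y.clone().detach()  # default_strategy: placements propagated unchanged

    print(y.placements, z.placements)  # (Shard(dim=0),) (Shard(dim=0),) on every rank
    dist.destroy_process_group()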
+ +@register_op_strategy( + [ + aten.equal.default, + aten.is_same_size.default, + ] +) +def equal_strategy(mesh: DeviceMesh, op_schema: OpSchema) -> StrategyType: + # equal_strategy deals with ops that comparing two tensor, we need to make sure + # sharding layout the same with two operands, we choose to follow the arg with max + # num of shards, still keep is_same_size here for completeness as they share the + # same strategy in theory. + self_strategy, other_strategy = op_schema.args_schema + assert isinstance(self_strategy, OpStrategy) + assert isinstance(other_strategy, OpStrategy) + + select_strategy = ( + self_strategy + if self_strategy.max_num_shards() >= other_strategy.max_num_shards() + else other_strategy + ) + equal_strategy = OpStrategy([]) + + for arg_strategy in select_strategy.strategies: + arg_spec = arg_strategy.output_spec + if is_tensor_partial(arg_spec): + # if the arg_spec have partial, reshard to replicate + # otherwise local shard tensor comparison would be invalid + output_spec = DTensorSpec( + mesh=arg_spec.mesh, + placements=tuple( + Replicate() if isinstance(p, _Partial) else p + for p in arg_spec.placements + ), + ) + equal_strategy.strategies.append( + PlacementStrategy(output_specs=output_spec) + ) + else: + equal_strategy.strategies.append(PlacementStrategy(arg_spec)) + return equal_strategy + + +@register_op_strategy( + [ + aten.empty_like.default, + aten.ones_like.default, + aten.rand_like.default, + aten.randn_like.default, + aten.zeros_like.default, + ], + schema_info=RuntimeSchemaInfo(1, ["dtype"]), +) +@register_op_strategy( + [aten.full_like.default], + schema_info=RuntimeSchemaInfo(2, ["dtype"]), +) +@register_op_strategy( + [ + aten.randint_like.default, + aten.randint_like.low_dtype, + aten.randint_like.low_dtype_out, + ], + schema_info=RuntimeSchemaInfo(3, ["dtype"]), +) +def create_like_strategy(mesh: DeviceMesh, op_schema: OpSchema) -> StrategyType: + # create_like_strategy deals with ops that creating tensors with same + # shape as input, but with specific content that does not depend on + # the input, we can propagate sharding, but we have to make sure we + # move from partial to replicated. 
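    # e.g. on a 2-D mesh an input placed as (Shard(0), _Partial()) produces an
    # output spec of (Shard(0), Replicate()); the input spec keeps the partial
    # placement since the new tensor's values do not depend on the input.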
+ select_strategy = op_schema.args_schema[0] + create_like_strategy = OpStrategy([]) + assert isinstance(select_strategy, OpStrategy) + for arg_strategy in select_strategy.strategies: + arg_spec = arg_strategy.output_spec + if is_tensor_partial(arg_spec): + # if the arg_spec have partial, accept partial + # in the input_specs but output replicate for + # those corresponding mesh dims + output_spec = DTensorSpec( + mesh=arg_spec.mesh, + placements=tuple( + Replicate() if isinstance(p, _Partial) else p + for p in arg_spec.placements + ), + ) + create_like_strategy.strategies.append( + PlacementStrategy(output_specs=output_spec, input_specs=(arg_spec,)) + ) + + else: + create_like_strategy.strategies.append(PlacementStrategy(arg_spec)) + + return create_like_strategy + + +@register_op_strategy( + [ + aten.new_empty.default, + aten.new_full.default, + aten.new_ones.default, + aten.new_zeros.default, + aten.new_empty_strided.default, # TODO: re-think new_empty_strided + ], + schema_info=RuntimeSchemaInfo(1, ["dtype"]), +) +def new_factory_strategy(mesh: DeviceMesh, op_schema: OpSchema) -> StrategyType: + # TODO: maybe we should generate all possible shardings intead of just stay + # replicated for new factory methods + input_strategy = op_schema.args_schema[0] + new_factory_strategy = OpStrategy([]) + assert isinstance(input_strategy, OpStrategy) + for arg_strategy in input_strategy.strategies: + input_spec = arg_strategy.output_spec + replica_spec = DTensorSpec(mesh, tuple([Replicate()] * mesh.ndim)) + new_factory_strategy.strategies.append( + PlacementStrategy(output_specs=replica_spec, input_specs=(input_spec,)) + ) + + return new_factory_strategy + + +@register_op_strategy(aten.bucketize.Tensor) +def gen_bucketize_strategy(mesh: DeviceMesh, op_schema: OpSchema) -> StrategyType: + """Just propagate input sharding, but expect replicated for boundaries input.""" + input_strategy = op_schema.args_schema[0] + bucketize_strategy = OpStrategy([]) + assert isinstance(input_strategy, OpStrategy) + for arg_strategy in input_strategy.strategies: + arg_spec = DTensorSpec(mesh, arg_strategy.output_spec.placements) + replica_spec = DTensorSpec(mesh, tuple([Replicate()] * mesh.ndim)) + bucketize_strategy.strategies.append( + PlacementStrategy( + output_specs=arg_spec, input_specs=(arg_spec, replica_spec) + ) + ) + + return bucketize_strategy + + +@register_op_strategy(aten.slice.Tensor, schema_info=RuntimeSchemaInfo(1)) +def gen_slice_strategy(mesh: DeviceMesh, op_schema: OpSchema) -> StrategyType: + """Forward all shardings except the slice dimension.""" + defaults = (None, 0, None, None, 1) + input_strategy, dim, start, end, step = ( + op_schema.args_schema + defaults[len(op_schema.args_schema) :] + ) + assert isinstance(input_strategy, OpStrategy) + input_shape = input_strategy.output_shape + input_ndim = input_strategy.output_ndim + assert isinstance(dim, int) + if start is None: + start = 0 + if end is None or end > input_shape[dim]: + end = input_shape[dim] + assert isinstance(start, int) + assert isinstance(end, int) + assert isinstance(step, int) + + # normalize args + slice_dim = normalize_dim(dim, input_ndim) + start = normalize_dim(start, input_shape[dim]) + end = normalize_dim(end, input_shape[dim]) + + redundant_slice = start == 0 and end == input_shape[dim] and step == 1 + + slice_strategy = OpStrategy([]) + + for arg_strategy in input_strategy.strategies: + arg_spec = arg_strategy.output_spec + if not is_tensor_dim_sharded(arg_spec, dim=slice_dim) or redundant_slice: + # only add the 
strategy if the slice dim is not sharded (or the slice is a no-op) + out_spec = DTensorSpec(mesh, arg_spec.placements) + slice_strategy.strategies.append(PlacementStrategy(output_specs=out_spec)) + if not slice_strategy.strategies: + # if all strategies are filtered out, unshard all specs on the slice dim + # of the input strategy and use that as the op strategy + for arg_strategy in input_strategy.strategies: + arg_spec = arg_strategy.output_spec + unshard_spec = DTensorSpec( + mesh, unshard_tensor_dim(arg_spec.placements, dim=slice_dim) + ) + slice_strategy.strategies.append( + PlacementStrategy(output_specs=unshard_spec) + ) + return slice_strategy + + +def unshard_tensor_dim( + placements: Sequence[Placement], dim: int +) -> Tuple[Placement, ...]: + """Disallow the given tensor dimension to be sharded.""" + return tuple( + p if (not isinstance(p, Shard) or p.dim != dim) else Replicate() + for p in placements + ) + + +def replicate_tensor_dim( + placements: Sequence[Placement], dim: int +) -> Tuple[Placement, ...]: + """Force the given tensor dimension to be replicated.""" + # Not using p.is_shard() to avoid mypy complaining about Placement not having + # attribute dim. + return tuple( + Replicate() if p.is_partial() or isinstance(p, Shard) and p.dim == dim else p + for p in placements + ) + + +@register_op_strategy(aten.slice_scatter.default, schema_info=RuntimeSchemaInfo(2)) +def gen_slice_scatter_strategy(mesh: DeviceMesh, op_schema: OpSchema) -> StrategyType: + # 1. the number of dimensions in input and src needs to match. + # 2. the number of elements on all non-scatter dims needs to match between input and src. + # 3. the number of elements in src along the scatter dim needs to match the slice size. + # Given the above: + # - We suggest for src to follow the sharding of input, except on the scatter dimension, + # where our best bet for now is to make them replicated as a fall-back. + # TODO: Ideally we'd like to make sure the output is re-sharded afterwards to keep input sharding.
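    # e.g. scattering into dim 0 of an input placed as (Shard(0),) cannot be done
    # locally, so the fallback below replicates that tensor dim (and any partials)
    # for both input and src before running slice_scatter.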
+ + input_strategy = op_schema.args_schema[0] + assert isinstance(input_strategy, OpStrategy) + input_ndim = input_strategy.output_ndim + slice_dim = ( + cast(int, op_schema.args_schema[2]) if len(op_schema.args_schema) > 2 else 0 + ) + slice_dim = normalize_dim(slice_dim, input_ndim) + + slice_scatter_strategy = OpStrategy([]) + # by default follow the input strategy for both input and src + for arg_strategy in input_strategy.strategies: + arg_spec = arg_strategy.output_spec + if not ( + is_tensor_dim_sharded(arg_spec, dim=slice_dim) + or is_tensor_partial(arg_spec) + ): + # only add the strategy if the slice_scatter dim is not sharded or partial + slice_scatter_strategy.strategies.append( + PlacementStrategy(output_specs=arg_spec) + ) + + if not slice_scatter_strategy.strategies: + # if all strategies are filtered out, replicating all specs on slice_scatter dim + # of the input strategy, and use that as the op strategy + for arg_strategy in input_strategy.strategies: + arg_spec = arg_strategy.output_spec + replicate_spec = DTensorSpec( + mesh, replicate_tensor_dim(arg_spec.placements, dim=slice_dim) + ) + slice_scatter_strategy.strategies.append( + PlacementStrategy(output_specs=replicate_spec) + ) + return slice_scatter_strategy + + +@register_op_strategy(aten._local_scalar_dense.default) +def replica_only_strategy(mesh: DeviceMesh, op_schema: OpSchema) -> StrategyType: + """Only allow replication on the input/output.""" + replicate_spec = DTensorSpec(mesh, tuple([Replicate()] * mesh.ndim)) + return OpStrategy([PlacementStrategy(replicate_spec)]) + + +@register_op_strategy(aten.gather.default) +def gather_strategy(mesh: DeviceMesh, op_schema: OpSchema) -> StrategyType: + input_strategy = cast(OpStrategy, op_schema.args_schema[0]) + dim = cast(int, op_schema.args_schema[1]) + index_strategy = cast(OpStrategy, op_schema.args_schema[2]) + + input_shape = input_strategy.output_shape + index_shape = index_strategy.output_shape + + all_mesh_dim_strategies = [] + + for mesh_dim in range(mesh.ndim): + single_mesh_dim_strategies = [] + + # placement list stores placements of [output, input, index] + # first we always have replicate all for inputs and output + all_replicate: List[Placement] = [Replicate()] * 3 + single_mesh_dim_strategies.append(all_replicate) + + # input sharding, input sharded, index accepts mask partial, output follows index + # this only works when the input is sharded on the gather dimension, and + # index has size 1 on the gather dimension + if index_shape[dim] == 1: + index_partial_placement = _MaskPartial(logical_dim_size=input_shape[dim]) + input_sharding = [ + index_partial_placement, + Shard(dim), + index_partial_placement, + ] + single_mesh_dim_strategies.append(input_sharding) + + # index sharding, input replicated, index sharded, output follows index + # this only works when the sharding dimension is the gather dimension + index_sharding = [Shard(dim), Replicate(), Shard(dim)] + single_mesh_dim_strategies.append(index_sharding) + + all_mesh_dim_strategies.append(single_mesh_dim_strategies) + + strategy_combs = itertools.product(*all_mesh_dim_strategies) + + all_strategies = [] + for strategy_comb in strategy_combs: + spec_list = [] + for specs in zip(*strategy_comb): + spec_list.append(DTensorSpec(mesh, tuple(specs))) + + if is_tensor_shardable(input_shape, spec_list[1]) and is_tensor_shardable( + index_shape, spec_list[2] + ): + input_spec, index_spec = spec_list[1:] + redistribute_cost = [ + generate_redistribute_costs(input_strategy, input_spec), + 
generate_redistribute_costs(index_strategy, index_spec), + ] + strat = PlacementStrategy( + output_specs=spec_list[0], + input_specs=spec_list[1:], + redistribute_cost=redistribute_cost, + ) + all_strategies.append(strat) + + return OpStrategy(all_strategies) + + +@register_op_strategy(aten.stack.default, RuntimeSchemaInfo(1, needs_pytree=True)) +def stack_strategy(mesh: DeviceMesh, op_schema: OpSchema) -> StrategyType: + args_schema = op_schema.args_schema + input_tuple_strategy = args_schema[0] + assert isinstance(input_tuple_strategy, TupleStrategy), f"{input_tuple_strategy}" + dim = cast(int, args_schema[1]) if len(args_schema) > 1 else 0 + + # Follow the 1st child strategy's placement strategies + child_strategy = input_tuple_strategy.childs[0] + assert isinstance(child_strategy, OpStrategy), f"{child_strategy}" + strategies: List[PlacementStrategy] = [] + + # For each arg strategy of the child to follow, we check if every other + # child has an equal strategy. If so, then that is a valid strategy. If + # there are no such valid strategies, then we replicate. + for arg_strategy in child_strategy.strategies: + arg_spec = arg_strategy.output_spec + # For each arg strategy (whether the one to follow or other), we + # replicate the stack dim since we cannot stack on a sharded dim + if is_tensor_dim_sharded(arg_spec, dim): + arg_spec = DTensorSpec( + mesh, unshard_tensor_dim(arg_spec.placements, dim=dim) + ) + all_compatible = True + for other_child_strategy in input_tuple_strategy.childs[1:]: + has_compatible_strategy = False + assert isinstance( + other_child_strategy, OpStrategy + ), f"{other_child_strategy}" + for other_arg_strategy in other_child_strategy.strategies: + other_arg_spec = other_arg_strategy.output_spec + if is_tensor_dim_sharded(other_arg_spec, dim): + other_arg_spec = DTensorSpec( + mesh, unshard_tensor_dim(other_arg_spec.placements, dim=dim) + ) + if other_arg_spec.placements == arg_spec.placements: + has_compatible_strategy = True + break + if not has_compatible_strategy: + all_compatible = False + break + if all_compatible: + input_specs = tuple( + arg_spec for _ in range(len(input_tuple_strategy.childs)) + ) + strategies.append( + PlacementStrategy( + output_specs=DTensorSpec(mesh, arg_spec.placements), + input_specs=input_specs, + ) + ) + if not strategies: + # Arbitrarily use each child strategy's 0th strategy's output spec + input_specs = tuple( + cast(OpStrategy, child_strategy).strategies[0].output_spec + for child_strategy in input_tuple_strategy.childs + ) + replicate_spec = DTensorSpec(mesh, tuple(Replicate() for _ in range(mesh.ndim))) + strategies.append(PlacementStrategy(output_specs=replicate_spec)) + return OpStrategy(strategies) + + +@register_prop_rule(aten.index_select.default, schema_info=RuntimeSchemaInfo(1)) +def prop_index_select(op_schema: OpSchema) -> OutputSharding: + values_spec, dim, indices_spec = op_schema.args_schema + + assert isinstance(values_spec, DTensorSpec) + assert isinstance(dim, int) + assert isinstance(indices_spec, DTensorSpec) + + all_indices_spec: List[Optional[DTensorSpec]] = [ + indices_spec if dim == i else None for i in range(values_spec.ndim) + ] + + result = prop_index( + OpSchema( + op=op_schema.op, + args_schema=(values_spec, all_indices_spec), + kwargs_schema=op_schema.kwargs_schema, + ) + ) + if result.schema_suggestions: + result.schema_suggestions = [ + OpSchema( + op=op_schema.op, + args_schema=(s.args_schema[0], dim, s.args_schema[1][dim]), + kwargs_schema=op_schema.kwargs_schema, + ) + for s in 
result.schema_suggestions + ] + return result + + +@register_prop_rule(aten.index.Tensor, schema_info=RuntimeSchemaInfo(needs_pytree=True)) +def prop_index(op_schema: OpSchema) -> OutputSharding: + """ + Expect replicated on the first input; _mostly_ pointwise on the second input. + + TODO: exception: when the dtype of second input is "bool", then a torch.nonzero needs to be triggered first. + """ + # Current sharding constraints: + # For values: + # 1. We currently require that the dimension of values_spec be replicated or partial + # if they are being indexed on. + # 2. Other dimensions of values_spec can remain sharded if they are so. + # For indices: + # Indices can be either sharded or replicated. All index tensors need to be sharded + # in a compatible way, following the pointwise rule (including resolving _Partial + # into either sharded or replicated) + + values_spec, multi_indices_spec = op_schema.args_schema + assert isinstance(values_spec, DTensorSpec) + assert isinstance(multi_indices_spec, list) + multi_indices_spec = cast(List[Optional[DTensorSpec]], multi_indices_spec) + valid_indices_spec: List[Tuple[int, DTensorSpec]] = [ + (i, a) for i, a in enumerate(multi_indices_spec) if a is not None + ] + + # 1. All indices have to be sharded equally. Moreover, indices can be broadcast. + # Here, we piggyback on the pointwise sharding rule for indices. + indices_out = pointwise_rule( + OpSchema( + op=op_schema.op, + args_schema=tuple(v[1] for v in valid_indices_spec), + kwargs_schema={}, + ) + ) + need_reshard_on_indices = indices_out.output_spec is None + + if not need_reshard_on_indices: + # this means that our inputs are already sharded properly and we will use that as our indices_spec + assert isinstance(indices_out.output_spec, DTensorSpec) + indices_spec: DTensorSpec = indices_out.output_spec + else: + assert indices_out.schema_suggestions is not None + valid_indices_suggestion = indices_out.schema_suggestions[0] + for i, v in enumerate(valid_indices_suggestion.args_spec): + multi_indices_spec[valid_indices_spec[i][0]] = v + # we'll need to call pointwise_rule again to see what's our ideal indices_spec and then + # use that to compute our ideal values_spec + indices_output_spec = pointwise_rule(valid_indices_suggestion).output_spec + assert isinstance(indices_output_spec, DTensorSpec) + indices_spec = indices_output_spec + + lookup_dims = {v[0] for v in valid_indices_spec} + + need_reshard_on_values = tuple( + (isinstance(vp, Shard) and (vp.dim in lookup_dims or isinstance(ip, Shard))) + for vp, ip in zip(values_spec.placements, indices_spec.placements) + ) + + if not need_reshard_on_indices and not any(need_reshard_on_values): + value_placements = values_spec.placements + + all_dims_consecutive = all( + b[0] - a[0] == 1 + for b, a in zip(valid_indices_spec[1:], valid_indices_spec[:-1]) + ) + if all_dims_consecutive: + # if all index vectors are consecutives, insert at the dimension of the first index + insert_dim: int = valid_indices_spec[0][0] + else: + # else, insert on the first dimension + insert_dim = 0 + + def place(vp: Placement, ip: Placement) -> Placement: + if isinstance(vp, Shard): + return Shard( + vp.dim + if vp.dim < insert_dim + # accounts for the offset in output dimensions + else vp.dim + + indices_spec.ndim + - sum(1 if vp.dim > v[0] else 0 for v in valid_indices_spec) + ) + if isinstance(ip, Shard): + return Shard(ip.dim + insert_dim) + # _Partial or Replicated + return vp + + value_placements = tuple( + place(vp, ip) + for vp, ip in 
zip(values_spec.placements, indices_spec.placements) + ) + result = OutputSharding( + output_spec=DTensorSpec( + mesh=values_spec.mesh, + placements=value_placements, + ) + ) + return result + else: + result = OutputSharding( + output_spec=None, + schema_suggestions=[ + OpSchema( + op=op_schema.op, + args_schema=( + DTensorSpec( + mesh=values_spec.mesh, + placements=tuple( + [ + Replicate() if need_reshard_on_values[i] else v + for i, v in enumerate(values_spec.placements) + ] + ), + tensor_meta=values_spec.tensor_meta, + ), + multi_indices_spec, + ), + kwargs_schema=op_schema.kwargs_schema, + ) + ], + ) + return result + + +@register_prop_rule( + aten.cat.default, schema_info=RuntimeSchemaInfo(1, needs_pytree=True) +) +def cat_rule(op_schema: OpSchema) -> OutputSharding: + # torch.cat requires all tensors must either have the same shape (except + # in the concatenating dimension) or be "empty". "Empty" here strictly means + # tensor.shape is torch.Size([0]). When tensor.ndim > 1, it will be treated + # as a non-empty tensor and the shape must match on non-cat dimensions. + def is_empty(spec: DTensorSpec) -> bool: + return list(spec.shape) == [0] + + # the first arg is a list of input tensor specs + tensor_list_specs = cast(List[DTensorSpec], op_schema.args_schema[0]) + assert len(tensor_list_specs) > 0, "torch.cat expects a non-empty list of tensors" + non_empty_specs = [spec for spec in tensor_list_specs if not is_empty(spec)] + + if len(non_empty_specs) == 0: + # all tensors are empty, we can return any output sharding + return OutputSharding( + output_spec=DTensorSpec( + mesh=tensor_list_specs[0].mesh, + placements=tensor_list_specs[0].placements, + ) + ) + + assert all( + spec.ndim == non_empty_specs[0].ndim for spec in non_empty_specs + ), f"Expect all tensors to have same shape or empty, but got {tensor_list_specs}" + assert all( + spec.mesh == tensor_list_specs[0].mesh for spec in tensor_list_specs + ), f"Expect all tensors to have same mesh, but got {tensor_list_specs}" + + # ndim will also be the result's ndim + ndim = 1 + for spec in tensor_list_specs: + ndim = max(ndim, spec.ndim) + + dim = 0 # default dim = 0 + if len(op_schema.args_schema) > 1: + dim = cast(int, op_schema.args_schema[1]) + dim = normalize_dim(dim, ndim) + + # Make sure all tensors are replicated on cat dimension + need_reshard = False + tensor_list_specs_after: List[DTensorSpec] = [] + for spec in tensor_list_specs: + if not is_empty(spec) and ( + is_tensor_dim_sharded(spec, dim=dim) or is_tensor_partial(spec) + ): + need_reshard = True + tensor_list_specs_after.append( + DTensorSpec( + mesh=spec.mesh, + placements=replicate_tensor_dim(spec.placements, dim=dim), + tensor_meta=spec.tensor_meta, + ) + ) + else: + tensor_list_specs_after.append(spec) + + tensor_list_specs = tensor_list_specs_after + + # align non-cat dimensions placements based on reshard cost + non_empty_specs = [spec for spec in tensor_list_specs if not is_empty(spec)] + mesh = non_empty_specs[0].mesh + ndim = non_empty_specs[0].ndim + new_placements: List[Placement] = [] + for mesh_dim in range(mesh.ndim): + # compute the minimum cost of resharding on this mesh_dim + if any( + spec.placements[mesh_dim] != non_empty_specs[0].placements[mesh_dim] + for spec in non_empty_specs + ): + # only reshard if there is a mismatch + need_reshard = True + reshard_cost = [] + for shard_dim in range(ndim): + # compute the cost of resharding on this shard_dim + cost: float = 0.0 + for spec in non_empty_specs: + global_shape = spec.shape + if 
global_shape[shard_dim] < mesh.size(mesh_dim): + # found one tensor where the shard_dim is smaller than + # mesh_dim. In this case, we cannot shard on this shard_dim, + # and hence set cost to infinity. + cost = +float("inf") + elif ( + is_tensor_dim_sharded(spec, dim=shard_dim) + or prod(global_shape) == 0 + ): + continue + else: + local_shape = compute_local_shape( + global_shape, spec.mesh, spec.placements + ) + cost += prod(local_shape) * spec.mesh.size(mesh_dim) + reshard_cost.append(cost) + best_dim = reshard_cost.index(min(reshard_cost)) + new_placements.append(Shard(best_dim)) + else: + # no mismatch, keep the original placement + new_placements.append(non_empty_specs[0].placements[mesh_dim]) + + if need_reshard: + tensor_list_specs_after = [] + for spec in tensor_list_specs: + if is_empty(spec): + tensor_list_specs_after.append(spec) + else: + tensor_list_specs_after.append( + DTensorSpec( + mesh=spec.mesh, + placements=tuple(new_placements), + tensor_meta=spec.tensor_meta, + ) + ) + + return OutputSharding( + output_spec=None, + schema_suggestions=[ + OpSchema( + op=op_schema.op, + args_schema=( + tuple(tensor_list_specs_after), + *op_schema.args_schema[1:], + ), + kwargs_schema=op_schema.kwargs_schema, + ), + ], + ) + else: + # at this point, the cat dim is not sharded, + return OutputSharding( + output_spec=DTensorSpec( + mesh=non_empty_specs[0].mesh, + placements=non_empty_specs[0].placements, + ), + ) + + +@register_prop_rule( + [ + aten.split.Tensor, + aten.split_with_sizes.default, + aten.split_with_sizes_copy.default, + ], + schema_info=RuntimeSchemaInfo(1), +) +def split_rule(op_schema: OpSchema) -> OutputSharding: + output_spec_list: List[DTensorSpec] = [] + input_spec = cast(DTensorSpec, op_schema.args_schema[0]) + ndim = input_spec.ndim + split_size_or_sections = op_schema.args_schema[1] + dim = cast(int, op_schema.args_schema[2]) if len(op_schema.args_schema) > 2 else 0 + dim = normalize_dim(dim, ndim) + + # TODO: tensor to split cannot have _Partial + # in its placements for now. Will need to + # support in future. + if input_spec.sums: + raise NotImplementedError( + f"splitting distributed tensor with " + f"_Partial placement is not implemented!\n" + f"DTensorSpec={input_spec}" + ) + + # TODO: just like slice op, split replicates before + # splitting on a sharded dimension + need_reshard = False + if is_tensor_dim_sharded(input_spec, dim=dim): + need_reshard = True + input_spec = DTensorSpec( + mesh=input_spec.mesh, + placements=unshard_tensor_dim(input_spec.placements, dim=dim), + tensor_meta=input_spec.tensor_meta, + ) + + if need_reshard: + return OutputSharding( + None, + schema_suggestions=[ + OpSchema( + op=op_schema.op, + args_schema=(input_spec,) + op_schema.args_schema[1:], + kwargs_schema=op_schema.kwargs_schema, + ), + ], + ) + + def size_split(N, i): + # Last chunk will be smaller if the tensor size N + # along the given dimension dim is not divisible by i. 
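        # e.g. size_split(5, 2) -> [2, 2, 1]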
+ assert i > 0 + return [i] * (N // i) + ([N % i] if N % i != 0 else []) + + output_size_list = ( + size_split(input_spec.shape[dim], split_size_or_sections) + if isinstance(split_size_or_sections, int) + else split_size_or_sections + ) + output_spec_list = [ + DTensorSpec( + mesh=input_spec.mesh, + placements=input_spec.placements, + ) + for _ in range(len(output_size_list)) + ] + return OutputSharding(output_spec_list) diff --git a/llmeval-env/lib/python3.10/site-packages/torch/distributed/_tensor/ops/utils.py b/llmeval-env/lib/python3.10/site-packages/torch/distributed/_tensor/ops/utils.py new file mode 100644 index 0000000000000000000000000000000000000000..8fe15e3781e520846a3bcb272b2a08fedbbe6c12 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/distributed/_tensor/ops/utils.py @@ -0,0 +1,226 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates +import functools +import operator +from typing import cast, Iterable, List, Sequence, Tuple, Union + +import torch +from torch.distributed._tensor._collective_utils import redistribute_cost +from torch.distributed._tensor.api import DTensor +from torch.distributed._tensor.op_schema import OpStrategy, RuntimeSchemaInfo +from torch.distributed._tensor.placement_types import ( + _Partial, + DTensorSpec, + Placement, + Replicate, + Shard, +) + + +# convenient wrapper to register sharding propagation rules +# pyre-fixme[3]: Return type must be annotated. +# pyre-fixme[2]: Parameter must be annotated. +def register_prop_rule(op, schema_info=None): + # pyre-fixme[53]: Captured variable `func` is not annotated. + # pyre-fixme[3]: Return type must be annotated. + # pyre-fixme[2]: Parameter must be annotated. + def wrapper(impl): + overloads = op if isinstance(op, list) else [op] + for overload in overloads: + DTensor._op_dispatcher.sharding_propagator.register_sharding_prop_rule( + overload, impl, schema_info + ) + return impl + + return wrapper + + +def register_op_strategy(op, schema_info=None): + # pyre-fixme[53]: Captured variable `func` is not annotated. + # pyre-fixme[3]: Return type must be annotated. + # pyre-fixme[2]: Parameter must be annotated. + + # For every ATen op that accepts any args in this list, + # the arg itself can impact the strides (and potentially the sharding strategy) + # of the output tensor. + # thus, we will detect ATen schemas with any of these args and ensure + # that they get specialized here. + arg_names_that_require_specializing_cache_strategy = [ + "memory_format", + ] + + def wrapper(impl): + if isinstance(op, list): + overloads = op + else: + overloads = [op] + + for overload in overloads: + curr_schema_info = None + if schema_info is None: + specialized_args = [ + a.name + for a in overload._schema.arguments + if a.name in arg_names_that_require_specializing_cache_strategy + ] + if any(specialized_args): + curr_schema_info = RuntimeSchemaInfo( + static_kwargkey=specialized_args + ) + else: + curr_schema_info = schema_info + DTensor._op_dispatcher.sharding_propagator.register_op_strategy( + overload, impl, curr_schema_info + ) + return impl + + return wrapper + + +def as_list( + x: Union[List[object], object] + # pyre-fixme[11]: Annotation `immutable_list` is not defined as a type. +) -> Union[List[object], torch.fx.immutable_collections.immutable_list]: # type: ignore[valid-type] + # During tracing, `aten.sum.dim_IntList` uses `immutable_list` for its args, + # which is an object but treated as a list by the tracer. Therefore, keep + # `immutable_list` intact here as well. 
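    # e.g. as_list(3) -> [3], while a plain list or an fx immutable_list is
    # returned unchanged.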
+ if type(x) is list or isinstance(x, torch.fx.immutable_collections.immutable_list): + return x + else: + return [x] + + +def normalize_dim(dim: int, ndim: int) -> int: + return dim if dim >= 0 else dim + ndim + + +def normalize_dims(dims: Union[int, Sequence[int]], ndim: int) -> Sequence[int]: + """Normalize a dim or a sequence of dims, so that they are all positive.""" + if isinstance(dims, int): + dims = (normalize_dim(dims, ndim),) + elif isinstance(dims, list): + dims = [normalize_dim(dim, ndim) for dim in dims] + elif isinstance(dims, tuple): + dims = tuple([normalize_dim(dim, ndim) for dim in dims]) + return dims + + +def normalize_to_torch_size(size) -> torch.Size: + """ + Unify variable types of size argument to torch.Size + Acceptable types include: + int, Sequence[int], Tuple[int], Tuple[Sequence[int]], + or torch.Size + """ + if isinstance(size, torch.Size): + return size + + if isinstance(size, int): + torch_size = [size] + elif len(size) == 1 and isinstance(size[0], Sequence): + torch_size = list(size[0]) + else: + torch_size = list(size) + return torch.Size(torch_size) + + +def prod(xs: Iterable[int]) -> int: + return functools.reduce(operator.mul, xs, 1) + + +def is_tensor_shardable(shape: Sequence[int], spec: DTensorSpec) -> bool: + """Check if the shape is shardable according to the spec.""" + # number of shards in each tensor dimension + shards_map = [1] * len(shape) + for i, placement in enumerate(spec.placements): + if placement.is_shard(): + shard_dim = cast(Shard, placement).dim + shards_map[shard_dim] *= spec.mesh.size(i) + + for i, dim_size in enumerate(shape): + # TODO: maybe we should determine is_shardable based on + # whether it's evenly sharded or not + if shards_map[i] > 1 and dim_size < shards_map[i]: + return False + + return True + + +def is_tensor_evenly_shardable(shape: Sequence[int], spec: DTensorSpec) -> bool: + """Check if the shape is evenly shardable according to the spec.""" + # number of shards in each tensor dimension + shards_map = [1] * len(shape) + for i, placement in enumerate(spec.placements): + if placement.is_shard(): + shard_dim = cast(Shard, placement).dim + shards_map[shard_dim] *= spec.mesh.size(i) + + for i, dim_size in enumerate(shape): + if shards_map[i] > 1 and (dim_size % shards_map[i] != 0): + return False + + return True + + +def is_tensor_dim_sharded(spec: DTensorSpec, dim: int) -> bool: + """Return True if tensor dim is sharded.""" + return any(p.is_shard(dim) for p in spec.placements) + + +def is_tensor_partial(spec: DTensorSpec) -> bool: + """Return True if tensor is partial on the mesh.""" + return any(p.is_partial() for p in spec.placements) + + +def infer_broadcast_dims_map( + common_shape: torch.Size, input_shape: torch.Size +) -> List[int]: + # infer the broadcast dims map, where it maps from the common shape dim to the input shape dim + # this is aligned with the broadcast semantics + common_ndim = len(common_shape) + input_ndim = len(input_shape) + broadcast_dims_map = [-1] * common_ndim + for idx in range(-1, -1 - input_ndim, -1): + if input_shape[idx] == common_shape[idx]: + broadcast_dims_map[common_ndim + idx] = input_ndim + idx + return broadcast_dims_map + + +def map_placements_after_broadcast( + placements: Tuple[Placement, ...], + shape: torch.Size, + broadcast_dims_map: List[int], +) -> Tuple[Placement, ...]: + """Map each placement based on the output shape after broadcast.""" + new_placements: List[Placement] = [] + for placement in placements: + if isinstance(placement, (Replicate, _Partial)): + 
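            # replicate/partial placements are unaffected by broadcasting; sharded
            # dims below consult broadcast_dims_map, e.g.
            # infer_broadcast_dims_map(torch.Size([2, 3, 4]), torch.Size([3, 4])) == [-1, 0, 1],
            # where -1 marks a dim that only exists after broadcasting.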
new_placements.append(placement) + else: + assert isinstance(placement, Shard) + shard_dim = normalize_dim(placement.dim, len(shape)) + new_shard_dim = broadcast_dims_map[shard_dim] + if new_shard_dim != -1: + # there's a map from the common shape shard dim to + # the input shape shard dim before broadcasting, + # use that instead + new_placements.append(Shard(new_shard_dim)) + else: + # there's no map between common shape shard dim and + # the input shape shard dim before broadcasting, + # in this case it means implicit broadcasting happen + # in this dim, so we can just mark it as replicate + # and implict broadcast will broadcast automatically + # to the sharded shape + new_placements.append(Replicate()) + + return tuple(new_placements) + + +def generate_redistribute_costs( + src_strategy: OpStrategy, dst_spec: DTensorSpec +) -> List[float]: + redistribute_costs: List[float] = [] + for strat in src_strategy.strategies: + redistribute_costs.append(redistribute_cost(strat.output_spec, dst_spec)) + + return redistribute_costs diff --git a/llmeval-env/lib/python3.10/site-packages/torch/distributed/elastic/multiprocessing/errors/__init__.py b/llmeval-env/lib/python3.10/site-packages/torch/distributed/elastic/multiprocessing/errors/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..3e9a9df36146b553cde0010c58e28da0f3f0cc7d --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/distributed/elastic/multiprocessing/errors/__init__.py @@ -0,0 +1,375 @@ +#!/usr/bin/env python3 + +# Copyright (c) Facebook, Inc. and its affiliates. +# All rights reserved. +# +# This source code is licensed under the BSD-style license found in the +# LICENSE file in the root directory of this source tree. + +""" +Each host in a distributed PyTorch job runs with a single TorchElastic agent, +and multiple workers (as children processes of the TorchElastic agent). +Since the workers are user-provided (your PyTorch script/job), TorchElastic +has a way to propagate errors on the trainers through the agent and up to the +scheduler, which ultimately informs the end-user about the state of the job +and applies any retry policies. + +TorchElastic categorizes errors into 3 categories: + ++----------------+----------------+--------------------------------------------------------------+ +| Category | Sub-Category | Description | ++================+================+==============================================================+ +| User Error | Input Error | invalid inputs to TorchElastic APIs (e.g. min > max nodes) | +| +----------------+--------------------------------------------------------------+ +| | Worker Failure | any failures on the worker child process | ++----------------+----------------+--------------------------------------------------------------+ +| Platform Error | n/a | failures caused by the agent | ++----------------+----------------+--------------------------------------------------------------+ +| Infra Error | n/a | failures outside the domain of the agent and workers | +| | | (e.g. host failures) | ++----------------+----------------+--------------------------------------------------------------+ + +All errors other than "Worker Failure" are either raised canonically from the +agent process or implicitly or explicitly crash the agent process. So the +standard language (python) provided exception handling strategies apply. 
+ +Worker Failures are special because the exception/failure originates on a different +process from the agent so the error needs to be propagated inter-process +(e.g. the agent cannot simply ``try-catch`` an exception raised on the worker process). + +TorchElastic agents use :func:`torch.distributed.elastic.multiprocessing.start_processes` +to launch the workers which has a simple file based inter-process error propagation +built-in. + +Any function or binary entrypoint decorated with :func:`record` +will write uncaught exceptions (with the trace information) to a file specified by the +environment variable ``TORCHELASTIC_ERROR_FILE``. The parent process (e.g. agent) +sets this env var on each child it launches, then aggregates the error files for all +children, and propagates the one with the **smallest** timestamp (e.g. the **first** error). +""" + +import json +import os +import signal +import socket +import time +import warnings +from dataclasses import dataclass, field +from datetime import datetime +from functools import wraps +from string import Template +from typing import Any, Callable, Dict, List, Optional, Tuple, TypeVar + +from torch.distributed.elastic.utils.logging import get_logger + +from .error_handler import ErrorHandler # noqa: F401 +from .handlers import get_error_handler # noqa: F401 + +__all__ = ["ProcessFailure", "ChildFailedError", "record", "ErrorHandler", "get_error_handler"] + +log = get_logger(__name__) + + +JSON = Dict + +_EMPTY_ERROR_DATA = {"message": ""} +_NOT_AVAILABLE = "" + +T = TypeVar("T") + + +@dataclass +class ProcessFailure: + """ + Represent the failed process result. When the worker process fails, it may record failure root cause into the file. + + Tries to read the failure timestamp from the provided ``error_file``, + if the ``error_file`` does not exist, the timestamp is the current + timestamp (seconds since epoch). + + The ``message`` field is a concise explanation of the failure. If + the error file exists then the message is obtained from the error file. + Otherwise one is generated based on the failure signature. + + .. note:: It is assumed that the ``error_file`` is written by + ``torch.distributed.elastic.multiprocessing.errors.error_handler.ErrorHandler``. + Otherwise the behavior is undefined. 
+ + """ + + local_rank: int + pid: int + exitcode: int + error_file: str + error_file_data: JSON = field(init=False) + message: str = field(init=False) + timestamp: int = field(init=False) + + def __post_init__(self): + self.error_file_data = _EMPTY_ERROR_DATA + if os.path.isfile(self.error_file): + try: + with open(self.error_file) as fp: + self.error_file_data = json.load(fp) + log.debug( + "User process failed with error data: %s", json.dumps(self.error_file_data, indent=2) + ) + self.message, self.timestamp = self._get_error_data( + self.error_file_data + ) + except Exception: + log.exception("Failed to parse reply file: %s", self.error_file) + raise + else: + self._set_no_reply_file() + + # make up an informative message if not already present + if not self.message: + # signals typically do not generate an error file message + if self.exitcode < 0: + self.message = ( + f"Signal {-self.exitcode} ({self.signal_name()})" + f" received by PID {self.pid}" + ) + else: + self.message = "To enable traceback see: https://pytorch.org/docs/stable/elastic/errors.html" + + def _get_error_data(self, error_file_data: Dict[str, Any]) -> Tuple[str, int]: + message = error_file_data["message"] + if isinstance(message, str): + timestamp = int(error_file_data.get("timestamp", 0)) + else: + timestamp = int(message["extraInfo"]["timestamp"]) + return (message, timestamp) + + def _set_no_reply_file(self): + self.error_file = _NOT_AVAILABLE + self.error_file_data = _EMPTY_ERROR_DATA + self.message = "" + self.timestamp = int(time.time()) + + def signal_name(self) -> str: + if self.exitcode < 0: + # We don't want to kill the parent process trying to find the signal name. + # if the signal doesn't map to a known name, use not available. + try: + return signal.Signals(-self.exitcode).name + except Exception: + return _NOT_AVAILABLE + else: + return _NOT_AVAILABLE + + def timestamp_isoformat(self): + """Return timestamp in ISO format (YYYY-MM-DD_HH:MM:SS).""" + return datetime.fromtimestamp(self.timestamp).isoformat(sep="_") + + +GlobalRank = int + +_FAILURE_FORMAT_TEMPLATE = """[${idx}]: + time : ${time} + host : ${hostname} + rank : ${rank} (local_rank: ${local_rank}) + exitcode : ${exitcode} (pid: ${pid}) + error_file: ${error_file} + traceback : ${message}""" + +# extra new lines before and after are intentional +_MSG_FORMAT_TEMPLATE = """ +${boarder} +${title} +${section} +Failures: +${other_failures} +${section} +Root Cause (first observed failure): +${root_failure} +${boarder}""" + + +class ChildFailedError(Exception): + """ + Special exception type that can be raised from a function annotated with the + ``@record`` decorator to have the child process' (root exception) propagate + up the stack as-is (e.g. without being wrapped in the parent's traceback). + + Useful in cases where the parent is a simple nanny process + and the child (worker) processes are actually doing meaningful compute. + In this case, errors typically occur on the child process as the parent + is not doing anything non-trivial, and child errors should be propagated + to the scheduler for accurate root cause diagnostics. + + .. note:: The propagation relies on error files rather than exception handling to + support both function and binary launches. + + Example: + :: + + # process tree on a host (container) + 0: scheduler-init-process: + |- 1: torchelastic_agent: + |- 2: trainer_0 (ok) + |- 3: trainer_1 (fail) -> error.json + |- ... + |- n+2: trainer_n (ok) + |- n+3: other processes + |- ... 
+ + In the example above, trainer 1's failure (written into error.json) is + the root cause and should be reported to the scheduler's init process. + The torchelastic agent raises a ``ChildFailedError("trainer", {1: "trainer_1/error.json"})`` + upon detecting trainer 1's failure which would propagate the contents + of trainer 1's error file to the scheduler's init process. + """ + + def __init__(self, name: str, failures: Dict[GlobalRank, ProcessFailure]): + self.name = name + self.failures = failures + assert ( + self.failures + ) # does not make sense to create a ChildFaileError with no failures + super().__init__(self.format_msg()) + + def get_first_failure(self) -> Tuple[GlobalRank, ProcessFailure]: + rank = min(self.failures.keys(), key=lambda r: self.failures[r].timestamp) + return rank, self.failures[rank] + + def format_msg(self, boarder_delim="=", section_delim="-"): + title = f"{self.name} FAILED" + root_rank, root_failure = self.get_first_failure() + + root_failure_fmt: str = "" + other_failures_fmt: List[str] = [] + width = len(title) + for idx, (rank, failure) in enumerate(self.failures.items()): + fmt, w = self._format_failure(idx, rank, failure) + width = max(width, w) + if rank == root_rank: + root_failure_fmt = fmt + else: + other_failures_fmt.append(fmt) + + # upper boundary on width + width = min(width, 60) + + return Template(_MSG_FORMAT_TEMPLATE).substitute( + boarder=boarder_delim * width, + title=title, + section=section_delim * width, + root_failure=root_failure_fmt, + other_failures="\n".join(other_failures_fmt or [" "]), + ) + + def _format_failure( + self, idx: int, rank: int, failure: ProcessFailure + ) -> Tuple[str, int]: + + # failure.message is either a str (when the failure does not generate a traceback - e.g. signals) + # or a dict (json) of the form + # {"message": $ERROR_MSG, "extraInfo": {"py_callstack": $TRACEBACK, timestamp: $TS}} + # so the display logic is: + # 1. if failure.message is not a dict (it is a str) just show it as is + # 2. else try to get the traceback (py_callstack) + # 3. if the traceback is not there, use the message + # 4. if the message is not there show + msg = failure.message + if isinstance(failure.message, dict): + msg = ( + failure.message.get("extraInfo", {}) + .get("py_callstack", failure.message.get("message", "")) + .replace("\n", "\n ") # to properly indent the traceback + ) + + fmt = Template(_FAILURE_FORMAT_TEMPLATE).substitute( + idx=idx, + time=failure.timestamp_isoformat(), + hostname=socket.getfqdn(), + rank=rank, + local_rank=failure.local_rank, + exitcode=failure.exitcode, + pid=failure.pid, + error_file=failure.error_file, + message=msg, + ) + width = 0 + for line in fmt.split("\n"): + width = max(width, len(line)) + return fmt, width + + +def record( + fn: Callable[..., T], error_handler: Optional[ErrorHandler] = None +) -> Callable[..., T]: + """ + Syntactic sugar to record errors/exceptions that happened in the decorated + function using the provided ``error_handler``. + + Using this decorator is equivalent to: + + :: + + error_handler = get_error_handler() + error_handler.initialize() + try: + foobar() + except ChildFailedError as e: + _, failure = e.get_first_failure() + error_handler.dump_error_file(failure.error_file, failure.exitcode) + raise + except Exception as e: + error_handler.record(e) + raise + + .. important:: use this decorator once per process at the top level method, + typically this is the main method. 
+ + Example + + :: + + @record + def main(): + pass + + if __name__=="__main__": + main() + + """ + if not error_handler: + error_handler = get_error_handler() + + def wrap(f): + @wraps(f) + def wrapper(*args, **kwargs): + assert error_handler is not None # assertion for mypy type checker + error_handler.initialize() + try: + return f(*args, **kwargs) + except SystemExit as se: + # For run_path based entrypoints, SystemExit with code = 0 will never exit. + # Handling it here by returning a value: + if se.code == 0: + return None + else: + raise + except ChildFailedError as e: + rank, failure = e.get_first_failure() + if failure.error_file != _NOT_AVAILABLE: + error_handler.dump_error_file(failure.error_file, failure.exitcode) + else: + log.info( + ( + "local_rank %s FAILED with no error file." + " Decorate your entrypoint fn with @record for traceback info." + " See: https://pytorch.org/docs/stable/elastic/errors.html", + rank + ) + ) + raise + except Exception as e: + error_handler.record_exception(e) + raise + + return wrapper + + return wrap(fn) diff --git a/llmeval-env/lib/python3.10/site-packages/torch/distributed/elastic/multiprocessing/errors/__pycache__/__init__.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/torch/distributed/elastic/multiprocessing/errors/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..d1792562a958827586ccf3bc84402720f6feac72 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/torch/distributed/elastic/multiprocessing/errors/__pycache__/__init__.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/torch/distributed/elastic/multiprocessing/errors/__pycache__/error_handler.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/torch/distributed/elastic/multiprocessing/errors/__pycache__/error_handler.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..8305233074e82d7dfbf6109ea27e782c43643425 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/torch/distributed/elastic/multiprocessing/errors/__pycache__/error_handler.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/torch/distributed/elastic/multiprocessing/errors/__pycache__/handlers.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/torch/distributed/elastic/multiprocessing/errors/__pycache__/handlers.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..f93662535361d783ebb0fd6665f715becbfd8a17 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/torch/distributed/elastic/multiprocessing/errors/__pycache__/handlers.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/torch/distributed/elastic/multiprocessing/errors/error_handler.py b/llmeval-env/lib/python3.10/site-packages/torch/distributed/elastic/multiprocessing/errors/error_handler.py new file mode 100644 index 0000000000000000000000000000000000000000..481e9da23b4b1557d8bc7d1e990267664fee063a --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/distributed/elastic/multiprocessing/errors/error_handler.py @@ -0,0 +1,159 @@ +#!/usr/bin/env python3 + +# Copyright (c) Facebook, Inc. and its affiliates. +# All rights reserved. +# +# This source code is licensed under the BSD-style license found in the +# LICENSE file in the root directory of this source tree. 
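The ``record`` decorator above and the ``ErrorHandler`` defined below can be exercised without an agent; a minimal single-process sketch, assuming an illustrative temp path and toy ``main`` (the reply-file layout matches ``record_exception`` below):

    import json, os, tempfile
    from torch.distributed.elastic.multiprocessing.errors import record

    # stand in for the agent: point the reply file at a temporary location
    reply_file = os.path.join(tempfile.mkdtemp(), "error.json")
    os.environ["TORCHELASTIC_ERROR_FILE"] = reply_file

    @record
    def main():
        raise RuntimeError("boom")  # any uncaught exception is recorded, then re-raised

    try:
        main()
    except RuntimeError:
        pass

    with open(reply_file) as fp:
        data = json.load(fp)
    print(data["message"]["message"])            # RuntimeError: boom
    print(sorted(data["message"]["extraInfo"]))  # ['py_callstack', 'timestamp']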
+import faulthandler +import json +import logging +import os +import time +import traceback +import warnings +from typing import Any, Dict, Optional + +__all__ = ['ErrorHandler'] + +log = logging.getLogger(__name__) + + +class ErrorHandler: + """ + Write the provided exception object along with some other metadata about + the error in a structured way in JSON format to an error file specified by the + environment variable: ``TORCHELASTIC_ERROR_FILE``. If this environment + variable is not set, then simply logs the contents of what would have been + written to the error file. + + This handler may be subclassed to customize the handling of the error. + Subclasses should override ``initialize()`` and ``record_exception()``. + """ + + def _get_error_file_path(self) -> Optional[str]: + """ + Return the error file path. + + May return ``None`` to have the structured error be logged only. + """ + return os.environ.get("TORCHELASTIC_ERROR_FILE", None) + + def initialize(self) -> None: + """ + Call prior to running code that we wish to capture errors/exceptions. + + Typically registers signal/fault handlers. Users can override this + function to add custom initialization/registrations that aid in + propagation/information of errors/signals/exceptions/faults. + """ + try: + faulthandler.enable(all_threads=True) + except Exception as e: + warnings.warn(f"Unable to enable fault handler. {type(e).__name__}: {e}") + + def _write_error_file(self, file_path: str, error_msg: str) -> None: + """Write error message to the file.""" + try: + with open(file_path, "w") as fp: + fp.write(error_msg) + except Exception as e: + warnings.warn(f"Unable to write error to file. {type(e).__name__}: {e}") + + def record_exception(self, e: BaseException) -> None: + """ + Write a structured information about the exception into an error file in JSON format. + + If the error file cannot be determined, then logs the content + that would have been written to the error file. + """ + file = self._get_error_file_path() + if file: + data = { + "message": { + "message": f"{type(e).__name__}: {e}", + "extraInfo": { + "py_callstack": traceback.format_exc(), + "timestamp": str(int(time.time())), + }, + } + } + with open(file, "w") as fp: + json.dump(data, fp) + + def override_error_code_in_rootcause_data( + self, + rootcause_error_file: str, + rootcause_error: Dict[str, Any], + error_code: int = 0, + ): + """Modify the rootcause_error read from the file, to correctly set the exit code.""" + if "message" not in rootcause_error: + log.warning( + "child error file (%s) does not have field `message`. \n" + "cannot override error code: %s", + rootcause_error_file, error_code + ) + elif isinstance(rootcause_error["message"], str): + log.warning( + "child error file (%s) has a new message format. \n" + "skipping error code override", + rootcause_error_file + ) + else: + rootcause_error["message"]["errorCode"] = error_code + + def dump_error_file(self, rootcause_error_file: str, error_code: int = 0): + """Dump parent error file from child process's root cause error and error code.""" + with open(rootcause_error_file) as fp: + rootcause_error = json.load(fp) + # Override error code since the child process cannot capture the error code if it + # is terminated by signals like SIGSEGV. 
+ if error_code: + self.override_error_code_in_rootcause_data(rootcause_error_file, rootcause_error, error_code) + log.debug( + "child error file (%s) contents:\n" + "%s", + rootcause_error_file, json.dumps(rootcause_error, indent=2) + ) + + my_error_file = self._get_error_file_path() + if my_error_file: + # Guard against existing error files + # This can happen when the child is created using multiprocessing + # and the same env var (TORCHELASTIC_ERROR_FILE) is used on the + # parent and child to specify the error files (respectively) + # because the env vars on the child is set in the wrapper function + # and by default the child inherits the parent's env vars, if the child + # process receives a signal before the wrapper function kicks in + # and the signal handler writes to the error file, then the child + # will write to the parent's error file. In this case just log the + # original error file contents and overwrite the error file. + self._rm(my_error_file) + self._write_error_file(my_error_file, json.dumps(rootcause_error)) + log.info("dumped error file to parent's %s", my_error_file) + else: + log.error( + "no error file defined for parent, to copy child error file (%s)", rootcause_error_file + ) + + def _rm(self, my_error_file): + if os.path.isfile(my_error_file): + # Log the contents of the original file. + with open(my_error_file) as fp: + try: + original = json.dumps(json.load(fp), indent=2) + log.warning( + "%s already exists" + " and will be overwritten." + " Original contents:\n%s", + my_error_file, original + ) + except json.decoder.JSONDecodeError as err: + log.warning( + "%s already exists" + " and will be overwritten." + " Unable to load original contents:\n", + my_error_file + ) + os.remove(my_error_file) diff --git a/llmeval-env/lib/python3.10/site-packages/torch/distributed/elastic/multiprocessing/errors/handlers.py b/llmeval-env/lib/python3.10/site-packages/torch/distributed/elastic/multiprocessing/errors/handlers.py new file mode 100644 index 0000000000000000000000000000000000000000..3071aef1711785602265a4dec81405b382444132 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/distributed/elastic/multiprocessing/errors/handlers.py @@ -0,0 +1,16 @@ +#!/usr/bin/env python3 + +# Copyright (c) Facebook, Inc. and its affiliates. +# All rights reserved. +# +# This source code is licensed under the BSD-style license found in the +# LICENSE file in the root directory of this source tree. +# Multiprocessing error-reporting module + + +from torch.distributed.elastic.multiprocessing.errors.error_handler import ErrorHandler + +__all__ = ['get_error_handler'] + +def get_error_handler(): + return ErrorHandler() diff --git a/llmeval-env/lib/python3.10/site-packages/torch/distributed/elastic/multiprocessing/redirects.py b/llmeval-env/lib/python3.10/site-packages/torch/distributed/elastic/multiprocessing/redirects.py new file mode 100644 index 0000000000000000000000000000000000000000..e632558193832b66515e1f8a0af8975cb85d9874 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/distributed/elastic/multiprocessing/redirects.py @@ -0,0 +1,102 @@ +# !/usr/bin/env python3 + +# Copyright (c) Facebook, Inc. and its affiliates. +# All rights reserved. +# +# This source code is licensed under the BSD-style license found in the +# LICENSE file in the root directory of this source tree. 
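Before the redirect helpers, a rough sketch of the payload that ``ErrorHandler.record_exception`` above writes when ``TORCHELASTIC_ERROR_FILE`` is set. The file path and the exception are made up for illustration::

    import json
    import os

    from torch.distributed.elastic.multiprocessing.errors.error_handler import ErrorHandler

    os.environ["TORCHELASTIC_ERROR_FILE"] = "/tmp/error.json"  # placeholder path

    handler = ErrorHandler()
    handler.initialize()  # enables faulthandler so fatal signals also leave a trace
    try:
        raise ValueError("boom")
    except ValueError as e:
        handler.record_exception(e)

    with open("/tmp/error.json") as fp:
        payload = json.load(fp)

    # Shape written by record_exception:
    # {"message": {"message": "ValueError: boom",
    #              "extraInfo": {"py_callstack": "...", "timestamp": "..."}}}
    print(payload["message"]["message"])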
+ +# Taken and modified from original source: +# https://eli.thegreenplace.net/2015/redirecting-all-kinds-of-stdout-in-python/ +import ctypes +import logging +import os +import sys +from contextlib import contextmanager +from functools import partial + +IS_WINDOWS = sys.platform == "win32" +IS_MACOS = sys.platform == "darwin" + + +logger = logging.getLogger(__name__) + + +def get_libc(): + if IS_WINDOWS or IS_MACOS: + logger.warning( + "NOTE: Redirects are currently not supported in Windows or MacOs." + ) + return None + else: + return ctypes.CDLL("libc.so.6") + + +libc = get_libc() + + +def _c_std(stream: str): + return ctypes.c_void_p.in_dll(libc, stream) + + +def _python_std(stream: str): + return {"stdout": sys.stdout, "stderr": sys.stderr}[stream] + + +_VALID_STD = {"stdout", "stderr"} + + +@contextmanager +def redirect(std: str, to_file: str): + """ + Redirect ``std`` (one of ``"stdout"`` or ``"stderr"``) to a file in the path specified by ``to_file``. + + This method redirects the underlying std file descriptor (not just python's ``sys.stdout|stderr``). + See usage for details. + + Directory of ``dst_filename`` is assumed to exist and the destination file + is overwritten if it already exists. + + .. note:: Due to buffering cross source writes are not guaranteed to + appear in wall-clock order. For instance in the example below + it is possible for the C-outputs to appear before the python + outputs in the log file. + + Usage: + + :: + + # syntactic-sugar for redirect("stdout", "tmp/stdout.log") + with redirect_stdout("/tmp/stdout.log"): + print("python stdouts are redirected") + libc = ctypes.CDLL("libc.so.6") + libc.printf(b"c stdouts are also redirected" + os.system("echo system stdouts are also redirected") + + print("stdout restored") + + """ + if std not in _VALID_STD: + raise ValueError( + f"unknown standard stream <{std}>, must be one of {_VALID_STD}" + ) + + c_std = _c_std(std) + python_std = _python_std(std) + std_fd = python_std.fileno() + + def _redirect(dst): + libc.fflush(c_std) + python_std.flush() + os.dup2(dst.fileno(), std_fd) + + with os.fdopen(os.dup(std_fd)) as orig_std, open(to_file, mode="w+b") as dst: + _redirect(dst) + try: + yield + finally: + _redirect(orig_std) + + +redirect_stdout = partial(redirect, "stdout") +redirect_stderr = partial(redirect, "stderr") diff --git a/llmeval-env/lib/python3.10/site-packages/torch/distributed/elastic/multiprocessing/subprocess_handler/__init__.py b/llmeval-env/lib/python3.10/site-packages/torch/distributed/elastic/multiprocessing/subprocess_handler/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..4c335964c7322aac57a485c3d4ea160b5ab34dba --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/distributed/elastic/multiprocessing/subprocess_handler/__init__.py @@ -0,0 +1,15 @@ +#!/usr/bin/env python3 + +# Copyright (c) Facebook, Inc. and its affiliates. +# All rights reserved. +# +# This source code is licensed under the BSD-style license found in the +# LICENSE file in the root directory of this source tree. 
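A short usage sketch for the ``redirect_stdout`` / ``redirect_stderr`` helpers defined in ``redirects.py`` above; the log paths are placeholders. Because the redirection swaps the underlying file descriptors, output from C extensions and child processes is captured as well (Linux only: ``get_libc()`` returns ``None`` on Windows/macOS and redirection is unsupported there)::

    import os

    from torch.distributed.elastic.multiprocessing.redirects import (
        redirect_stderr,
        redirect_stdout,
    )

    with redirect_stdout("/tmp/stdout.log"), redirect_stderr("/tmp/stderr.log"):
        print("captured on stdout")
        os.system("echo captured from a child process")

    # Both the python print and the child process output end up in the file.
    with open("/tmp/stdout.log") as fp:
        print(fp.read(), end="")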
+from torch.distributed.elastic.multiprocessing.subprocess_handler.handlers import ( + get_subprocess_handler, +) +from torch.distributed.elastic.multiprocessing.subprocess_handler.subprocess_handler import ( + SubprocessHandler, +) + +__all__ = ["SubprocessHandler", "get_subprocess_handler"] diff --git a/llmeval-env/lib/python3.10/site-packages/torch/distributed/elastic/multiprocessing/subprocess_handler/handlers.py b/llmeval-env/lib/python3.10/site-packages/torch/distributed/elastic/multiprocessing/subprocess_handler/handlers.py new file mode 100644 index 0000000000000000000000000000000000000000..8d4477452a200edb881ae3573ff63db6c9f67e65 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/distributed/elastic/multiprocessing/subprocess_handler/handlers.py @@ -0,0 +1,32 @@ +#!/usr/bin/env python3 + +# Copyright (c) Facebook, Inc. and its affiliates. +# All rights reserved. +# +# This source code is licensed under the BSD-style license found in the +# LICENSE file in the root directory of this source tree. +from typing import Dict, Tuple + +from torch.distributed.elastic.multiprocessing.subprocess_handler.subprocess_handler import ( + SubprocessHandler, +) + +__all__ = ["get_subprocess_handler"] + + +def get_subprocess_handler( + entrypoint: str, + args: Tuple, + env: Dict[str, str], + stdout: str, + stderr: str, + local_rank_id: int, +): + return SubprocessHandler( + entrypoint=entrypoint, + args=args, + env=env, + stdout=stdout, + stderr=stderr, + local_rank_id=local_rank_id, + ) diff --git a/llmeval-env/lib/python3.10/site-packages/torch/distributed/elastic/multiprocessing/subprocess_handler/subprocess_handler.py b/llmeval-env/lib/python3.10/site-packages/torch/distributed/elastic/multiprocessing/subprocess_handler/subprocess_handler.py new file mode 100644 index 0000000000000000000000000000000000000000..7cacf986857500afb2e18f7af4f97cd3867300ea --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/distributed/elastic/multiprocessing/subprocess_handler/subprocess_handler.py @@ -0,0 +1,78 @@ +#!/usr/bin/env python3 + +# Copyright (c) Facebook, Inc. and its affiliates. +# All rights reserved. +# +# This source code is licensed under the BSD-style license found in the +# LICENSE file in the root directory of this source tree. +import os +import signal +import subprocess +import sys + +from typing import Any, Dict, Optional, Tuple + +__all__ = ["SubprocessHandler"] + +IS_WINDOWS = sys.platform == "win32" + + +def _get_default_signal() -> signal.Signals: + """Get the default termination signal. SIGTERM for unix, CTRL_C_EVENT for windows.""" + if IS_WINDOWS: + return signal.CTRL_C_EVENT # type: ignore[attr-defined] # noqa: F821 + else: + return signal.SIGTERM + + +class SubprocessHandler: + """ + Convenience wrapper around python's ``subprocess.Popen``. Keeps track of + meta-objects associated to the process (e.g. stdout and stderr redirect fds). 
+ """ + + def __init__( + self, + entrypoint: str, + args: Tuple, + env: Dict[str, str], + stdout: str, + stderr: str, + local_rank_id: int, + ): + self._stdout = open(stdout, "w") if stdout else None + self._stderr = open(stderr, "w") if stderr else None + # inherit parent environment vars + env_vars = os.environ.copy() + env_vars.update(env) + + args_str = (entrypoint, *[str(e) for e in args]) + self.local_rank_id = local_rank_id + self.proc: subprocess.Popen = self._popen(args_str, env_vars) + + def _popen(self, args: Tuple, env: Dict[str, str]) -> subprocess.Popen: + kwargs: Dict[str, Any] = {} + if not IS_WINDOWS: + kwargs["start_new_session"] = True + return subprocess.Popen( + # pyre-fixme[6]: Expected `Union[typing.Sequence[Union[_PathLike[bytes], + # _PathLike[str], bytes, str]], bytes, str]` for 1st param but got + # `Tuple[str, *Tuple[Any, ...]]`. + args=args, + env=env, + stdout=self._stdout, + stderr=self._stderr, + **kwargs, + ) + + def close(self, death_sig: Optional[signal.Signals] = None) -> None: + if not death_sig: + death_sig = _get_default_signal() + if IS_WINDOWS: + self.proc.send_signal(death_sig) + else: + os.killpg(self.proc.pid, death_sig) + if self._stdout: + self._stdout.close() + if self._stderr: + self._stderr.close() diff --git a/llmeval-env/lib/python3.10/site-packages/torch/distributed/launcher/__init__.py b/llmeval-env/lib/python3.10/site-packages/torch/distributed/launcher/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..f0d25f8080c26916486bfc567ee4206a3b8c8da6 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/distributed/launcher/__init__.py @@ -0,0 +1,14 @@ +#!/usr/bin/env/python3 + +# Copyright (c) Facebook, Inc. and its affiliates. +# All rights reserved. +# +# This source code is licensed under the BSD-style license found in the +# LICENSE file in the root directory of this source tree. + + +from torch.distributed.launcher.api import ( # noqa: F401 + LaunchConfig, + elastic_launch, + launch_agent, +) diff --git a/llmeval-env/lib/python3.10/site-packages/torch/distributed/launcher/__pycache__/__init__.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/torch/distributed/launcher/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..c7429302f6ab7af47de4472f5cc08d51ffe153e7 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/torch/distributed/launcher/__pycache__/__init__.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/torch/distributed/launcher/__pycache__/api.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/torch/distributed/launcher/__pycache__/api.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..5247773866acd7289d33c4f46685fde03bf4157f Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/torch/distributed/launcher/__pycache__/api.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/torch/distributed/launcher/api.py b/llmeval-env/lib/python3.10/site-packages/torch/distributed/launcher/api.py new file mode 100644 index 0000000000000000000000000000000000000000..f2b4aca644f84384d791dbedff8cb7d17ecb7994 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/distributed/launcher/api.py @@ -0,0 +1,283 @@ +#!/usr/bin/env python3 + +# Copyright (c) Facebook, Inc. and its affiliates. +# All rights reserved. 
+# +# This source code is licensed under the BSD-style license found in the +# LICENSE file in the root directory of this source tree. +import sys +import uuid +from dataclasses import dataclass, field +from typing import Any, Callable, Dict, List, Optional, Tuple, Union + +import torch.distributed.elastic.rendezvous.registry as rdzv_registry +from torch.distributed.elastic import events, metrics +from torch.distributed.elastic.agent.server.api import WorkerSpec +from torch.distributed.elastic.agent.server.local_elastic_agent import LocalElasticAgent +from torch.distributed.elastic.multiprocessing import DefaultLogsSpecs, LogsSpecs, SignalException +from torch.distributed.elastic.multiprocessing.errors import ChildFailedError +from torch.distributed.elastic.rendezvous import RendezvousParameters +from torch.distributed.elastic.rendezvous.utils import parse_rendezvous_endpoint +from torch.distributed.elastic.utils.logging import get_logger + +__all__ = ['LaunchConfig', 'elastic_launch', 'launch_agent'] + +logger = get_logger(__name__) + + +@dataclass +class LaunchConfig: + """ + Creates a rendezvous config. + + Args: + min_nodes: Minimum amount of nodes that the user function will + be launched on. Elastic agent ensures that the user + function start only when the min_nodes amount enters + the rendezvous. + max_nodes: Maximum amount of nodes that the user function + will be launched on. + nproc_per_node: On each node the elastic agent will launch + this amount of workers that will execute user + defined function. + rdzv_backend: rdzv_backend to use in the rendezvous (zeus-adapter, etcd). + rdzv_endpoint: The endpoint of the rdzv sync. storage. + rdzv_configs: Key, value pair that specifies rendezvous specific configuration. + rdzv_timeout: Legacy argument that specifies timeout for the rendezvous. It is going + to be removed in future versions, see the note below. The default timeout is 900 seconds. + run_id: The unique run id of the job (if not passed a unique one will be + deduced from run environment - flow workflow id in flow - or auto generated). + role: User defined role of the worker (defaults to "trainer"). + max_restarts: The maximum amount of restarts that elastic agent will conduct + on workers before failure. + monitor_interval: The interval in seconds that is used by the elastic_agent + as a period of monitoring workers. + start_method: The method is used by the elastic agent to start the + workers (spawn, fork, forkserver). + metrics_cfg: configuration to initialize metrics. + local_addr: address of the local node if any. If not set, a lookup on the local + machine's FQDN will be performed. + local_ranks_filter: ranks for which to show logs in console. If not set, show from all. + ..note: + `rdzv_timeout` is a legacy argument that will be removed in future. 
+ Set the timeout via `rdzv_configs['timeout']` + + """ + + min_nodes: int + max_nodes: int + nproc_per_node: int + logs_specs: Optional[LogsSpecs] = None + run_id: str = "" + role: str = "default_role" + rdzv_endpoint: str = "" + rdzv_backend: str = "etcd" + rdzv_configs: Dict[str, Any] = field(default_factory=dict) + rdzv_timeout: int = -1 + max_restarts: int = 3 + monitor_interval: float = 30 + start_method: str = "spawn" + log_line_prefix_template: Optional[str] = None + metrics_cfg: Dict[str, str] = field(default_factory=dict) + local_addr: Optional[str] = None + + def __post_init__(self): + default_timeout = 900 + if self.rdzv_timeout != -1: + self.rdzv_configs["timeout"] = self.rdzv_timeout + elif "timeout" not in self.rdzv_configs: + self.rdzv_configs["timeout"] = default_timeout + + # Post-processing to enable refactoring to introduce logs_specs due to non-torchrun API usage + if self.logs_specs is None: + self.logs_specs = DefaultLogsSpecs() + + +class elastic_launch: + """ + Launches an torchelastic agent on the container that invoked the entrypoint. + + 1. Pass the ``entrypoint`` arguments as non ``kwargs`` (e.g. no named parameters)/ + ``entrypoint`` can be a function or a command. + 2. The return value is a map of each worker's output mapped + by their respective global rank. + + Usage + + :: + + def worker_fn(foo): + # ... + + def main(): + # entrypoint is a function. + outputs = elastic_launch(LaunchConfig, worker_fn)(foo) + # return rank 0's output + return outputs[0] + + # entrypoint is a command and ``script.py`` is the python module. + outputs = elastic_launch(LaunchConfig, "script.py")(args) + outputs = elastic_launch(LaunchConfig, "python")("script.py") + """ + + def __init__( + self, + config: LaunchConfig, + entrypoint: Union[Callable, str, None], + ): + self._config = config + self._entrypoint = entrypoint + + def __call__(self, *args): + return launch_agent(self._config, self._entrypoint, list(args)) + + +def _get_entrypoint_name( + entrypoint: Union[Callable, str, None], args: List[Any] +) -> str: + """Retrieve entrypoint name with the rule: + 1. If entrypoint is a function, use ``entrypoint.__qualname__``. + 2. If entrypoint is a string, check its value: + 2.1 if entrypoint equals to ``sys.executable`` (like "python"), use the first element from ``args`` + which does not start with hifen letter (for example, "-u" will be skipped). + 2.2 otherwise, use ``entrypoint`` value. + 3. Otherwise, return empty string. + """ + if isinstance(entrypoint, Callable): # type: ignore[arg-type] + return entrypoint.__name__ # type: ignore[union-attr] + elif isinstance(entrypoint, str): + if entrypoint == sys.executable: + return next((arg for arg in args if arg[0] != "-"), "") + else: + return entrypoint + else: + return "" + + +def _get_addr_and_port( + rdzv_parameters: RendezvousParameters, +) -> Tuple[Optional[str], Optional[int]]: + if rdzv_parameters.backend != "static": + return (None, None) + endpoint = rdzv_parameters.endpoint + endpoint = endpoint.strip() + if not endpoint: + raise ValueError( + "Endpoint is missing in endpoint. Try to add --master-addr and --master-port" + ) + master_addr, master_port = parse_rendezvous_endpoint(endpoint, default_port=-1) + if master_port == -1: + raise ValueError( + f"port is missing in endpoint: {endpoint}. 
Try to specify --master-port" + ) + return (master_addr, master_port) + + +def launch_agent( + config: LaunchConfig, + entrypoint: Union[Callable, str, None], + args: List[Any], +) -> Dict[int, Any]: + if not config.run_id: + run_id = str(uuid.uuid4().int) + logger.warning("config has no run_id, generated a random run_id: %s", run_id) + config.run_id = run_id + + entrypoint_name = _get_entrypoint_name(entrypoint, args) + + logger.info( + "Starting elastic_operator with launch configs:\n" + " entrypoint : %(entrypoint)s\n" + " min_nodes : %(min_nodes)s\n" + " max_nodes : %(max_nodes)s\n" + " nproc_per_node : %(nproc_per_node)s\n" + " run_id : %(run_id)s\n" + " rdzv_backend : %(rdzv_backend)s\n" + " rdzv_endpoint : %(rdzv_endpoint)s\n" + " rdzv_configs : %(rdzv_configs)s\n" + " max_restarts : %(max_restarts)s\n" + " monitor_interval : %(monitor_interval)s\n" + " log_dir : %(log_dir)s\n" + " metrics_cfg : %(metrics_cfg)s\n", + { + "entrypoint": entrypoint_name, + "min_nodes": config.min_nodes, + "max_nodes": config.max_nodes, + "nproc_per_node": config.nproc_per_node, + "run_id": config.run_id, + "rdzv_backend": config.rdzv_backend, + "rdzv_endpoint": config.rdzv_endpoint, + "rdzv_configs": config.rdzv_configs, + "max_restarts": config.max_restarts, + "monitor_interval": config.monitor_interval, + "log_dir": config.logs_specs.root_log_dir, # type: ignore[union-attr] + "metrics_cfg": config.metrics_cfg + } + ) + + rdzv_parameters = RendezvousParameters( + backend=config.rdzv_backend, + endpoint=config.rdzv_endpoint, + run_id=config.run_id, + min_nodes=config.min_nodes, + max_nodes=config.max_nodes, + local_addr=config.local_addr, + **config.rdzv_configs, + ) + + master_addr, master_port = _get_addr_and_port(rdzv_parameters) + + spec = WorkerSpec( + role=config.role, + local_world_size=config.nproc_per_node, + entrypoint=entrypoint, + args=tuple(args), + rdzv_handler=rdzv_registry.get_rendezvous_handler(rdzv_parameters), + max_restarts=config.max_restarts, + monitor_interval=config.monitor_interval, + master_addr=master_addr, + master_port=master_port, + local_addr=config.local_addr, + ) + + agent = LocalElasticAgent( + spec=spec, + logs_specs=config.logs_specs, # type: ignore[arg-type] + start_method=config.start_method, + log_line_prefix_template=config.log_line_prefix_template, + ) + + shutdown_rdzv = True + try: + metrics.initialize_metrics(metrics.MetricsConfig(config.metrics_cfg)) + + result = agent.run() + # records that agent.run() has succeeded NOT that workers have succeeded + events.record(agent.get_event_succeeded()) + + if result.is_failed(): + # ChildFailedError is treated specially by @record + # if the error files for the failed children exist + # @record will copy the first error (root cause) + # to the error file of the launcher process. 
+ raise ChildFailedError( + name=entrypoint_name, + failures=result.failures, + ) + + return result.return_values + except ChildFailedError: + raise + except SignalException: + # when the agent dies with a signal do NOT shutdown the rdzv_handler + # since this closes the rendezvous on this rdzv_id permanently and + # prevents any additional scaling events + shutdown_rdzv = False + events.record(agent.get_event_failed()) + raise + except Exception: + events.record(agent.get_event_failed()) + raise + finally: + if shutdown_rdzv: + spec.rdzv_handler.shutdown() diff --git a/llmeval-env/lib/python3.10/site-packages/torch/distributed/optim/__init__.py b/llmeval-env/lib/python3.10/site-packages/torch/distributed/optim/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..82886d3c774b78de0526b2bce2bfc06db6f23a92 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/distributed/optim/__init__.py @@ -0,0 +1,34 @@ +""" +:mod:`torch.distributed.optim` exposes DistributedOptimizer, which takes a list +of remote parameters (:class:`~torch.distributed.rpc.RRef`) and runs the +optimizer locally on the workers where the parameters live. The distributed +optimizer can use any of the local optimizer :ref:`optimizer-algorithms` to +apply the gradients on each worker. +""" +import torch +from torch import optim + +from .apply_optimizer_in_backward import ( + _apply_optimizer_in_backward, + _get_in_backward_optimizers, +) +from .functional_adadelta import _FunctionalAdadelta + +from .functional_adagrad import _FunctionalAdagrad +from .functional_adam import _FunctionalAdam +from .functional_adamax import _FunctionalAdamax +from .functional_adamw import _FunctionalAdamW +from .functional_rmsprop import _FunctionalRMSprop +from .functional_rprop import _FunctionalRprop +from .functional_sgd import _FunctionalSGD +from .named_optimizer import _NamedOptimizer +from .utils import as_functional_optim + + +# DistributedOptimizer imports torch.distributed.rpc names, so gate availability +# based on RPC being available. 
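For reference, a minimal sketch of driving the launcher API defined in ``torch/distributed/launcher/api.py`` above. The node counts, rendezvous settings, and ``trainer`` function are illustrative only, not values prescribed by the module::

    from torch.distributed.launcher.api import LaunchConfig, elastic_launch

    def trainer(x):
        # Runs once in every worker process; return values are collected per rank.
        return x * 2

    if __name__ == "__main__":
        config = LaunchConfig(
            min_nodes=1,
            max_nodes=1,
            nproc_per_node=2,
            rdzv_backend="c10d",          # illustrative; the dataclass default is "etcd"
            rdzv_endpoint="localhost:29400",
            run_id="example_job",
            max_restarts=0,
        )
        # Returns {global_rank: return_value}; ChildFailedError is raised on failure.
        outputs = elastic_launch(config, trainer)(21)
        print(outputs[0])  # 42 on rank 0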
+if hasattr(torch._C, "_rpc_init"): + from .optimizer import DistributedOptimizer + +from .post_localSGD_optimizer import PostLocalSGDOptimizer +from .zero_redundancy_optimizer import ZeroRedundancyOptimizer diff --git a/llmeval-env/lib/python3.10/site-packages/torch/distributed/optim/__pycache__/__init__.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/torch/distributed/optim/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..418afafea8b984d0b5918a9a2633f552e664cca6 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/torch/distributed/optim/__pycache__/__init__.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/torch/distributed/optim/__pycache__/functional_adadelta.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/torch/distributed/optim/__pycache__/functional_adadelta.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..cd026e361a21c341a67dd42823ab22805538a78e Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/torch/distributed/optim/__pycache__/functional_adadelta.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/torch/distributed/optim/__pycache__/functional_adagrad.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/torch/distributed/optim/__pycache__/functional_adagrad.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..baec5d05dfaff54b25dc16366bf1a346ce5466cc Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/torch/distributed/optim/__pycache__/functional_adagrad.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/torch/distributed/optim/__pycache__/functional_adam.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/torch/distributed/optim/__pycache__/functional_adam.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..77b172c99caf8ccde9703d4bf42f30bde5579d35 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/torch/distributed/optim/__pycache__/functional_adam.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/torch/distributed/optim/__pycache__/functional_adamw.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/torch/distributed/optim/__pycache__/functional_adamw.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..536055dbc906387161ad6fe031f2982441fc9c07 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/torch/distributed/optim/__pycache__/functional_adamw.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/torch/distributed/optim/__pycache__/functional_rprop.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/torch/distributed/optim/__pycache__/functional_rprop.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..8bd4b3c6c1812629dee0ad403c26267860b69b78 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/torch/distributed/optim/__pycache__/functional_rprop.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/torch/distributed/optim/__pycache__/named_optimizer.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/torch/distributed/optim/__pycache__/named_optimizer.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..fe193c54635405ec4b553b24f25168e64cdccc41 Binary files /dev/null and 
b/llmeval-env/lib/python3.10/site-packages/torch/distributed/optim/__pycache__/named_optimizer.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/torch/distributed/optim/__pycache__/post_localSGD_optimizer.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/torch/distributed/optim/__pycache__/post_localSGD_optimizer.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..52e00fb3a8ab8ab809ba5a5e7f95af0045ba0f49 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/torch/distributed/optim/__pycache__/post_localSGD_optimizer.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/torch/distributed/optim/__pycache__/utils.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/torch/distributed/optim/__pycache__/utils.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..74079d09a8d3b5fa6162ec8467ca0f5f8aea5649 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/torch/distributed/optim/__pycache__/utils.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/torch/distributed/optim/apply_optimizer_in_backward.py b/llmeval-env/lib/python3.10/site-packages/torch/distributed/optim/apply_optimizer_in_backward.py new file mode 100644 index 0000000000000000000000000000000000000000..6bd182cca5736fbd9df7373e984e92235ef12617 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/distributed/optim/apply_optimizer_in_backward.py @@ -0,0 +1,118 @@ +from typing import Any, Dict, Iterable, List, no_type_check, Type + +import torch + +__all__: List[str] = [] + +# WeakTensorKeyDictionary to store relevant meta-data for the Tensor/Parameter +# without changing it's life-time. +# NOTE: Alternative is to add the meta-data as an attribute to the tensor, +# but that will serialize the meta-data if Tensor is serialized. +param_to_optim_hook_handle_map = torch.utils.weak.WeakTensorKeyDictionary() +param_to_acc_grad_map = torch.utils.weak.WeakTensorKeyDictionary() + +@no_type_check +def _apply_optimizer_in_backward( + optimizer_class: Type[torch.optim.Optimizer], + params: Iterable[torch.nn.Parameter], + optimizer_kwargs: Dict[str, Any], + register_hook: bool = True, +) -> None: + """ + Upon ``backward()``, the optimizer specified for each parameter will fire after + the gradient has been accumulated into the parameter. + + Note - gradients for these parameters will be set to None after ``backward()``. + This means that any other optimizer not specified via `_apply_optimizer_in_backward` + over this parameter will be a no-op. + + Args: + optimizer_class: (Type[torch.optim.Optimizer]): Optimizer to apply to parameter + params: (Iterator[nn.Parameter]): parameters to apply optimizer state to + optimizer_kwargs: (Dict[str, Any]): kwargs to pass to optimizer constructor + register_hook: (bool): whether to register a hook that runs the optimizer + after gradient for this parameter is accumulated. This is the default + way that optimizer in backward is implemented, but specific use cases + (such as DDP) may wish to override this to implement custom behavior. 
+ (Default = True) + + Example:: + params_generator = model.parameters() + param_1 = next(params_generator) + remainder_params = list(params_generator) + + apply_optimizer_in_backward(torch.optim.SGD, [param_1], {"lr": .02}) + apply_optimizer_in_backward(torch.optim.Adam, remainder_params, {"lr": .04}) + + model(...).sum().backward() # after backward, parameters will already + # have their registered optimizer(s) applied. + + """ + torch._C._log_api_usage_once( + "torch.distributed.optim.apply_optimizer_in_backward" + ) + + @no_type_check + def _apply_optimizer_in_backward_to_param(param: torch.nn.Parameter) -> None: + # view_as creates a node in autograd graph that allows us access to the + # parameter's AccumulateGrad autograd function object. We register a + # hook on this object to fire the optimizer when the gradient for + # this parameter is ready (has been accumulated into .grad field) + + # Don't create a new acc_grad if we already have one + # i.e. for shared parameters or attaching multiple optimizers to a param. + if param not in param_to_acc_grad_map: + param_to_acc_grad_map[param] = param.view_as(param).grad_fn.next_functions[0][0] + + optimizer = optimizer_class([param], **optimizer_kwargs) + + if not hasattr(param, "_in_backward_optimizers"): + param._in_backward_optimizers = [] # type: ignore[attr-defined] + # TODO: Remove these attributes once we have a better way of accessing + # optimizer classes and kwargs for a parameter. + param._optimizer_classes = [] # type: ignore[attr-defined] + param._optimizer_kwargs = [] # type: ignore[attr-defined] + + param._in_backward_optimizers.append(optimizer) # type: ignore[attr-defined] + param._optimizer_classes.append(optimizer_class) # type: ignore[attr-defined] + param._optimizer_kwargs.append(optimizer_kwargs) # type: ignore[attr-defined] + + if not register_hook: + return + + def optimizer_hook(*_unused) -> None: + for opt in param._in_backward_optimizers: # type: ignore[attr-defined] + opt.step() + + param.grad = None + + handle = param_to_acc_grad_map[param].register_hook(optimizer_hook) # type: ignore[attr-defined] + if param not in param_to_optim_hook_handle_map: + param_to_optim_hook_handle_map[param] = [] + param_to_optim_hook_handle_map[param].append(handle) + + for param in params: + _apply_optimizer_in_backward_to_param(param) + + +def _get_in_backward_optimizers(module: torch.nn.Module) -> List[torch.optim.Optimizer]: + """ + Return a list of in-backward optimizers applied to ``module``'s parameters. Note that these + optimizers are not intended to directly have their ``step`` or ``zero_grad`` methods called + by the user and are intended to be used for things like checkpointing. + + Args: + module: (torch.nn.Module): model to retrieve in-backward optimizers for + + Returns: + List[torch.optim.Optimizer]: the in-backward optimizers. 
+ + Example:: + _apply_optimizer_in_backward(torch.optim.SGD, model.parameters(), {'lr': 0.01}) + optims = _get_optimizers_in_backward(model) + """ + optims: List[torch.optim.Optimizer] = [] + for param in module.parameters(): + optims.extend(getattr(param, "_in_backward_optimizers", [])) + + return optims diff --git a/llmeval-env/lib/python3.10/site-packages/torch/distributed/optim/functional_adadelta.py b/llmeval-env/lib/python3.10/site-packages/torch/distributed/optim/functional_adadelta.py new file mode 100644 index 0000000000000000000000000000000000000000..803132e5d7a42ecab2f51835d69b4b272a2cb012 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/distributed/optim/functional_adadelta.py @@ -0,0 +1,102 @@ +from typing import Dict, List, Optional + +import torch +import torch.optim._functional as F + +from torch import Tensor + +__all__: List[str] = [] + +# Define a TorchScript compatible Functional Adadelta Optimizer +# where we use these optimizer in a functional way. +# Instead of using the `param.grad` when updating parameters, +# we explicitly allow the distributed optimizer pass gradients to +# the `step` function. In this way, we could separate the gradients +# and parameters and allow multithreaded trainer to update the +# parameters without data traces on accumulating to the same .grad. +# NOTE: This should be only used by distributed optimizer internals +# and not meant to expose to the user. +@torch.jit.script +class _FunctionalAdadelta: + def __init__( + self, + params: List[Tensor], + lr: float = 1.0, + rho: float = 0.9, + eps: float = 1e-6, + weight_decay: float = 0.0, + foreach: bool = False, + maximize: bool = False, + _allow_empty_param_list: bool = False, + ): + self.defaults = { + "lr": lr, + "rho": rho, + "eps": eps, + "weight_decay": weight_decay, + } + self.foreach = foreach + self.maximize = maximize + + if len(params) == 0 and not _allow_empty_param_list: + raise ValueError("optimizer got an empty parameter list") + + # NOTE: we only have one param_group and don't allow user to add additional + # param group as it's not a common use case. + self.param_group = {"params": params} + + self.state = torch.jit.annotate(Dict[torch.Tensor, Dict[str, torch.Tensor]], {}) + + def step(self, gradients: List[Optional[Tensor]]): + params = self.param_group["params"] + params_with_grad = [] + grads = [] + square_avgs = [] + acc_deltas = [] + lr = self.defaults["lr"] + rho = self.defaults["rho"] + eps = self.defaults["eps"] + weight_decay = self.defaults["weight_decay"] + + if len(params) != len(gradients): + raise ValueError( + "the gradients passed in does not equal to the size of the parameters!" + + f"Params length: {len(params)}. 
" + + f"Gradients length: {len(gradients)}" + ) + has_complex = False + for param, gradient in zip(params, gradients): + if gradient is not None: + has_complex |= torch.is_complex(param) + params_with_grad.append(param) + grads.append(gradient) + # Lazy state initialization + if param not in self.state: + self.state[param] = {} + state = self.state[param] + state["step"] = torch.tensor(0.0) + state["square_avg"] = torch.zeros_like( + param, memory_format=torch.preserve_format + ) + state["acc_delta"] = torch.zeros_like( + param, memory_format=torch.preserve_format + ) + + state = self.state[param] + square_avgs.append(state["square_avg"]) + acc_deltas.append(state["acc_delta"]) + + with torch.no_grad(): + F.adadelta( + params_with_grad, + grads, + square_avgs, + acc_deltas, + lr=lr, + rho=rho, + eps=eps, + weight_decay=weight_decay, + foreach=self.foreach, + maximize=self.maximize, + has_complex=has_complex + ) diff --git a/llmeval-env/lib/python3.10/site-packages/torch/distributed/optim/functional_adagrad.py b/llmeval-env/lib/python3.10/site-packages/torch/distributed/optim/functional_adagrad.py new file mode 100644 index 0000000000000000000000000000000000000000..96e075c8216ca9b398c8448a2caec075e821b63c --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/distributed/optim/functional_adagrad.py @@ -0,0 +1,104 @@ +from typing import Dict, List, Optional + +import torch +import torch.optim._functional as F + +from torch import Tensor + +__all__: List[str] = [] + +# Define a TorchScript compatible Functional Adagrad Optimizer +# where we use these optimizer in a functional way. +# Instead of using the `param.grad` when updating parameters, +# we explicitly let the user pass gradients to the `step` function +# this is so that we could separate the gradients and parameters +# and allow multithreaded trainer to update the parameters +# without data traces on accumulating to the same .grad. +# NOTE: This should be only used by distributed optimizer internals +# and not meant to expose to the user. +@torch.jit.script +class _FunctionalAdagrad: + def __init__( + self, + params: List[Tensor], + lr: float = 1e-2, + lr_decay: float = 0.0, + weight_decay: float = 0.0, + initial_accumulator_value: float = 0.0, + warmup_lr_multiplier: float = 1.0, + warmup_num_iters: float = 0.0, + eps: float = 1e-10, + coalesce_grad: bool = True, + foreach: bool = False, + maximize: bool = False, + _allow_empty_param_list: bool = False, + ): + self.defaults = { + "lr": lr, + "lr_decay": lr_decay, + "eps": eps, + "weight_decay": weight_decay, + "initial_accumulator_value": initial_accumulator_value, + "warmup_lr_multiplier": warmup_lr_multiplier, + "warmup_num_iters": warmup_num_iters, + } + self.coalesce_grad = coalesce_grad + self.foreach = foreach + self.maximize = maximize + self.state = torch.jit.annotate(Dict[torch.Tensor, Dict[str, torch.Tensor]], {}) + + if len(params) == 0 and not _allow_empty_param_list: + raise ValueError("optimizer got an empty parameter list") + + # NOTE: we only have one param_group and don't allow user to add additional + # param group as it's not a common use case. 
+ self.param_group = {"params": params} + + # TODO: no union or any types in TorchScript, make step a scalar tensor instead + # This is also needed by if we want to share_memory on the step across processes + for p in self.param_group["params"]: + self.state[p] = { + "sum": torch.full_like(p.data, initial_accumulator_value), + "step": torch.tensor(0.0), + } + + def step(self, gradients: List[Optional[Tensor]]): + params = self.param_group["params"] + params_with_grad = [] + grads = [] + state_sums = [] + state_steps: List[Tensor] = [] + + if len(params) != len(gradients): + raise ValueError( + "the gradients passed in does not equal to the size of the parameters!" + + f"Params length: {len(params)}. " + + f"Gradients length: {len(gradients)}" + ) + + has_sparse_grad, has_complex = False, False + for param, gradient in zip(self.param_group["params"], gradients): + if gradient is not None: + has_sparse_grad |= gradient.is_sparse + has_complex |= torch.is_complex(param) + params_with_grad.append(param) + grads.append(gradient) + state = self.state[param] + state_sums.append(state["sum"]) + state_steps.append(state["step"]) + + with torch.no_grad(): + F.adagrad( + params, + grads, + state_sums, + state_steps, + lr=self.defaults["lr"], + weight_decay=self.defaults["weight_decay"], + lr_decay=self.defaults["lr_decay"], + eps=self.defaults["eps"], + has_sparse_grad=has_sparse_grad, + foreach=self.foreach, + maximize=self.maximize, + has_complex=has_complex, + ) diff --git a/llmeval-env/lib/python3.10/site-packages/torch/distributed/optim/functional_adam.py b/llmeval-env/lib/python3.10/site-packages/torch/distributed/optim/functional_adam.py new file mode 100644 index 0000000000000000000000000000000000000000..5335df17e089041d942fdf7462f7c38bdfc7fc5c --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/distributed/optim/functional_adam.py @@ -0,0 +1,196 @@ +from typing import Dict, List, Optional, Tuple + +import torch +import torch.optim._functional as F + +from torch import Tensor + +__all__: List[str] = [] + +# Define a TorchScript compatible Functional Adam Optimizer +# where we use these optimizer in a functional way. +# Instead of using the `param.grad` when updating parameters, +# we explicitly allow the distributed optimizer pass gradients to +# the `step` function. In this way, we could separate the gradients +# and parameters and allow multithreaded trainer to update the +# parameters without data traces on accumulating to the same .grad. +# NOTE: This should be only used by distributed optimizer internals +# and not meant to expose to the user. 
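The header comment above (repeated in the sibling ``functional_*`` files) describes the calling convention these classes share: gradients are handed to ``step`` explicitly instead of being read from ``param.grad``. A rough sketch of that convention using the ``_FunctionalAdagrad`` class defined above; the toy parameters and gradients are made up::

    import torch

    from torch.distributed.optim.functional_adagrad import _FunctionalAdagrad

    # Toy parameters; in the distributed optimizer these would be the
    # parameters owned by the local worker.
    params = [torch.zeros(3), torch.zeros(3)]
    opt = _FunctionalAdagrad(params, lr=0.1)

    # Gradients arrive from elsewhere (e.g. shipped over RPC) and are passed
    # in positionally; param.grad is never consulted.
    grads = [torch.ones(3), torch.full((3,), 0.5)]
    opt.step(grads)
    print(params[0])  # updated in place by the functional step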
+@torch.jit.script +class _FunctionalAdam: + def __init__( + self, + params: List[Tensor], + lr: float = 1e-3, + betas: Tuple[float, float] = (0.9, 0.999), + eps: float = 1e-8, + weight_decay: float = 0.0, + amsgrad: bool = False, + maximize: bool = False, + foreach: bool = False, + fused: bool = False, + _allow_empty_param_list: bool = False, + ): + if not 0.0 <= lr: + raise ValueError(f"Invalid learning rate: {lr}") + if not 0.0 <= eps: + raise ValueError(f"Invalid epsilon value: {eps}") + if not 0.0 <= betas[0] < 1.0: + raise ValueError(f"Invalid beta parameter at index 0: {betas[0]}") + if not 0.0 <= betas[1] < 1.0: + raise ValueError(f"Invalid beta parameter at index 1: {betas[1]}") + if not 0.0 <= weight_decay: + raise ValueError(f"Invalid weight_decay value: {weight_decay}") + + self.defaults = { + "lr": lr, + "eps": eps, + "beta1": betas[0], + "beta2": betas[1], + "weight_decay": weight_decay, + } + self.amsgrad = amsgrad + self.maximize = maximize + self.foreach = foreach + self.fused = fused + self.state = torch.jit.annotate(Dict[torch.Tensor, Dict[str, torch.Tensor]], {}) + + if len(params) == 0 and not _allow_empty_param_list: + raise ValueError("optimizer got an empty parameter list") + + # NOTE: we only have one param_group and don't allow user to add additional + # param group as it's not a common use case. + self.param_group = {"params": params} + + def step_param(self, param: Tensor, grad: Optional[Tensor]): + """ + Similar to step, but operates on a single parameter and optionally a + gradient tensor. + """ + params_with_grad = [] + grads = [] + exp_avgs = [] + exp_avg_sqs = [] + max_exp_avg_sqs = [] + state_steps: List[Tensor] = [] + has_complex = torch.is_complex(param) + if grad is not None: + params_with_grad.append(param) + grads.append(grad) + if param not in self.state: + self.state[param] = {} + state = self.state[param] + state["step"] = torch.tensor(0.0) + state["exp_avg"] = torch.zeros_like( + param, memory_format=torch.preserve_format + ) + state["exp_avg_sq"] = torch.zeros_like( + param, memory_format=torch.preserve_format + ) + if self.amsgrad: + state["max_exp_avg_sq"] = torch.zeros_like( + param, memory_format=torch.preserve_format + ) + + state = self.state[param] + exp_avgs.append(state["exp_avg"]) + exp_avg_sqs.append(state["exp_avg_sq"]) + + if self.amsgrad: + max_exp_avg_sqs.append(state["max_exp_avg_sq"]) + + state_steps.append(state["step"]) + with torch.no_grad(): + F.adam( + params_with_grad, + grads, + exp_avgs, + exp_avg_sqs, + max_exp_avg_sqs, + state_steps, + amsgrad=self.amsgrad, + has_complex=has_complex, + maximize=self.maximize, + beta1=self.defaults["beta1"], + beta2=self.defaults["beta2"], + lr=self.defaults["lr"], + weight_decay=self.defaults["weight_decay"], + eps=self.defaults["eps"], + foreach=self.foreach, + fused=self.fused, + grad_scale=None, + found_inf=None, + ) + + def step(self, gradients: List[Optional[Tensor]]): + params = self.param_group["params"] + params_with_grad = [] + grads = [] + exp_avgs = [] + exp_avg_sqs = [] + max_exp_avg_sqs = [] + state_steps: List[Tensor] = [] + has_complex = False + + if len(params) != len(gradients): + raise ValueError( + "the gradients passed in does not equal to the size of the parameters!" + + f"Params length: {len(params)}. 
" + + f"Gradients length: {len(gradients)}" + ) + + for param, gradient in zip(self.param_group["params"], gradients): + if gradient is not None: + has_complex |= torch.is_complex(param) + params_with_grad.append(param) + grads.append(gradient) + # Lazy state initialization + if param not in self.state: + self.state[param] = {} + state = self.state[param] + state["step"] = torch.tensor(0.0) + # Exponential moving average of gradient values + state["exp_avg"] = torch.zeros_like( + param, memory_format=torch.preserve_format + ) + # Exponential moving average of squared gradient values + state["exp_avg_sq"] = torch.zeros_like( + param, memory_format=torch.preserve_format + ) + if self.amsgrad: + # Maintains max of all exp. moving avg. of sq. grad. values + state["max_exp_avg_sq"] = torch.zeros_like( + param, memory_format=torch.preserve_format + ) + + state = self.state[param] + + exp_avgs.append(state["exp_avg"]) + exp_avg_sqs.append(state["exp_avg_sq"]) + + if self.amsgrad: + max_exp_avg_sqs.append(state["max_exp_avg_sq"]) + + state_steps.append(state["step"]) + + with torch.no_grad(): + F.adam( + params_with_grad, + grads, + exp_avgs, + exp_avg_sqs, + max_exp_avg_sqs, + state_steps, + amsgrad=self.amsgrad, + has_complex=has_complex, + maximize=self.maximize, + beta1=self.defaults["beta1"], + beta2=self.defaults["beta2"], + lr=self.defaults["lr"], + weight_decay=self.defaults["weight_decay"], + eps=self.defaults["eps"], + foreach=self.foreach, + fused=self.fused, + grad_scale=None, + found_inf=None, + ) diff --git a/llmeval-env/lib/python3.10/site-packages/torch/distributed/optim/functional_adamax.py b/llmeval-env/lib/python3.10/site-packages/torch/distributed/optim/functional_adamax.py new file mode 100644 index 0000000000000000000000000000000000000000..f3acd4d271ef3e044956818c942fba5d21ddc50e --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/distributed/optim/functional_adamax.py @@ -0,0 +1,117 @@ +from typing import Dict, List, Optional, Tuple + +import torch +import torch.optim._functional as F + +from torch import Tensor + +__all__: List[str] = [] + +# Define a TorchScript compatible Functional Adamax Optimizer +# where we use these optimizer in a functional way. +# Instead of using the `param.grad` when updating parameters, +# we explicitly allow the distributed optimizer pass gradients to +# the `step` function. In this way, we could separate the gradients +# and parameters and allow multithreaded trainer to update the +# parameters without data traces on accumulating to the same .grad. +# NOTE: This should be only used by distributed optimizer internals +# and not meant to expose to the user. 
+@torch.jit.script +class _FunctionalAdamax: + def __init__( + self, + params: List[Tensor], + lr: float = 1e-3, + betas: Tuple[float, float] = (0.9, 0.999), + eps: float = 1e-8, + weight_decay: float = 0.0, + foreach: bool = False, + maximize: bool = False, + _allow_empty_param_list: bool = False, + ): + if not 0.0 <= lr: + raise ValueError(f"Invalid learning rate: {lr}") + if not 0.0 <= eps: + raise ValueError(f"Invalid epsilon value: {eps}") + if not 0.0 <= betas[0] < 1.0: + raise ValueError(f"Invalid beta parameter at index 0: {betas[0]}") + if not 0.0 <= betas[1] < 1.0: + raise ValueError(f"Invalid beta parameter at index 1: {betas[1]}") + if not 0.0 <= weight_decay: + raise ValueError(f"Invalid weight_decay value: {weight_decay}") + + self.defaults = { + "lr": lr, + "eps": eps, + "beta1": betas[0], + "beta2": betas[1], + "weight_decay": weight_decay, + } + self.foreach = foreach + self.maximize = maximize + self.state = torch.jit.annotate(Dict[torch.Tensor, Dict[str, torch.Tensor]], {}) + + if len(params) == 0 and not _allow_empty_param_list: + raise ValueError("optimizer got an empty parameter list") + + # NOTE: we only have one param_group and don't allow user to add additional + # param group as it's not a common use case. + self.param_group = {"params": params} + + def step(self, gradients: List[Optional[Tensor]]): + params = self.param_group["params"] + params_with_grad = [] + grads = [] + exp_avgs = [] + exp_infs = [] + state_steps: List[Tensor] = [] + + if len(params) != len(gradients): + raise ValueError( + "the gradients passed in does not equal to the size of the parameters!" + + f"Params length: {len(params)}. " + + f"Gradients length: {len(gradients)}" + ) + + has_complex = False + for param, gradient in zip(self.param_group["params"], gradients): + if gradient is not None: + has_complex |= torch.is_complex(param) + params_with_grad.append(param) + grads.append(gradient) + # Lazy state initialization + if param not in self.state: + self.state[param] = {} + state = self.state[param] + state["step"] = torch.tensor(0.0) + # Exponential moving average of gradient values + state["exp_avg"] = torch.zeros_like( + param, memory_format=torch.preserve_format + ) + # Exponential moving average of squared gradient values + state["exp_inf"] = torch.zeros_like( + param, memory_format=torch.preserve_format + ) + + state = self.state[param] + + exp_avgs.append(state["exp_avg"]) + exp_infs.append(state["exp_inf"]) + state_steps.append(state["step"]) + + with torch.no_grad(): + F.adamax( + params_with_grad, + grads, + exp_avgs, + exp_infs, + state_steps, + eps=self.defaults["eps"], + beta1=self.defaults["beta1"], + beta2=self.defaults["beta2"], + lr=self.defaults["lr"], + weight_decay=self.defaults["weight_decay"], + foreach=self.foreach, + maximize=self.maximize, + has_complex=has_complex, + ) diff --git a/llmeval-env/lib/python3.10/site-packages/torch/distributed/optim/functional_adamw.py b/llmeval-env/lib/python3.10/site-packages/torch/distributed/optim/functional_adamw.py new file mode 100644 index 0000000000000000000000000000000000000000..40aabafb0ca7889d36e548b9b6c82b7e56fb9467 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/distributed/optim/functional_adamw.py @@ -0,0 +1,197 @@ +from typing import Dict, List, Optional, Tuple + +import torch +import torch.optim._functional as F + +from torch import Tensor + +__all__: List[str] = [] + +# Define a TorchScript compatible Functional AdamW Optimizer +# where we use these optimizer in a functional way. 
+# Instead of using the `param.grad` when updating parameters, +# we explicitly allow the distributed optimizer pass gradients to +# the `step` function. In this way, we could separate the gradients +# and parameters and allow multithreaded trainer to update the +# parameters without data traces on accumulating to the same .grad. +# NOTE: This should be only used by distributed optimizer internals +# and not meant to expose to the user. +@torch.jit.script +class _FunctionalAdamW: + def __init__( + self, + params: List[Tensor], + lr: float = 1e-3, + betas: Tuple[float, float] = (0.9, 0.999), + eps: float = 1e-8, + weight_decay: float = 1e-2, + amsgrad: bool = False, + maximize: bool = False, + foreach: bool = False, + fused: bool = False, + _allow_empty_param_list: bool = False, + ): + if not 0.0 <= lr: + raise ValueError(f"Invalid learning rate: {lr}") + if not 0.0 <= eps: + raise ValueError(f"Invalid epsilon value: {eps}") + if not 0.0 <= betas[0] < 1.0: + raise ValueError(f"Invalid beta parameter at index 0: {betas[0]}") + if not 0.0 <= betas[1] < 1.0: + raise ValueError(f"Invalid beta parameter at index 1: {betas[1]}") + if not 0.0 <= weight_decay: + raise ValueError(f"Invalid weight_decay value: {weight_decay}") + + self.defaults = { + "lr": lr, + "eps": eps, + "beta1": betas[0], + "beta2": betas[1], + "weight_decay": weight_decay, + } + self.amsgrad = amsgrad + self.maximize = maximize + self.foreach = foreach + self.fused = fused + self.state = torch.jit.annotate(Dict[torch.Tensor, Dict[str, torch.Tensor]], {}) + + if len(params) == 0 and not _allow_empty_param_list: + raise ValueError("optimizer got an empty parameter list") + + # NOTE: we only have one param_group and don't allow user to add additional + # param group as it's not a common use case. + self.param_group = {"params": params} + + def step_param(self, param: Tensor, grad: Optional[Tensor]): + params_with_grad = [] + grads = [] + exp_avgs = [] + exp_avg_sqs = [] + max_exp_avg_sqs = [] + state_steps: List[Tensor] = [] + has_complex = torch.is_complex(param) + if grad is not None: + params_with_grad.append(param) + grads.append(grad) + # Lazy state initialization + if param not in self.state: + self.state[param] = {} + state = self.state[param] + state["step"] = torch.tensor(0.0) + # Exponential moving average of gradient values + state["exp_avg"] = torch.zeros_like( + param, memory_format=torch.preserve_format + ) + # Exponential moving average of squared gradient values + state["exp_avg_sq"] = torch.zeros_like( + param, memory_format=torch.preserve_format + ) + if self.amsgrad: + # Maintains max of all exp. moving avg. of sq. grad. 
values + state["max_exp_avg_sq"] = torch.zeros_like( + param, memory_format=torch.preserve_format + ) + + state = self.state[param] + + exp_avgs.append(state["exp_avg"]) + exp_avg_sqs.append(state["exp_avg_sq"]) + + if self.amsgrad: + max_exp_avg_sqs.append(state["max_exp_avg_sq"]) + + state_steps.append(state["step"]) + with torch.no_grad(): + F.adamw( + params_with_grad, + grads, + exp_avgs, + exp_avg_sqs, + max_exp_avg_sqs, + state_steps, + amsgrad=self.amsgrad, + maximize=self.maximize, + beta1=self.defaults["beta1"], + beta2=self.defaults["beta2"], + lr=self.defaults["lr"], + weight_decay=self.defaults["weight_decay"], + eps=self.defaults["eps"], + foreach=self.foreach, + fused=self.fused, + grad_scale=None, + found_inf=None, + has_complex=has_complex, + ) + + def step(self, gradients: List[Optional[Tensor]]): + params = self.param_group["params"] + params_with_grad = [] + grads = [] + exp_avgs = [] + exp_avg_sqs = [] + max_exp_avg_sqs = [] + state_steps: List[Tensor] = [] + + if len(params) != len(gradients): + raise ValueError( + "the gradients passed in does not equal to the size of the parameters!" + + f"Params length: {len(params)}. " + + f"Gradients length: {len(gradients)}" + ) + + has_complex = False + for param, gradient in zip(self.param_group["params"], gradients): + if gradient is not None: + has_complex |= torch.is_complex(param) + params_with_grad.append(param) + grads.append(gradient) + # Lazy state initialization + if param not in self.state: + self.state[param] = {} + state = self.state[param] + state["step"] = torch.tensor(0.0) + # Exponential moving average of gradient values + state["exp_avg"] = torch.zeros_like( + param, memory_format=torch.preserve_format + ) + # Exponential moving average of squared gradient values + state["exp_avg_sq"] = torch.zeros_like( + param, memory_format=torch.preserve_format + ) + if self.amsgrad: + # Maintains max of all exp. moving avg. of sq. grad. values + state["max_exp_avg_sq"] = torch.zeros_like( + param, memory_format=torch.preserve_format + ) + + state = self.state[param] + + exp_avgs.append(state["exp_avg"]) + exp_avg_sqs.append(state["exp_avg_sq"]) + + if self.amsgrad: + max_exp_avg_sqs.append(state["max_exp_avg_sq"]) + + state_steps.append(state["step"]) + + with torch.no_grad(): + F.adamw( + params_with_grad, + grads, + exp_avgs, + exp_avg_sqs, + max_exp_avg_sqs, + state_steps, + amsgrad=self.amsgrad, + maximize=self.maximize, + beta1=self.defaults["beta1"], + beta2=self.defaults["beta2"], + lr=self.defaults["lr"], + weight_decay=self.defaults["weight_decay"], + eps=self.defaults["eps"], + foreach=self.foreach, + fused=self.fused, + grad_scale=None, + found_inf=None, + has_complex=has_complex, + ) diff --git a/llmeval-env/lib/python3.10/site-packages/torch/distributed/optim/functional_rmsprop.py b/llmeval-env/lib/python3.10/site-packages/torch/distributed/optim/functional_rmsprop.py new file mode 100644 index 0000000000000000000000000000000000000000..4324760df8d533f6f38e89b9183983b7742133c4 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/distributed/optim/functional_rmsprop.py @@ -0,0 +1,122 @@ +from typing import Dict, List, Optional + +import torch +import torch.optim._functional as F + +from torch import Tensor + +__all__: List[str] = [] + +# Define a TorchScript compatible Functional RMSprop Optimizer +# where we use these optimizer in a functional way. 
+# Instead of using the `param.grad` when updating parameters, +# we explicitly allow the distributed optimizer pass gradients to +# the `step` function. In this way, we could separate the gradients +# and parameters and allow multithreaded trainer to update the +# parameters without data traces on accumulating to the same .grad. +# NOTE: This should be only used by distributed optimizer internals +# and not meant to expose to the user. +@torch.jit.script +class _FunctionalRMSprop: + def __init__( + self, + params: List[Tensor], + lr: float = 1e-2, + alpha: float = 0.99, + eps: float = 1e-8, + weight_decay: float = 0.0, + momentum: float = 0.0, + centered: bool = False, + foreach: bool = False, + maximize: bool = False, + _allow_empty_param_list: bool = False, + ): + self.defaults = { + "lr": lr, + "alpha": alpha, + "eps": eps, + "weight_decay": weight_decay, + "momentum": momentum, + } + self.centered = centered + self.foreach = foreach + self.maximize = maximize + + if len(params) == 0 and not _allow_empty_param_list: + raise ValueError("optimizer got an empty parameter list") + + # NOTE: we only have one param_group and don't allow user to add additional + # param group as it's not a common use case. + self.param_group = {"params": params} + + self.state = torch.jit.annotate(Dict[torch.Tensor, Dict[str, torch.Tensor]], {}) + + def step(self, gradients: List[Optional[Tensor]]): + params = self.param_group["params"] + params_with_grad = [] + grads = [] + square_avgs = [] + grad_avgs = [] + momentum_buffer_list = [] + lr = self.defaults["lr"] + alpha = self.defaults["alpha"] + eps = self.defaults["eps"] + momentum = self.defaults["momentum"] + weight_decay = self.defaults["weight_decay"] + + if len(params) != len(gradients): + raise ValueError( + "the gradients passed in does not equal to the size of the parameters!" + + f"Params length: {len(params)}. 
" + + f"Gradients length: {len(gradients)}" + ) + + has_complex = False + for param, gradient in zip(params, gradients): + if gradient is not None: + has_complex |= torch.is_complex(param) + params_with_grad.append(param) + grads.append(gradient) + # Lazy state initialization + if param not in self.state: + self.state[param] = {} + state = self.state[param] + state["step"] = torch.tensor(0.0) + state["square_avg"] = torch.zeros_like( + param, memory_format=torch.preserve_format + ) + if momentum > 0: + state["momentum_buffer"] = torch.zeros_like( + param, memory_format=torch.preserve_format + ) + if self.centered: + state["grad_avg"] = torch.zeros_like( + param, memory_format=torch.preserve_format + ) + + state = self.state[param] + square_avgs.append(state["square_avg"]) + if momentum > 0: + momentum_buffer_list.append(state["momentum_buffer"]) + if self.centered: + grad_avgs.append(state["grad_avg"]) + + state["step"] += 1 + + with torch.no_grad(): + F.rmsprop( + params_with_grad, + grads, + square_avgs, + grad_avgs, + momentum_buffer_list, + lr=lr, + alpha=alpha, + eps=eps, + weight_decay=weight_decay, + momentum=momentum, + centered=self.centered, + foreach=self.foreach, + maximize=self.maximize, + has_complex=has_complex, + ) diff --git a/llmeval-env/lib/python3.10/site-packages/torch/distributed/optim/functional_rprop.py b/llmeval-env/lib/python3.10/site-packages/torch/distributed/optim/functional_rprop.py new file mode 100644 index 0000000000000000000000000000000000000000..6ac6487b0acb3747e63b0ca1a9168d130a809531 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/distributed/optim/functional_rprop.py @@ -0,0 +1,100 @@ +from typing import Dict, List, Optional, Tuple + +import torch +import torch.optim._functional as F + +from torch import Tensor + +__all__: List[str] = [] + +# Define a TorchScript compatible Functional Rprop Optimizer +# where we use these optimizer in a functional way. +# Instead of using the `param.grad` when updating parameters, +# we explicitly allow the distributed optimizer pass gradients to +# the `step` function. In this way, we could separate the gradients +# and parameters and allow multithreaded trainer to update the +# parameters without data traces on accumulating to the same .grad. +# NOTE: This should be only used by distributed optimizer internals +# and not meant to expose to the user. +@torch.jit.script +class _FunctionalRprop: + def __init__( + self, + params: List[Tensor], + lr: float = 1e-2, + etas: Tuple[float, float] = (0.5, 1.2), + step_sizes: Tuple[float, float] = (1e-6, 50), + foreach: bool = False, + maximize: bool = False, + _allow_empty_param_list: bool = False, + ): + self.defaults = { + "lr": lr, + } + self.etas = etas + self.step_sizes = step_sizes + self.foreach = foreach + self.maximize = maximize + + if len(params) == 0 and not _allow_empty_param_list: + raise ValueError("optimizer got an empty parameter list") + + # NOTE: we only have one param_group and don't allow user to add additional + # param group as it's not a common use case. 
+ self.param_group = {"params": params} + + self.state = torch.jit.annotate(Dict[torch.Tensor, Dict[str, torch.Tensor]], {}) + + def step(self, gradients: List[Optional[Tensor]]): + params = self.param_group["params"] + params_with_grad = [] + grads = [] + prevs = [] + step_sizes = [] + lr = self.defaults["lr"] + etaminus, etaplus = self.etas + step_size_min, step_size_max = self.step_sizes + + if len(params) != len(gradients): + raise ValueError( + "the gradients passed in does not equal to the size of the parameters!" + + f"Params length: {len(params)}. " + + f"Gradients length: {len(gradients)}" + ) + + has_complex = False + for param, gradient in zip(params, gradients): + if gradient is not None: + has_complex |= torch.is_complex(param) + params_with_grad.append(param) + grads.append(gradient) + # Lazy state initialization + if param not in self.state: + self.state[param] = {} + state = self.state[param] + state["step"] = torch.tensor(0.0) + state["prev"] = torch.zeros_like( + param, memory_format=torch.preserve_format + ) + state["step_size"] = torch.full_like(gradient, lr) + + state = self.state[param] + prevs.append(state["prev"]) + step_sizes.append(state["step_size"]) + + state["step"] += 1 + + with torch.no_grad(): + F.rprop( + params_with_grad, + grads, + prevs, + step_sizes, + step_size_min=step_size_min, + step_size_max=step_size_max, + etaminus=etaminus, + etaplus=etaplus, + foreach=self.foreach, + maximize=self.maximize, + has_complex=has_complex, + ) diff --git a/llmeval-env/lib/python3.10/site-packages/torch/distributed/optim/functional_sgd.py b/llmeval-env/lib/python3.10/site-packages/torch/distributed/optim/functional_sgd.py new file mode 100644 index 0000000000000000000000000000000000000000..4a807a605571979fd50d96a70a3b25f1ee507a99 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/distributed/optim/functional_sgd.py @@ -0,0 +1,160 @@ +from typing import Dict, List, Optional + +import torch +import torch.optim._functional as F + +from torch import Tensor + +__all__: List[str] = [] + +# Define a TorchScript compatible Functional SGD Optimizer +# where we use these optimizer in a functional way. +# Instead of using the `param.grad` when updating parameters, +# we explicitly allow the distributed optimizer pass gradients to +# the `step` function. In this way, we could separate the gradients +# and parameters and allow multithreaded trainer to update the +# parameters without data traces on accumulating to the same .grad. +# NOTE: This should be only used by distributed optimizer internals +# and not meant to expose to the user. +@torch.jit.script +class _FunctionalSGD: + def __init__( + self, + params: List[Tensor], + lr: float = 1e-2, + momentum: float = 0.0, + dampening: float = 0.0, + weight_decay: float = 0.0, + nesterov: bool = False, + maximize: bool = False, + foreach: bool = False, + fused: bool = False, + _allow_empty_param_list: bool = False, + ): + self.defaults = { + "lr": lr, + "momentum": momentum, + "dampening": dampening, + "weight_decay": weight_decay, + } + self.nesterov = nesterov + self.maximize = maximize + self.foreach = foreach + self.fused = fused + self.state = torch.jit.annotate(Dict[torch.Tensor, Dict[str, torch.Tensor]], {}) + + if len(params) == 0 and not _allow_empty_param_list: + raise ValueError("optimizer got an empty parameter list") + + # NOTE: we only have one param_group and don't allow user to add additional + # param group as it's not a common use case. 
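+        # Illustrative sketch only (not executed; names are example values):
+        # besides `step`, this class exposes `step_param` so callers such as
+        # gradient-hook based integrations can update one parameter at a time:
+        #
+        #   import torch
+        #   from torch.distributed.optim.functional_sgd import _FunctionalSGD
+        #
+        #   w = torch.randn(3, 3, requires_grad=True)
+        #   opt = _FunctionalSGD([w], lr=0.1, momentum=0.9)
+        #   opt.step_param(w, torch.ones_like(w))  # update only `w`
+        #   opt.step([torch.ones_like(w)])         # or all params at once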
+ self.param_group = {"params": params} + + def step_param(self, param: Tensor, grad: Optional[Tensor]): + """Similar to self.step, but operates on a single parameter and + its gradient. + """ + # TODO: Once step_param interface is robust, refactor step to call + # step param on each param. + weight_decay = self.defaults["weight_decay"] + momentum = self.defaults["momentum"] + dampening = self.defaults["dampening"] + lr = self.defaults["lr"] + params = [param] + momentum_buffer_list: List[Optional[Tensor]] = [] + grads = [] + + has_sparse_grad = False + if grad is not None: + grads.append(grad) + if grad.is_sparse: + has_sparse_grad = True + if param not in self.state: + self.state[param] = {} + state = self.state[param] + if "momentum_buffer" not in state: + momentum_buffer_list.append(None) + else: + momentum_buffer_list.append(state["momentum_buffer"]) + + with torch.no_grad(): + F.sgd( + params, + grads, + momentum_buffer_list, + weight_decay=weight_decay, + momentum=momentum, + lr=lr, + dampening=dampening, + nesterov=self.nesterov, + maximize=self.maximize, + has_sparse_grad=has_sparse_grad, + foreach=self.foreach, + fused=self.fused, + grad_scale=None, + found_inf=None, + ) + # update momentum_buffer in state + state = self.state[param] + momentum_buffer = momentum_buffer_list[0] + if momentum_buffer is not None: + state["momentum_buffer"] = momentum_buffer + + def step(self, gradients: List[Optional[Tensor]]): + params = self.param_group["params"] + params_with_grad = [] + grads = [] + momentum_buffer_list: List[Optional[Tensor]] = [] + lr = self.defaults["lr"] + weight_decay = self.defaults["weight_decay"] + momentum = self.defaults["momentum"] + dampening = self.defaults["dampening"] + + if len(params) != len(gradients): + raise ValueError( + "the gradients passed in does not equal to the size of the parameters!" + + f"Params length: {len(params)}. 
" + + f"Gradients length: {len(gradients)}" + ) + + has_sparse_grad = False + for param, gradient in zip(params, gradients): + if gradient is not None: + params_with_grad.append(param) + grads.append(gradient) + if gradient.is_sparse: + has_sparse_grad = True + + if param not in self.state: + self.state[param] = {} + + state = self.state[param] + if "momentum_buffer" not in state: + momentum_buffer_list.append(None) + else: + momentum_buffer_list.append(state["momentum_buffer"]) + + with torch.no_grad(): + F.sgd( + params_with_grad, + grads, + momentum_buffer_list, + weight_decay=weight_decay, + momentum=momentum, + lr=lr, + dampening=dampening, + nesterov=self.nesterov, + maximize=self.maximize, + has_sparse_grad=has_sparse_grad, + foreach=self.foreach, + fused=self.fused, + grad_scale=None, + found_inf=None, + ) + + # update momentum_buffers in state + for i, p in enumerate(params_with_grad): + state = self.state[p] + momentum_buffer = momentum_buffer_list[i] + if momentum_buffer is not None: + state["momentum_buffer"] = momentum_buffer diff --git a/llmeval-env/lib/python3.10/site-packages/torch/distributed/optim/named_optimizer.py b/llmeval-env/lib/python3.10/site-packages/torch/distributed/optim/named_optimizer.py new file mode 100644 index 0000000000000000000000000000000000000000..28edbe39d80e62bffd0773b3ad7a6e9776f61717 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/distributed/optim/named_optimizer.py @@ -0,0 +1,331 @@ +import logging +import warnings + +from copy import deepcopy +from typing import Any, Callable, Collection, Dict, List, Mapping, Optional, Union, overload + +import torch +import torch.nn as nn +from torch import optim +from torch.distributed._shard.sharded_tensor import ShardedTensor +from torch.distributed.fsdp import FullyShardedDataParallel as FSDP + + +__all__: List[str] = [] + +logger = logging.getLogger(__name__) + + +class _NamedOptimizer(optim.Optimizer): + """ + ``_NamedOptimizer`` takes a dict of parameters and exposes ``state_dict`` by parameter key. + + We replace the original key (number) in an optim to the + fully qualified name (FQN) string. User can initialize the optim as they + initialize a PyTorch optim, the only difference is that they also need to + pass in the FQN of each parameters. + + Args: + named_parameters (Mapping[str, Union[torch.Tensor, ShardedTensor]]): + Mapping from FQN to parameter. + optimizer_class (optim.Optimizer): + The class of optimizer to instantiate. + param_groups (Collection[Mapping[str, Any]]): + `param_groups` to pass to optimizer if specified. + The key of the inner map needs to be FQNs. + Default: None + module (nn.Module): the module whose parameters to updated + by the optimizer. + args: arguments to pass to the optimizer constructor. + kwargs: arguments to pass to the optimizer constructor. + + Example:: + >>> # xdoctest: +SKIP("distributed") + >>> from torch import optim + >>> from torch.distributed.optim import _NamedOptimizer + >>> + >>> # Define the named optimizer. + >>> m = Model(...) + >>> named_optim = _NamedOptimizer(m.named_parameters(), optim.SGD) + >>> # Forward pass + backward pass. + >>> named_optim.step() + >>> ... + >>> # Call state_dict for the named optimizer returns a FQN state_dict. + >>> named_optim.state_dict() + + Warning: This API is still in development and subject to change. + + TODO: Add tutorial for _NamedOptimizer. + TODO: Add documentation in the docstring for the public attributes + like self.param_groups and self.named_parameters. 
+ """ + + def __init__( + self, + named_parameters: Mapping[str, Union[torch.Tensor, ShardedTensor]], + optimizer_class: optim.Optimizer, + param_groups: Optional[Collection[Mapping[str, Any]]] = None, + module: Optional[nn.Module] = None, + *args, + **kwargs, + ) -> None: + torch._C._log_api_usage_once("torch.distributed.optim._NamedOptimizer") + self.param_groups: Collection[Mapping[str, Any]] = param_groups # type: ignore[assignment] + self._param_groups_check() + self.named_parameters = dict(named_parameters) + params_for_optimizer = ( + self.named_parameters.values() if param_groups is None else param_groups + ) + self._optimizer = optimizer_class( # type: ignore[operator] + params_for_optimizer, + *args, + **kwargs, + ) + self.module = module + if param_groups is None: + self.ordered_param_keys = list(self.named_parameters.keys()) + else: + warnings.warn( + "Since we pass in param_groups, we will use param_groups to " + "initialize the optimizer, not all parameters of the module." + ) + param_to_key = {param: key for key, param in self.named_parameters.items()} # type: ignore[misc, has-type] + ordered_param_keys = [] + for group in param_groups: + for param in group["params"]: + if param not in param_to_key: + raise ValueError( + f"Expect param name {param} found in param group but is missing." + ) + ordered_param_keys.append(param_to_key[param]) + self.ordered_param_keys = ordered_param_keys + # Update param_groups from optimizer. + self.param_groups = self._optimizer.param_groups + + def _param_groups_check(self): + if self.param_groups is not None: + for param_group in self.param_groups: + assert isinstance(param_group, dict), "param group must be a dict" + assert "params" in param_group, "param group must contain key params" + params = param_group["params"] + if isinstance(params, torch.Tensor): + params = [params] + params = list(params) + for param in params: + if not isinstance(param, torch.Tensor): + raise TypeError( + "optimizer can only optimize Tensors, " + "but one of the params is " + torch.typename(param) + ) + param_group["params"] = params + + def state_dict(self) -> Dict[str, Any]: + """ + Return the ``state_dict`` of the optimizer. + + Instead of using number to index + parameters, we will use module fully qualified name (FQN) as the key. + """ + state_dict = self._optimizer.state_dict() + param_groups = state_dict["param_groups"] + + ret_state = { + self.ordered_param_keys[st_key]: state_val + for st_key, state_val in state_dict["state"].items() + } + + ret_groups = [] + for group in param_groups: + param_keys = [] + for param in group["params"]: + param_keys.append(self.ordered_param_keys[param]) + ret_group = {"params": sorted(param_keys)} + for k, v in group.items(): + if k != "params": + ret_group[k] = deepcopy(v) + ret_groups.append(ret_group) + + return self._post_state_dict({"state": ret_state, "param_groups": ret_groups}) + + @overload + def step(self, closure: None = ...) -> None: + ... + + @overload + def step(self, closure: Callable[[], float]) -> float: + ... + + def step(self, closure: Optional[Callable[[], float]] = None) -> Optional[float]: + """ + Perform a single optimization step. + + This will call :meth:`torch.optim.Optimizer.step` on the wrapped + optimizer. 
+ """ + return self._optimizer.step(closure=closure) + + @property + def state(self) -> Mapping[torch.Tensor, Any]: # type: ignore[override] + return self._optimizer.state + + def load_state_dict(self, state_dict: Mapping[str, Any]) -> None: + """ + Define the default behavior to load a state_dict for ``_NamedOptimizer``. + + Sample Code + ``` + my_model = MyModule() + optimizer = _NamedOptimizer(my_model.named_parameters(), Adagrad) + ... + + optim_state_dict = optimizer.state_dict() + ... + ... + + optimizer.load_state_dict(optim_state_dict) + ... + ``` + Args: + state_dict (Dict[str, Any]) : A ``state_dict`` to load into the optimizer. + Note that this state dict update is performed in place. + + .. note:: PyTorch is using lazy init to initialize the optim states. + So it is possible that there is no optim state when user call + ``load_state_dict`` and for ``_NamedOptimizer`` we make it stricter + that users can only call ``load_state_dict`` after the state is initialized. + By doing this, we can validate the optim ``state_dict`` to be loaded. + """ + new_state_dict = self._optimizer.state_dict() + state_dict = self._pre_load_state_dict(state_dict) + state = state_dict["state"] + new_state = new_state_dict["state"] + if len(new_state) == 0: + raise ValueError( + "Expects the optim to be initialized before load but found not initialized." + ) + + for idx, param_key in enumerate(self.ordered_param_keys): + # When the conditional training is performed, not all parameters are updated in the optim. + if param_key not in state.keys(): + continue + if len(state[param_key]) != len(new_state[idx]): + raise ValueError( + f"Expects equal length as {len(new_state[idx])} for parameter {param_key} but found: {len(state[param_key])}" + ) + # Iterate through all optimizer states. + for state_key, state_val in new_state[idx].items(): + if state_key not in state[param_key]: + raise ValueError( + f"Expects state {state_key} for parameter {param_key} but not found." + ) + + src_state_val = state[param_key][state_key] + if isinstance(state_val, ShardedTensor): + assert isinstance(src_state_val, ShardedTensor) + num_shards = len(state_val.local_shards()) + num_new_shards = len(src_state_val.local_shards()) + if num_shards != num_new_shards: + raise ValueError( + f"Expects equal number of shards as {num_new_shards} but found {num_shards} for {param_key}/{state_key}" + ) + for shard, src_shard in zip( + state_val.local_shards(), src_state_val.local_shards() + ): + shard.tensor.detach().copy_(src_shard.tensor) + elif isinstance(state_val, torch.Tensor): + assert isinstance(src_state_val, torch.Tensor) + state_val.detach().copy_(src_state_val) + else: + new_state[idx][state_key] = deepcopy(src_state_val) + + # Load param_groups of state_dict + src_param_groups = state_dict["param_groups"] + new_param_groups = new_state_dict["param_groups"] + + src_group_map = {} + for group in src_param_groups: + param_keys = list(group["params"]) + src_group_map[_gen_param_group_key(param_keys)] = group + new_group_map = {} + for new_group in new_param_groups: + param_keys = [] + for param_key in new_group["params"]: + param_keys.append(self.ordered_param_keys[param_key]) # type: ignore[call-overload] + new_group_map[_gen_param_group_key(param_keys)] = new_group + for group_key, new_group in new_group_map.items(): + # When not all parameters are used in training or receive gradient, aka., not all parameters + # would be in the param_group. Thus we skip the group_key here. 
+ if group_key not in src_group_map: + continue + src_group = src_group_map[group_key] + if len(src_group) != len(new_group): + raise ValueError( + f"Expects equal param_group size as {len(new_group)} for group {group_key} but found {len(src_group)}." + ) + for k in src_group: + if k not in new_group: + raise ValueError( + f"Expects group key {k} to be in group {group_key} in `state_dict` but is missing." + ) + if k != "params": + new_group[k] = deepcopy(src_group[k]) + + self._optimizer.load_state_dict(new_state_dict) + + def add_param_group(self, param_group: Mapping[str, Any]) -> None: + """ + Add a param group to the :class:`_NamedOptimizer` s `param_groups`. + + Warning: This API is still in development and subject to change. + """ + assert isinstance(param_group, dict), "param group must be a dict" + + params = param_group["params"] + if isinstance(params, torch.Tensor): + param_group["params"] = [params] + else: + param_group["params"] = list(params) + + param_to_key = {param: key for key, param in self.named_parameters.items()} # type: ignore[misc, has-type] + for param in param_group["params"]: + if param not in param_to_key: + raise ValueError("some parameters are not in the module") + self.ordered_param_keys.append(param_to_key[param]) + + self._optimizer.add_param_group(param_group) + # Update param_groups from optimizer. + self.param_groups = self._optimizer.param_groups + + def init_state(self) -> None: + """ + Run a dummy optimizer step, which allows to initialize optimizer state because we do lazy init for most optimizers. + + This allows doing in-place loading of optimizer state from a checkpoint. + """ + for param in self.named_parameters.values(): + if param.requires_grad: + t = torch.zeros_like(param) + param.grad = torch.autograd.Variable(t) + # Calling ``step`` will load the initial state for optimizer states. + self.step(closure=None) + + def _pre_load_state_dict(self, state_dict) -> Dict[str, Any]: + # TODO(chienchin): This API should be FSDP agnostic and should support + # general user hooks. + if isinstance(self.module, FSDP): + return FSDP.optim_state_dict_to_load( + self.module, self._optimizer, state_dict, is_named_optimizer=True + ) + return state_dict + + def _post_state_dict(self, state_dict) -> Dict[str, Any]: + # TODO(chienchin): This API should be FSDP agnostic and should support + # general user hooks. 
+ if isinstance(self.module, FSDP): + FSDP.optim_state_dict(self.module, self._optimizer, state_dict) + return state_dict + + +def _gen_param_group_key(param_keys: List[str]) -> str: + """Concatenate all param keys as a unique indentifier for one param group.""" + return "/".join(sorted(param_keys)) diff --git a/llmeval-env/lib/python3.10/site-packages/torch/distributed/optim/optimizer.py b/llmeval-env/lib/python3.10/site-packages/torch/distributed/optim/optimizer.py new file mode 100644 index 0000000000000000000000000000000000000000..8246c667509d9e40c99ca89ddd26aec735254763 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/distributed/optim/optimizer.py @@ -0,0 +1,254 @@ +import logging + +from collections import defaultdict +from threading import Lock +from typing import List, Optional + +import torch +import torch.distributed.autograd as dist_autograd +import torch.distributed.rpc as rpc +import torch.jit as jit +import torch.nn as nn +from torch import Tensor +from torch.distributed.rpc import RRef +from .utils import functional_optim_map + +__all__ = ["DistributedOptimizer"] + +logger = logging.getLogger(__name__) + + +# XXX: we define a _ScriptModuleOptimizer here to explicitly +# compile the FunctionalOptimizer class into TorchScript +# This is because ScriptClass instance still lives in +# python unless you explicitly compile it as an attribute +# in ScriptModule or pass it to a ScriptFunction +# _ScriptLocalOptimizerInterface serves as a common +# interface type for Optimizer ScriptModules. +# +# TODO (wanchaol): remove this once we added TorchScript +# class reference semantics +@jit.interface +class _ScriptLocalOptimizerInterface: + def step(self, autograd_ctx_id: int) -> None: + pass + + +class _ScriptLocalOptimizer(nn.Module): + # TorchScript does not support multithread concurrent compiling. + # request_callback might invoke concurrent compiling, so we + # serialize the compiling with a lock + compile_lock = Lock() + + def __init__(self, optim_cls, local_params_rref, *args, **kwargs): + super().__init__() + self._local_params = [rref.local_value() for rref in local_params_rref] + self.optim = optim_cls(self._local_params, *args, **kwargs) + + @jit.export + def step(self, autograd_ctx_id: int): + all_local_grads = dist_autograd.get_gradients(autograd_ctx_id) + # apply functional optimizer step with a list of gradients + grads: List[Optional[Tensor]] = [ + all_local_grads[p] if p in all_local_grads else None + for p in self._local_params + ] + + self.optim.step(grads) + + +# TODO (wanchaol): remove/merge this with ScriptLocalOptimizer once +# we have converted all to functional optimizer in distributed.optim +class _LocalOptimizer: + # Ideally we would only need to share a lock for instances of + # _LocalOptimizer that deal with the same parameters. We are + # making a simplifying assumption here that if there is more + # than one instance of _LocalOptimizer per worker, they will + # be optimizing the same parameters (e.g. 
each data parallel + # trainer will create its own instance of _LocalOptimizer but + # they will all optimize the same parameters on each worker) + global_lock = Lock() + + def __init__(self, optim_cls, local_params_rref, *args, **kwargs): + self._local_params = [rref.local_value() for rref in local_params_rref] + self.optim = optim_cls(self._local_params, *args, **kwargs) + + def step(self, autograd_ctx_id): + all_local_grads = dist_autograd.get_gradients(autograd_ctx_id) + + with _LocalOptimizer.global_lock: + for param, grad in all_local_grads.items(): + param.grad = grad + self.optim.step() + + +def _new_local_optimizer(optim_cls, local_params_rref, *args, **kwargs): + return rpc.RRef(_LocalOptimizer(optim_cls, local_params_rref, *args, **kwargs)) + + +def _local_optimizer_step(local_optim_rref, autograd_ctx_id): + local_optim = local_optim_rref.local_value() + local_optim.step(autograd_ctx_id) + + +# new/step functions combined with _ScriptLocalOptimizer to provide GIL-free optimizer +def _new_script_local_optimizer(optim_cls, local_params_rref, *args, **kwargs): + optim = _ScriptLocalOptimizer(optim_cls, local_params_rref, *args, **kwargs) + + with _ScriptLocalOptimizer.compile_lock: + script_optim = jit.script(optim) + return rpc.RRef(script_optim, _ScriptLocalOptimizerInterface) + + +@jit.script +def _script_local_optimizer_step( + local_optim_rref: RRef[_ScriptLocalOptimizerInterface], autograd_ctx_id: int +) -> None: + local_optim = local_optim_rref.local_value() + local_optim.step(autograd_ctx_id) + + +def _wait_for_all(rpc_futs): + # TODO: improve error propagation + exception = None + results = [] + for fut in rpc_futs: + try: + results.append(fut.wait()) + except Exception as e: + results.append(e) + exception = e + if exception is not None: + raise exception + return results + + +class DistributedOptimizer: + """ + DistributedOptimizer takes remote references to parameters scattered + across workers and applies the given optimizer locally for each parameter. + + This class uses :meth:`~torch.distributed.autograd.get_gradients` in order + to retrieve the gradients for specific parameters. + + Concurrent calls to + :meth:`~torch.distributed.optim.DistributedOptimizer.step`, + either from the same or different clients, will + be serialized on each worker -- as each worker's optimizer can only work + on one set of gradients at a time. However, there is no guarantee that + the full forward-backward-optimizer sequence will execute for one client + at a time. This means that the gradients being applied may not correspond + to the latest forward pass executed on a given worker. Also, there is no + guaranteed ordering across workers. + + `DistributedOptimizer` creates the local optimizer with TorchScript enabled + by default, so that optimizer updates are not blocked by the Python Global + Interpreter Lock (GIL) in the case of multithreaded training (e.g. Distributed + Model Parallel). This feature is currently enabled for most optimizers. You + can also follow `the recipe`__ in PyTorch tutorials to enable TorchScript support + for your own custom optimizers. + + Args: + optimizer_class (optim.Optimizer): the class of optimizer to + instantiate on each worker. + params_rref (list[RRef]): list of RRefs to local or remote parameters + to optimize. + args: arguments to pass to the optimizer constructor on each worker. + kwargs: arguments to pass to the optimizer constructor on each worker. 
+ + Example:: + >>> # xdoctest: +SKIP("distributed") + >>> import torch.distributed.autograd as dist_autograd + >>> import torch.distributed.rpc as rpc + >>> from torch import optim + >>> from torch.distributed.optim import DistributedOptimizer + >>> + >>> with dist_autograd.context() as context_id: + >>> # Forward pass. + >>> rref1 = rpc.remote("worker1", torch.add, args=(torch.ones(2), 3)) + >>> rref2 = rpc.remote("worker1", torch.add, args=(torch.ones(2), 1)) + >>> loss = rref1.to_here() + rref2.to_here() + >>> + >>> # Backward pass. + >>> dist_autograd.backward(context_id, [loss.sum()]) + >>> + >>> # Optimizer. + >>> dist_optim = DistributedOptimizer( + >>> optim.SGD, + >>> [rref1, rref2], + >>> lr=0.05, + >>> ) + >>> dist_optim.step(context_id) + + __ https://github.com/pytorch/tutorials/pull/1465 + """ + + def __init__(self, optimizer_class, params_rref, *args, **kwargs): + torch._C._log_api_usage_once("torch.distributed.optim.DistributedOptimizer") + per_worker_params_rref = defaultdict(list) + for param in params_rref: + per_worker_params_rref[param.owner()].append(param) + + if optimizer_class in functional_optim_map and jit._state._enabled: + optim_ctor = functional_optim_map.get(optimizer_class) + else: + optim_ctor = optimizer_class + self.is_functional_optim = optim_ctor != optimizer_class + + if self.is_functional_optim: + optimizer_new_func = _new_script_local_optimizer + else: + logger.warning( + "Creating the optimizer %s without TorchScript support, " + "this might result in slow computation time in multithreading environment" + "(i.e. Distributed Model Parallel training on CPU) due to the Python's " + "Global Interpreter Lock (GIL). Please file an issue if you need this " + "optimizer in TorchScript. ", + optimizer_class + ) + optimizer_new_func = _new_local_optimizer + + remote_optim_futs = [] + for worker, param_rrefs in per_worker_params_rref.items(): + remote_optim_rref_fut = rpc.rpc_async( + worker, + optimizer_new_func, + args=(optim_ctor, param_rrefs) + args, + kwargs=kwargs, + ) + remote_optim_futs.append(remote_optim_rref_fut) + + self.remote_optimizers = _wait_for_all(remote_optim_futs) + + def step(self, context_id): + """ + Performs a single optimization step. + + This will call :meth:`torch.optim.Optimizer.step` on each worker + containing parameters to be optimized, and will block until all workers + return. The provided ``context_id`` will be used to retrieve the + corresponding :class:`~torch.distributed.autograd.context` that + contains the gradients that should be applied to the parameters. + + Args: + context_id: the autograd context id for which we should run the + optimizer step. 
+ """ + dist_autograd._is_valid_context(context_id) + + optimizer_step_func = ( + _script_local_optimizer_step + if self.is_functional_optim + else _local_optimizer_step + ) + + rpc_futs = [] + for optimizer in self.remote_optimizers: + rpc_futs.append( + rpc.rpc_async( + optimizer.owner(), + optimizer_step_func, + args=(optimizer, context_id), + ) + ) + _wait_for_all(rpc_futs) diff --git a/llmeval-env/lib/python3.10/site-packages/torch/distributed/optim/post_localSGD_optimizer.py b/llmeval-env/lib/python3.10/site-packages/torch/distributed/optim/post_localSGD_optimizer.py new file mode 100644 index 0000000000000000000000000000000000000000..f1717685966ad94dbdf9e0ac084f755d11ceca24 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/distributed/optim/post_localSGD_optimizer.py @@ -0,0 +1,109 @@ +import warnings + +import torch +import torch.distributed.algorithms.model_averaging.averagers as averagers + + +class PostLocalSGDOptimizer(torch.optim.Optimizer): + r""" + Wraps an arbitrary :class:`torch.optim.Optimizer` and runs `post-local SGD `_, + This optimizer runs local optimizer at every step. + After the warm-up stage, it averages parameters periodically afer the local optimizer is applied. + + Args: + optim: The local optimizer. + averager: A model averager instance to run post-localSGD algorithm. + + Example:: + + >>> # xdoctest: +SKIP("undefined variables") + >>> import torch + >>> import torch.distributed as dist + >>> import torch.distributed.algorithms.model_averaging.averagers as averagers + >>> import torch.nn as nn + >>> from torch.distributed.optim import PostLocalSGDOptimizer + >>> from torch.distributed.algorithms.ddp_comm_hooks.post_localSGD_hook import ( + >>> PostLocalSGDState, + >>> post_localSGD_hook, + >>> ) + >>> + >>> model = nn.parallel.DistributedDataParallel( + >>> module, device_ids=[rank], output_device=rank + >>> ) + >>> + >>> # Register a post-localSGD communication hook. + >>> state = PostLocalSGDState(process_group=None, subgroup=None, start_localSGD_iter=100) + >>> model.register_comm_hook(state, post_localSGD_hook) + >>> + >>> # Create a post-localSGD optimizer that wraps a local optimizer. + >>> # Note that ``warmup_steps`` used in ``PostLocalSGDOptimizer`` must be the same as + >>> # ``start_localSGD_iter`` used in ``PostLocalSGDState``. + >>> local_optim = torch.optim.SGD(params=model.parameters(), lr=0.01) + >>> opt = PostLocalSGDOptimizer( + >>> optim=local_optim, + >>> averager=averagers.PeriodicModelAverager(period=4, warmup_steps=100) + >>> ) + >>> + >>> # In the first 100 steps, DDP runs global gradient averaging at every step. + >>> # After 100 steps, DDP runs gradient averaging within each subgroup (intra-node by default), + >>> # and post-localSGD optimizer runs global model averaging every 4 steps after applying the local optimizer. + >>> for step in range(0, 200): + >>> opt.zero_grad() + >>> loss = loss_fn(output, labels) + >>> loss.backward() + >>> opt.step() + """ + + def __init__(self, optim: torch.optim.Optimizer, averager: averagers.ModelAverager): + self.optim = optim + self.param_groups = self.optim.param_groups + self.averager = averager + + @property + def state(self): + return self.optim.state + + def __repr__(self): + return self.optim.__repr__() + + def state_dict(self): + r""" + This is the same as :class:`torch.optim.Optimizer` :meth:`state_dict`, + but adds an extra entry to record model averager's step to the checkpoint + to ensure reload does not cause unnecessary warm up again. 
+ """ + optim_state_dict = self.optim.state_dict() + optim_state_dict["step"] = self.averager.step + return optim_state_dict + + def load_state_dict(self, state_dict): + r""" + This is the same as :class:`torch.optim.Optimizer` :meth:`load_state_dict`, + but also restores model averager's step value to the one + saved in the provided ``state_dict``. + + If there is no ``"step"`` entry in ``state_dict``, + it will raise a warning and initialize the model averager's step to 0. + """ + self.optim.load_state_dict(state_dict) + if "step" in state_dict: + self.averager.step = state_dict["step"] + else: + warnings.warn( + "Loaded state dict does not contain a step counter for an averager. " + "Setting step counter to 0." + ) + self.averager.step = 0 + + def step(self): + r""" + Performs a single optimization step (parameter update). + """ + self.optim.step() + self.averager.average_parameters(params=self.param_groups) + + def zero_grad(self, set_to_none: bool = True): # type: ignore[override] + self.optim.zero_grad(set_to_none=set_to_none) + + def add_param_group(self, param_group): + self.optim.add_param_group(param_group) diff --git a/llmeval-env/lib/python3.10/site-packages/torch/distributed/optim/utils.py b/llmeval-env/lib/python3.10/site-packages/torch/distributed/optim/utils.py new file mode 100644 index 0000000000000000000000000000000000000000..5fb197e2d1ddf5b5cca1756ce6196bb68a5d52c2 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/distributed/optim/utils.py @@ -0,0 +1,63 @@ +from typing import Type + +from torch import optim +from .functional_adadelta import _FunctionalAdadelta +from .functional_adagrad import _FunctionalAdagrad +from .functional_adam import _FunctionalAdam +from .functional_adamax import _FunctionalAdamax +from .functional_adamw import _FunctionalAdamW +from .functional_rmsprop import _FunctionalRMSprop +from .functional_rprop import _FunctionalRprop +from .functional_sgd import _FunctionalSGD + +# dict to map a user passed in optimizer_class to a functional +# optimizer class if we have already defined inside the +# distributed.optim package, this is so that we hide the +# functional optimizer to user and still provide the same API. +functional_optim_map = { + optim.Adagrad: _FunctionalAdagrad, + optim.Adam: _FunctionalAdam, + optim.AdamW: _FunctionalAdamW, + optim.SGD: _FunctionalSGD, + optim.Adadelta: _FunctionalAdadelta, + optim.RMSprop: _FunctionalRMSprop, + optim.Rprop: _FunctionalRprop, + optim.Adamax: _FunctionalAdamax, +} + + +def register_functional_optim(key, optim): + """ + Interface to insert a new functional optimizer to functional_optim_map + ``fn_optim_key`` and ``fn_optimizer`` are user defined. The optimizer and key + need not be of :class:`torch.optim.Optimizer` (e.g. for custom optimizers) + Example:: + >>> # import the new functional optimizer + >>> # xdoctest: +SKIP + >>> from xyz import fn_optimizer + >>> from torch.distributed.optim.utils import register_functional_optim + >>> fn_optim_key = "XYZ_optim" + >>> register_functional_optim(fn_optim_key, fn_optimizer) + """ + if key not in functional_optim_map: + functional_optim_map[key] = optim + + +def as_functional_optim(optim_cls: Type, *args, **kwargs): + try: + functional_cls = functional_optim_map[optim_cls] + except KeyError as e: + raise ValueError( + f"Optimizer {optim_cls} does not have a functional " f"counterpart!" 
+ ) from e + + return _create_functional_optim(functional_cls, *args, **kwargs) + + +def _create_functional_optim(functional_optim_cls: Type, *args, **kwargs): + return functional_optim_cls( + [], + *args, + **kwargs, + _allow_empty_param_list=True, + ) diff --git a/llmeval-env/lib/python3.10/site-packages/torch/distributed/optim/zero_redundancy_optimizer.py b/llmeval-env/lib/python3.10/site-packages/torch/distributed/optim/zero_redundancy_optimizer.py new file mode 100644 index 0000000000000000000000000000000000000000..8a3be3b0181536e32da5c172aac5e6c0de906e3f --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/distributed/optim/zero_redundancy_optimizer.py @@ -0,0 +1,1651 @@ +# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved. +# +# This source code is licensed under the BSD license found in the +# LICENSE file in the root directory of this source tree. + +r"""Zero Redundancy Optimizer.""" +import collections +import copy +import enum +import inspect +import io +import logging +from itertools import chain +from typing import Any, Callable, Dict, List, Optional, Set, Type, Union + +import torch +import torch.distributed as dist +from torch.distributed.algorithms.join import Join, Joinable, JoinHook +from torch.distributed.optim.utils import functional_optim_map +from torch.optim import Optimizer + + +logger = logging.getLogger(__name__) + +__all__ = ["ZeroRedundancyOptimizer"] + + +# Credits: classy_vision/generic/distributed_util.py +def _recursive_copy_to_device( + value: Any, + non_blocking: bool, + device: torch.device, +) -> Any: + r""" + Recursively searches lists, tuples, dicts and copies tensors to device if possible. + + Non-tensor values are passed as-is in the result. + + .. note: These are all copies, so if there are two objects that reference + the same object, then after this call, there will be two different objects + referenced on the device. + """ + if isinstance(value, torch.Tensor): + return value.to(device, non_blocking=non_blocking) + + if isinstance(value, (list, tuple)): + values = [ + _recursive_copy_to_device(val, non_blocking=non_blocking, device=device) + for val in value + ] + return values if isinstance(value, list) else tuple(values) + + if isinstance(value, collections.abc.Mapping): + return { + key: _recursive_copy_to_device( + val, non_blocking=non_blocking, device=device + ) + for key, val in value.items() + } + + return value + + +def _is_trainable(param: torch.Tensor) -> bool: + r"""Return if a parameter is trainable, where trainability is equivalent to requiring a gradient.""" + return param.requires_grad + + +def _broadcast_object( + obj: Any, + src_rank: int, + group: object = dist.group.WORLD, + device: torch.device = torch.device("cpu"), +) -> Any: + r""" + Broadcasts an object to the given group. + + It will be sending the object if called from the source rank and receiving + the object otherwise. + + Arguments: + obj: object to broadcast; only used if called on the source rank. + src_rank (int): source rank. + group (``ProcessGroup``, optional): group used for the broadcast + (default: ``dist.group.WORLD``). + device (``torch.device``, optional): device to send from or receive + to (default: ``torch.device("cpu")``). + + Returns: + The broadcasted object. 
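+
+    A rough usage sketch (assumes the default process group is already
+    initialized; the payload is illustrative)::
+
+        >>> # xdoctest: +SKIP("distributed")
+        >>> # rank 0 supplies the object, every other rank receives a copy
+        >>> obj = {"epoch": 3} if dist.get_rank() == 0 else None
+        >>> obj = _broadcast_object(obj, src_rank=0)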
+ """ + if dist.get_rank() == src_rank: + # Send the object + buffer = io.BytesIO() + torch.save(obj, buffer) + data = bytearray(buffer.getbuffer()) + length_tensor = torch.LongTensor([len(data)]).to(device) + data_send_tensor = torch.ByteTensor(data).to(device) + dist.broadcast(length_tensor, src=src_rank, group=group, async_op=False) + dist.broadcast(data_send_tensor, src=src_rank, group=group, async_op=False) + else: + # Receive the object + length_tensor = torch.LongTensor([0]).to(device) + dist.broadcast(length_tensor, src=src_rank, group=group, async_op=False) + data_recv_tensor = torch.empty( + [int(length_tensor.item())], dtype=torch.uint8, device=device + ) + dist.broadcast(data_recv_tensor, src=src_rank, group=group, async_op=False) + buffer = io.BytesIO(data_recv_tensor.cpu().numpy()) + obj = torch.load(buffer, map_location=device) + return obj + + +class _ZeROJoinHook(JoinHook): + def __init__(self, zero): + assert isinstance(zero, ZeroRedundancyOptimizer), ( + "ZeRO join hook requires passing in a ZeroRedundancyOptimizer " + "instance as the state" + ) + self.zero = zero + super().__init__() + + def main_hook(self): + """ + Perform an optimizer step. + + This step updates the joined process's shard of + the parameters and broadcasts those parameters. + """ + self.zero.step() + + +class _DDPBucketAssignment: + r""" + Represent a :class:`DistributedDataParallel` bucket assignment. + + This means that a (possibly non-strict) subset of the parameters corresponding to + a DDP bucket assigned to a rank to update. + + Attributes: + bucket_index (int): index of the bucket determined by the DDP gradient + bucket all-reduce order. + parameters (List[torch.Tensor]): model parameters in the bucket + assigned to this rank. + offset (int): offset into the :class:`GradBucket` 's :meth:`parameters` + giving the index of the first element in the passed-in + ``parameters``; this equivalently indexes into the + :class:`GradBucket` 's :meth:`gradients`. + device (torch.device): device on which the parameters are stored. + tensor (torch.Tensor): flattened tensor giving the data of the + parameter subset assigned to the rank. + """ + + def __init__( + self, + bucket_index: int, + parameters: List[torch.Tensor], + offset: int, + ): + self.bucket_index = bucket_index + self.parameters = parameters + self.offset = offset + if len(self.parameters) == 0: + raise ValueError("Empty bucket assignment") + # DDP guarantees all parameters in the bucket have the same device + self.device: torch.device = self.parameters[0].device + self.tensor: Optional[torch.Tensor] = None + + +class _OverlapStatus(enum.IntEnum): + r""" + Define possible statuses that :class:`ZeroRedundancyOptimizer` can be in when overlapping with :class:`DistributedDataParallel`. + + Attributes: + ``UNINITIALIZED``: The ZeRO instance is effectively uninitialized and + is waiting for DDP to finalize its bucketing. + ``DDP_HAS_REBUILT_BUCKETS``: DDP has rebuilt its buckets, meaning that + its bucketing is finalized. The ZeRO instance can now collect the + necessary information about the DDP bucketing. + ``INITIALIZED``: The ZeRO instance is fully initialized and can now + optimize parameters. + """ + + UNINITIALIZED = 0 + DDP_HAS_REBUILT_BUCKETS = 1 + INITIALIZED = 2 + + +class _OverlapInfo: + r""" + Information needed by :class:`ZeroRedundancyOptimizer` to overlap with :class:`DistributedDataParallel`. + + Arguments: + world_size (int): world size of the process group being used. 
+ + Attributes: + shard_buckets (bool): if ``True``, then the assignment of each + :class:`DistributedDataParallel` bucket is partitioned across + possibly multiple :class:`ZeroRedundancyOptimizer` instances (i.e. + across possibly multiple ranks) to approximate uniformity following + a threshold given by the total parameter size divided by the world + size; if ``False``, then each bucket is wholly assigned to a single + :class:`ZeroRedundancyOptimizer` instance (i.e. to a single rank); + this should be set to the value passed into the hook constructor. + status (_OverlapStatus): current status; see :class:`_OverlapStatus` + for more information. + params_per_bucket (List[List[torch.Tensor]]): ``params_per_bucket[i]`` + gives the model parameters in the ``i``th bucket. + params_per_rank (List[List[torch.Tensor]]): ``params_per_rank[i]`` + gives the model parameters assigned to the ``i``th rank, where the + parameters are grouped by increasing bucket indices. + offsets (Dict[int, int]): maps from bucket index to the offset in + ``self.params_per_rank[rank]`` giving the index of the first + parameter in that bucket, where ``rank`` is this process's own + rank; the keys of this :class:`dict` are the bucket indices + assigned to this rank. + num_bucket_assignments (int): total number of bucket assignments across + all ranks; this is equal to the number of + :class:`DistributedDataParallel` gradient buckets if + ``shard_buckets=False`` and possibly greater otherwise. + total_size (int, optional): total size of all buckets (i.e. sum of + ``param.numel()`` for all ``param`` across all buckets) if + ``shard_buckets=True``; otherwise, ``None``. + broadcast_handles (List[Work]): :class:`list` of async work handles for + the parameter broadcasts. + bucket_index_to_future (Dict[int, torch.futures.Future]): + :class:`dict` mapping bucket index to the corresponding all-reduce + future. + bucket_index_to_bucket (Dict[int, dist.GradBucket]): :class:`dict` + mapping bucket index to the corresponding bucket. + bucket_indices_seen (List[int]): :class:`list` of the bucket indices + seen on this iteration. + """ + + def __init__(self, world_size) -> None: + self.status: _OverlapStatus = _OverlapStatus.UNINITIALIZED + self.shard_buckets: bool = False + + # Modified per bucket reconstruction + self.params_per_bucket: List[List[torch.Tensor]] = [] + self.params_per_rank: List[List[torch.Tensor]] = [[] for _ in range(world_size)] + self.offsets: Dict[int, int] = {} + # Group Ranks + self.assigned_ranks_per_bucket: List[Set[int]] = [] + self.num_bucket_assignments: int = 0 + self.total_size: Optional[int] = None + + # Modified per iteration + self.broadcast_handles: List[Any] = [] + self.bucket_indices_seen: List[int] = [] + # Used by `hook_with_zero_step()` + self.bucket_index_to_future: Dict[int, torch.futures.Future] = {} + self.bucket_index_to_bucket: Dict[int, dist.GradBucket] = {} + + def wait_for_broadcasts(self) -> None: + r""" + Wait for all parameter broadcasts. + + This function should be called once all broadcasts have been scheduled, + meaning ``self.broadcast_handles`` is filled. This clears ``self.broadcast_handles`` + in preparation for the next iteration. + """ + assert ( + len(self.broadcast_handles) == self.num_bucket_assignments + ), f"Missing at least one broadcast handle on rank {dist.get_rank()}" + _ = [x.wait() for x in self.broadcast_handles] + self.broadcast_handles.clear() + + def clear_per_iter_info(self) -> None: + r""" + Clear the data structures that are modified per-iteration. 
+ + This function should be called at the end of an iteration. + """ + self.bucket_indices_seen.clear() + self.bucket_index_to_future.clear() + self.bucket_index_to_bucket.clear() + + +class ZeroRedundancyOptimizer(Optimizer, Joinable): + r""" + Wrap an arbitrary :class:`optim.Optimizer ` and shards its states across ranks in the group. + + The sharing is done as described by ZeRO_. + + The local optimizer instance in each rank is only + responsible for updating approximately ``1 / world_size`` parameters and + hence only needs to keep ``1 / world_size`` optimizer states. After + parameters are updated locally, each rank will broadcast its parameters to + all other peers to keep all model replicas in the same state. + ``ZeroRedundancyOptimizer`` can be used in conjunction with + :class:`torch.nn.parallel.DistributedDataParallel` to reduce per-rank peak + memory consumption. + + ``ZeroRedundancyOptimizer`` uses a sorted-greedy algorithm to pack a number + of parameters at each rank. Each parameter belongs to a single rank and is + not divided among ranks. The partition is arbitrary and might not match the + the parameter registration or usage order. + + Arguments: + params (``Iterable``): an ``Iterable`` of :class:`torch.Tensor` s + or :class:`dict` s giving all parameters, which will be sharded + across ranks. + + Keyword Args: + optimizer_class (:class:`torch.nn.Optimizer`): the class of the local + optimizer. + process_group (``ProcessGroup``, optional): ``torch.distributed`` + ``ProcessGroup`` (default: ``dist.group.WORLD`` initialized by + :meth:`torch.distributed.init_process_group`). + parameters_as_bucket_view (bool, optional): if ``True``, parameters are + packed into buckets to speed up communication, and ``param.data`` + fields point to bucket views at different offsets; if ``False``, + each individual parameter is communicated separately, and each + ``params.data`` stays intact (default: ``False``). + overlap_with_ddp (bool, optional): if ``True``, :meth:`step` is + overlapped with :class:`DistributedDataParallel` 's gradient + synchronization; this requires (1) either a functional optimizer + for the ``optimizer_class`` argument or one with a functional + equivalent and (2) registering a DDP communication hook + constructed from one of the functions in ``ddp_zero_hook.py``; + parameters are packed into buckets matching those in + :class:`DistributedDataParallel`, meaning that the + ``parameters_as_bucket_view`` argument is ignored. + If ``False``, :meth:`step` runs disjointly after the backward pass + (per normal). + (default: ``False``) + **defaults: any trailing arguments, which are forwarded to the local + optimizer. + + Example:: + + >>> # xdoctest: +SKIP + >>> import torch.nn as nn + >>> from torch.distributed.optim import ZeroRedundancyOptimizer + >>> from torch.nn.parallel import DistributedDataParallel as DDP + >>> model = nn.Sequential(*[nn.Linear(2000, 2000).to(rank) for _ in range(20)]) + >>> ddp = DDP(model, device_ids=[rank]) + >>> opt = ZeroRedundancyOptimizer( + >>> ddp.parameters(), + >>> optimizer_class=torch.optim.Adam, + >>> lr=0.01 + >>> ) + >>> ddp(inputs).sum().backward() + >>> opt.step() + + .. warning:: + Currently, ``ZeroRedundancyOptimizer`` requires that all of the + passed-in parameters are the same dense type. + + .. 
warning:: + If you pass ``overlap_with_ddp=True``, be wary of the following: Given + the way that overlapping :class:`DistributedDataParallel` with + :class:`ZeroRedundancyOptimizer` is currently implemented, the first + two or three training iterations do not perform parameter updates in + the optimizer step, depending on if ``static_graph=False`` or + ``static_graph=True``, respectively. This is because it needs + information about the gradient bucketing strategy used by + :class:`DistributedDataParallel`, which is not finalized until the + second forward pass if ``static_graph=False`` or until the third + forward pass if ``static_graph=True``. To adjust for this, one option + is to prepend dummy inputs. + + .. warning:: ZeroRedundancyOptimizer is experimental and subject to change. + + .. _ZeRO: https://arxiv.org/abs/1910.02054 + + """ + + def __init__( + self, + params, + optimizer_class: Type[Optimizer], + process_group: Optional[Any] = None, + parameters_as_bucket_view: bool = False, + overlap_with_ddp: bool = False, + **defaults: Any, + ): + r"""Init.""" + # Perform type and assumption checks on the input parameters + params = self._verify_and_init_params(params) + self._verify_same_dense_param_type() + + # NOTE: The parent constructor uses `add_param_group()` which is + # partially overloaded in ZeroRedundancyOptimizer, so we use the + # `initialized` flag to dissociate the behaviour of `add_param_group()` + # between the parent and child. + self.initialized = False + + Optimizer.__init__(self, params, defaults) + Joinable.__init__(self) + # Now, all parameters are held in both `self._all_params` and + # `self.param_groups` + + # Internal data structures (`_cache` indicates lazily evaluated) + self._param_to_rank_cache: Dict[torch.Tensor, int] = {} + self._param_to_index_cache: Dict[torch.Tensor, int] = {} + self._partition_parameters_cache: List[List[Dict]] = [] + self._index_to_param_cache: List[torch.Tensor] = [] + self._device_to_params_per_rank_cache: Dict[ + torch.device, List[List[torch.Tensor]] + ] = {} + self._bucket_assignments_per_rank_cache: List[ + Dict[int, _DDPBucketAssignment] + ] = [] + self._is_trainable_mask = self._get_is_trainable_mask() + + # Default device for collective communication and buckets + self._default_device = self._all_params[0].device + + self.process_group = ( + process_group if process_group is not None else dist.group.WORLD + ) + self.world_size: int = dist.get_world_size(self.process_group) + self.rank: int = dist.get_rank(self.process_group) + self.global_rank: int = dist.distributed_c10d.get_global_rank( + self.process_group, self.rank + ) + + self._overlap_with_ddp: bool = overlap_with_ddp + self._optim_defaults = defaults + self._optim_constructor = self._get_optimizer_constructor(optimizer_class) + + # If `overlap_with_ddp=True`, local optimizer initialization is delayed + # to run time after the necessary information has been collected + if not overlap_with_ddp: + self._init_local_optimizer() + else: + self._overlap_info: _OverlapInfo = _OverlapInfo(self.world_size) + if parameters_as_bucket_view: + logger.warning( + "`parameters_as_bucket_view=True` will be ignored since " + "`overlap_with_ddp=True`; instead, a different bucketing " + "strategy will be used" + ) + + # `self._buckets` is used if `parameters_as_bucket_view=True`, in + # which case parameter data is flattened into contiguous bucket tensors + self.parameters_as_bucket_view = parameters_as_bucket_view + self._buckets: List[List[torch.Tensor]] = [] + 
self._build_param_buckets() + + # Optional consolidated optimizer state, only populated if this rank + # is the target in `consolidate_state_dict()` + self._all_state_dicts: List[Dict[str, Any]] = [] + + self.initialized = True + + def _clear_cache(self) -> None: + r"""Clear the cached data structures giving partition information.""" + self._partition_parameters_cache.clear() + self._param_to_rank_cache.clear() + self._index_to_param_cache.clear() + self._param_to_index_cache.clear() + self._device_to_params_per_rank_cache.clear() + self._bucket_assignments_per_rank_cache.clear() + + def add_param_group(self, param_group: Dict[str, Any]) -> None: + r""" + Add a parameter group to the :class:`Optimizer` 's ``param_groups``. + + This can be useful when fine tuning a pre-trained network, as frozen + layers can be made trainable and added to the :class:`Optimizer` as + training progresses. + + Arguments: + param_group (dict): specifies the parameters to be optimized and + group-specific optimization options. + + .. warning:: This method handles updating the shards on all partitions + but needs to be called on all ranks. Calling this on a subset of + the ranks will cause the training to hang because communication + primitives are called depending on the managed parameters and + expect all the ranks to participate on the same set of parameters. + """ + if self.initialized and self._overlap_with_ddp: + raise RuntimeError( + "ZeroRedundancyOptimizer with `overlap_with_ddp=True` only " + "supports a single parameter group" + ) + + super().add_param_group(param_group) + # NOTE: The rest of the method assumes that the call to the parent's + # `add_param_group()` appends the new parameter group and preserves + # the previous parameter-group ordering + + if self.initialized: + # Force a re-partitioning of the parameters + self._clear_cache() + param_groups = self._partition_parameters()[self.rank] + # NOTE: All parameters in the old parameter groups should be + # assigned to the same ranks so that the local optimizers do not + # need to be reinitialized + + # Add the parameters assigned to this rank from the new parameter + # group to the local optimizer, if any + if len(param_groups) == len(self.optim.param_groups) + 1: + self.optim.add_param_group(param_groups[-1]) + + # Update the bucketing strategy accordingly + if self.parameters_as_bucket_view: + self._build_param_buckets() + + def consolidate_state_dict(self, to: int = 0) -> None: + r""" + Consolidate a list of ``state_dict`` s (one per rank) on the target rank. + + Arguments: + to (int): the rank that receives the optimizer states (default: 0). + + Raises: + RuntimeError: if ``overlap_with_ddp=True`` and this method is + called before this :class:`ZeroRedundancyOptimizer` instance + has been fully initialized, which happens once + :class:`DistributedDataParallel` gradient buckets have been + rebuilt. + + .. warning:: This needs to be called on all ranks. + """ + self._check_overlap_initialized() + + # Sync the exposed `param_groups` attributes to the local optimizer in + # case they have been updated + self._sync_param_groups(self.param_groups, self.optim.param_groups) + + # Pull the sharded state from all ranks and store them in rank order + empty_messenger = torch.tensor( + [0], dtype=torch.uint8, device=self._default_device + ) + + # NOTE: We wastefully use `broadcast()` (e.g. 
instead of `gather()`) + # due to compatibility issues with NCCL backend; a possible follow-up + # is to move all sharded state management to RPC RRef + self._all_state_dicts = [] + for rank in range(self.world_size): + global_rank = dist.distributed_c10d.get_global_rank( + self.process_group, rank + ) + if self.rank == to: + # Consolidate all local `state_dict`s on this rank, storing on + # CPU to save GPU memory + if rank == self.rank: + # Directly append own optimizer state + self._all_state_dicts.append( + _recursive_copy_to_device( + self.optim.state_dict(), + non_blocking=True, + device=torch.device("cpu"), + ) + ) + else: + # Receive the optimizer state from the source rank + local_state_dict = _broadcast_object( + empty_messenger, + src_rank=global_rank, + group=self.process_group, + device=self._default_device, + ) + self._all_state_dicts.append( + _recursive_copy_to_device( + local_state_dict, + non_blocking=True, + device=torch.device("cpu"), + ) + ) + else: + if rank == self.rank: + # Send the optimizer state to the target rank + _ = _broadcast_object( + self.optim.state_dict(), + src_rank=self.global_rank, + group=self.process_group, + device=self._default_device, + ) + elif rank != to: + # Discard the received object; `broadcast()` is used for + # compatibility reasons + _ = _broadcast_object( + empty_messenger, + src_rank=global_rank, + group=self.process_group, + device=self._default_device, + ) + + def _verify_params_per_rank( + self, + params_per_rank: List[List[torch.Tensor]], + ) -> None: + r""" + Verify ``params_per_rank`` for :meth:`_partition_parameters`. + + The verification is done by checking that ``params_per_rank`` has length equal + to the world size and that it does not contain any parameters not passed into the + :class:`ZeroRedundancyOptimizer` constructor. + + The parameters in ``params_per_rank`` being a strict subset of those + passed into the constructor is valid since some parameters may be + frozen. + + Raises: + ValueError: if ``params_per_rank`` does not have length equal to + the world size or if it contains a parameter that was not + passed into the :class:`ZeroRedundancyOptimizer` constructor. + """ + if len(params_per_rank) != self.world_size: + raise ValueError( + "`params_per_rank` must have length equal to the world size" + ) + all_params_set = set(self._all_params) + for params in params_per_rank: + for param in params: + if param not in all_params_set: + raise ValueError( + "Passing a new parameter in `params_per_rank` that " + "was not passed into the ZeroRedundancyOptimizer " + "constructor" + ) + + def _partition_param_group( + self, param_group: Dict[str, Any], params_per_rank: List[List[torch.Tensor]] + ) -> None: + r""" + Partition the parameter group ``param_group`` according to ``params_per_rank``. + + The partition will modify the ``self._partition_parameters_cache``. This method should + only be used as a subroutine for :meth:`_partition_parameters`. + + Arguments: + param_group (dict[str, Any]): a parameter group as normally defined + in an optimizer state. + params_per_rank (list[list[torch.Tensor]]): a :class:`list` of + length world size containing :class:`list` s of parameters to + assign to each rank. 
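        Example (a hypothetical, self-contained sketch of how
        :meth:`_partition_parameters` uses this subroutine for a world size of
        2; the tensor sizes, the ``lr`` value, and all variable names here are
        illustrative only)::

            >>> # xdoctest: +SKIP
            >>> import copy
            >>> import torch
            >>> world_size = 2
            >>> param_group = {"lr": 0.01, "params": [torch.empty(n) for n in (6, 5, 4, 1)]}
            >>> # Sorted-greedy assignment: largest parameters first, each one
            >>> # going to the rank with the smallest total numel so far
            >>> sizes = [0] * world_size
            >>> params_per_rank = [[] for _ in range(world_size)]
            >>> for param in sorted(param_group["params"], key=lambda t: t.numel(), reverse=True):
            >>>     rank = sizes.index(min(sizes))
            >>>     params_per_rank[rank].append(param)
            >>>     sizes[rank] += param.numel()
            >>> # `_partition_param_group()` then gives each rank a shallow copy
            >>> # of the group holding only its own parameters
            >>> partition = [[] for _ in range(world_size)]
            >>> for rank, params in enumerate(params_per_rank):
            >>>     rank_param_group = copy.copy(param_group)
            >>>     rank_param_group["params"] = params
            >>>     partition[rank].append(rank_param_group)
            >>> sizes
            [7, 9]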
+ """ + for rank, params in enumerate(params_per_rank): + rank_param_group = copy.copy(param_group) + rank_param_group["params"] = params + self._partition_parameters_cache[rank].append(rank_param_group) + + def _partition_parameters( + self, + params_per_rank: Optional[List[List[torch.Tensor]]] = None, + ) -> List[List[Dict]]: + r""" + Partitions parameters across distributed data parallel ranks. + + Arguments: + params_per_rank (list[list[torch.Tensor]], optional): a + :class:`list` of length world size containing :class:`list` s + of parameters to assign to each rank; this provides a way to + specify a partition manually. + If ``None``, the parameters are partitioned according to an + internal algorithm. + (default: ``None``) + + Returns: + A :class:`list` where each element of the list contains the + ``param_groups`` for a rank (which itself is a :class:`list` of + :class:`dict`); element 0 corresponds to rank 0, etc.; each rank + stores the ``param_groups`` for all ranks for the collective + communication in :meth:`step`. + + Raises: + ValueError: see :meth:`_validate_params_per_rank`. + RuntimeError: if ``params_per_rank`` is not ``None`` and this + :class:`ZeroRedundancyOptimizer` instance is using more than + one parameter group. + """ + if params_per_rank is None: + # Partition the parameters optimizing for uniformity + if len(self._partition_parameters_cache) == 0: + self._partition_parameters_cache = [[] for _ in range(self.world_size)] + sizes = [0] * self.world_size + for param_group in self.param_groups: + param_group_params_per_rank: List[List] = [ + [] for _ in range(self.world_size) + ] + # Sort the parameters by size (largest first) + params_sorted = sorted( + param_group["params"], key=lambda t: t.numel(), reverse=True + ) + for param in params_sorted: + # Greedily add the parameter to rank with smallest size so far + rank = self._get_min_index(sizes) + param_group_params_per_rank[rank].append(param) + sizes[rank] += param.numel() + # Apply the constructed partition of the parameter group + self._partition_param_group( + param_group, param_group_params_per_rank + ) + + return self._partition_parameters_cache + + # Partition the parameters according to `params_per_rank` + assert len(self._partition_parameters_cache) == 0, ( + "Specifying `params_per_rank` should only be done when the " + "parameters have not been partitioned yet" + ) + if len(self.param_groups) != 1: + raise RuntimeError( + "Specifying `params_per_rank` only supports a single parameter group" + ) + self._verify_params_per_rank(params_per_rank) + self._partition_parameters_cache = [[] for _ in range(self.world_size)] + + # Apply the passed-in partition of the parameter group + param_group = self.param_groups[0] + self._partition_param_group(param_group, params_per_rank) + + return self._partition_parameters_cache + + @property + def _param_to_rank(self) -> Dict[torch.Tensor, int]: + r""":class:`dict` mapping parameters to their assigned data parallel rank in the partition.""" + if len(self._param_to_rank_cache) == 0: + for rank, param_groups in enumerate(self._partition_parameters()): + for param_group in param_groups: + for param in param_group["params"]: + self._param_to_rank_cache[param] = rank + return self._param_to_rank_cache + + @property + def _param_to_index(self) -> Dict[torch.Tensor, int]: + r""" + :class:`dict` mapping parameters to their indices in the global optimizer state. 
+ + NOTE: This assumes that the global optimizer state's indexing (in + ``state_dict``) follows a linear ordering over the parameter groups. + """ + if len(self._param_to_index_cache) == 0: + self._param_to_index_cache = { + p: i + for i, p in enumerate(chain(*(g["params"] for g in self.param_groups))) + } + return self._param_to_index_cache + + @property + def _index_to_param(self) -> List[torch.Tensor]: + r"""List mapping parameter indices in the global optimizer scheme to the actual params.""" + if len(self._index_to_param_cache) == 0: + self._index_to_param_cache = list( + chain(*(g["params"] for g in self.param_groups)) + ) + return self._index_to_param_cache + + def _broadcast_params_from_rank(self, rank: int): + r""" + Broadcast the shard of parameters from a given rank to all other ranks asynchronously. + + Arguments: + rank (int): the source rank. + + Returns: + A :class:`list` of async work handles for the ``broadcast()`` s + performed to synchronize the parameters. + """ + assert not self._overlap_with_ddp, ( + "`_broadcast_params_from_rank()` should not be used if " + "`overlap_with_ddp=True`; instead, the broadcasting should " + "happen in the DDP communication hook" + ) + handles = [] + if self.parameters_as_bucket_view: + for dev_i_buckets in self._buckets: + bucket = dev_i_buckets[rank] + global_rank = dist.distributed_c10d.get_global_rank( + self.process_group, rank + ) + handles.append( + dist.broadcast( + tensor=bucket, + src=global_rank, + group=self.process_group, + async_op=True, + ) + ) + else: + param_groups = self._partition_parameters()[rank] + global_rank = dist.distributed_c10d.get_global_rank( + self.process_group, rank + ) + for param_group in param_groups: + for param in param_group["params"]: + handles.append( + dist.broadcast( + tensor=param.data, + src=global_rank, + group=self.process_group, + async_op=True, + ) + ) + return handles + + def _sync_params(self): + r""" + Sync all parameter shards across the ranks. + + This rank sends its shard of the parameters to all other ranks and + receives a shard from each other rank. This is done using + ``broadcast()``. Parameters are sent bucket-by-bucket if + ``parameters_as_bucket_view=True``and sent parameter-by-parameter + otherwise. + """ + handles = [] + for rank in range(self.world_size): + handles.extend(self._broadcast_params_from_rank(rank)) + _ = [x.wait() for x in handles] + + @property + def _device_to_params_per_rank( + self, + ) -> Dict[torch.device, List[List[torch.Tensor]]]: + r""" + Return device parameters assigned per rank. + + :class:`dict` mapping each device to a :class:`list` of the per-rank parameter + lists filtered to only include the parameters stored on that device. + Each per-rank parameter list gives the parameters assigned to that rank + to update. + + This is used for constructing the parameter buckets if + ``parameters_as_bucket_view=True``. + + Let ``dev_i`` denote the ``i``th device for this rank. Then: + ``dev_0`` maps to a list containing: + rank 0's assigned parameters stored on ``dev_0``, + rank 1's assigned parameters stored on ``dev_0``, + ... + ``dev_1`` maps to a list containing: + rank 0's assigned parameters stored on ``dev_1``, + rank 1's assigned parameters stored on ``dev_1``, + ... + ... 
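        Example (a hypothetical sketch of how such a mapping can be built for a
        world size of 2; the devices and tensor sizes are illustrative only)::

            >>> # xdoctest: +SKIP
            >>> import torch
            >>> world_size = 2
            >>> # (parameter, assigned rank) pairs
            >>> assignment = [
            >>>     (torch.empty(4, device="cuda:0"), 0),
            >>>     (torch.empty(2, device="cuda:0"), 1),
            >>>     (torch.empty(3, device="cuda:1"), 0),
            >>> ]
            >>> device_to_params_per_rank = {}
            >>> for param, rank in assignment:
            >>>     per_rank = device_to_params_per_rank.setdefault(
            >>>         param.device, [[] for _ in range(world_size)]
            >>>     )
            >>>     per_rank[rank].append(param)
            >>> # cuda:0 maps to [[4-element param], [2-element param]] and
            >>> # cuda:1 maps to [[3-element param], []]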
+ """ + assert self.parameters_as_bucket_view, ( + "`_device_to_params_per_rank` should only be used if " + "`parameters_as_bucket_view=True`" + ) + if len(self._device_to_params_per_rank_cache) == 0: + for rank, param_groups in enumerate(self._partition_parameters()): + for param_group in param_groups: + for param in param_group["params"]: + device = param.device + if device not in self._device_to_params_per_rank_cache: + self._device_to_params_per_rank_cache[device] = [ + [] for _ in range(self.world_size) + ] + self._device_to_params_per_rank_cache[device][rank].append( + param + ) + return self._device_to_params_per_rank_cache + + def _get_min_index( + self, + values: List[int], + disallowed_indices: Optional[Set[int]] = None, + ) -> int: + r""" + Return ``values.index(min(values))``, except only uses one pass. + + It also excludes any indices in ``disallowed_indices`` if provided. + + Arguments: + values: (List[int]): :class:`list` of values. + disallowed_indices (Optional[Set[int]]): indices that are + disallowed from being the returned min index. + """ + min_index = -1 + min_value = float("inf") + for i, value in enumerate(values): + if disallowed_indices and i in disallowed_indices: + continue + if value < min_value: + min_value = value + min_index = i + assert min_index >= 0, "All indices are disallowed" + return min_index + + def _assign_bucket_subset_to_rank( + self, + bucket_index: int, + bucket_params: List[torch.Tensor], + bucket_offset: int, + assigned_rank: int, + assigned_ranks_per_bucket: List[Set[int]], + ) -> None: + r""" + Assign ``bucket_params`` to the rank with the least size assigned so far and collects relevant information. + + The model parameters given by ``bucket_params`` represents a (possibly non-strict) + subset of the parameters corresponding to a :class:`DistributedDataParallel` bucket. + + Arguments: + bucket_index (int): index of the :class:`DistributedDataParallel` + gradient bucket. + bucket_params (List[torch.Tensor]): subset of the parameters + corresponding to the bucket to assign. + bucket_offset (int): offset giving the index of the first element + in ``bucket_params`` in the bucket's full parameter list. + assigned_rank (int): group rank to assign to. + assigned_ranks_per_bucket (List[Set[int]]): :class:`set` of group ranks + assigned to each bucket. + """ + overlap_info = self._overlap_info + if len(bucket_params) == 0: + raise ValueError("Empty bucket assignment") + params_per_rank = overlap_info.params_per_rank + offsets = overlap_info.offsets + + self._bucket_assignments_per_rank_cache[assigned_rank][ + bucket_index + ] = _DDPBucketAssignment(bucket_index, bucket_params, bucket_offset) + if self.global_rank == assigned_rank: + offsets[bucket_index] = len(params_per_rank[assigned_rank]) + params_per_rank[assigned_rank].extend(bucket_params) + assigned_ranks_per_bucket[bucket_index].add(assigned_rank) + self._overlap_info.num_bucket_assignments += 1 + + @property + def _bucket_assignments_per_rank(self) -> List[Dict[int, _DDPBucketAssignment]]: + r""" + Return DDP bucket parameters assigned per rank. + + :class:`list` of length world size consisting of :class:`dict` s + mapping bucket indices to :class:`_DDPBucketAssignment` s for each + rank. 
+ """ + assert self._overlap_with_ddp, ( + "`_bucket_assignments_per_rank` only be used if `overlap_with_ddp=True`" + ) + if len(self._bucket_assignments_per_rank_cache) > 0: + return self._bucket_assignments_per_rank_cache + + overlap_info = self._overlap_info + assert overlap_info.status == _OverlapStatus.INITIALIZED + + self._bucket_assignments_per_rank_cache = [{} for _ in range(self.world_size)] + params_per_bucket = overlap_info.params_per_bucket + + if overlap_info.shard_buckets: + # Define the assignment threshold to approximate uniformity + assert overlap_info.total_size is not None, "`total_size` was not computed" + threshold = overlap_info.total_size / self.world_size # type: ignore[operator] + size_per_rank = [0 for _ in range(self.world_size)] + + num_buckets = len(params_per_bucket) + overlap_info.assigned_ranks_per_bucket = [set() for _ in range(num_buckets)] + assigned_ranks_per_bucket = overlap_info.assigned_ranks_per_bucket + if not overlap_info.shard_buckets: + # Assign each DDP bucket entirely to a single rank + for bucket_index, bucket_params in enumerate(params_per_bucket): + assert len(bucket_params) > 0, "Empty bucket" + assigned_rank = self._get_assigned_rank(bucket_index) + self._assign_bucket_subset_to_rank( + bucket_index, + bucket_params, + 0, + assigned_rank, + assigned_ranks_per_bucket, + ) + else: + # Assign each DDP bucket to possibly multiple ranks + # Specifically, sort the DDP buckets by increasing size, and for + # each bucket, iteratively assign the maximal unassigned subset + # with size less than `threshold` to the rank with the least total + # size so far -- each such assignment is represented by a + # `_DDPBucketAssignment` instance and only contains parameters from + # a single DDP bucket + params_per_bucket_enum = sorted( + enumerate(params_per_bucket), key=lambda x: sum(p.numel() for p in x[1]) + ) + for bucket_index, bucket_params in params_per_bucket_enum: + assert len(bucket_params) > 0, "Empty bucket" + bucket_offset = 0 + assignment_size = 0 + for param_index, param in enumerate(bucket_params): + param_numel = param.numel() + if ( + assignment_size + param_numel >= threshold + and param_index > bucket_offset + ): + assigned_rank = self._get_min_index( + size_per_rank, assigned_ranks_per_bucket[bucket_index] + ) + # Include up to but not including the parameter that + # exceeded the threshold + self._assign_bucket_subset_to_rank( + bucket_index, + bucket_params[bucket_offset:param_index], + bucket_offset, + assigned_rank, + assigned_ranks_per_bucket, + ) + size_per_rank[assigned_rank] += assignment_size + bucket_offset = param_index + assignment_size = 0 + assignment_size += param_numel + # Assign the remainder of the bucket so that no assignment + # spans across two buckets + assigned_rank = self._get_min_index( + size_per_rank, assigned_ranks_per_bucket[bucket_index] + ) + self._assign_bucket_subset_to_rank( + bucket_index, + bucket_params[bucket_offset:], + bucket_offset, + assigned_rank, + assigned_ranks_per_bucket, + ) + size_per_rank[assigned_rank] += assignment_size + + return self._bucket_assignments_per_rank_cache + + def _local_step( + self, + gradients: Optional[List[Optional[torch.Tensor]]] = None, + closure: Optional[Callable[[], float]] = None, + **kwargs: Any, + ) -> Optional[float]: + r""" + Perform a single optimizer step without syncing parameters across ranks. 
+ + Arguments: + gradients (list[Optional[torch.Tensor]], optional): a :class:`list` + of length equal to the number of parameters assigned to this + rank containing gradient tensors or ``None`` as its elements; + a ``None`` in the :class:`list` indicates that the + corresponding parameter should not be updated. + If the argument itself is ``None``, then all parameters are + updated, and the gradients are assumed to be already populated. + (default: ``None``) + closure (Callable): a closure that re-evaluates the model and + returns the loss; optional for most optimizers and should be + ``None`` if ``gradients`` is not ``None``; (default: ``None``) + Returns: + Optional loss depending on the underlying local optimizer. + + .. warning:: + The argument ``gradients`` should only be specified (i.e. not + ``None``) if ``overlap_with_ddp=True``, in which case + :class:`ZeroRedundancyOptimizer` wraps a functional optimizer. + """ + Join.notify_join_context(self) + # Check if the model trainability has changed + is_trainable_mask = self._get_is_trainable_mask() + if is_trainable_mask != self._is_trainable_mask: + if self._overlap_with_ddp: + raise RuntimeError( + "ZeroRedundancyOptimizer with `overlap_with_ddp=True` " + "does not support changing parameter trainability at run " + "time" + ) + logger.warning( + "ZeroRedundancyOptimizer detected that the trainable " + "parameters changed; rebuilding the parameter buckets if " + "enabled" + ) + self._build_param_buckets() + self._is_trainable_mask = is_trainable_mask + + # Sync the exposed `param_groups` attributes to the local optimizer in + # case they have been updated + self._sync_param_groups(self.param_groups, self.optim.param_groups) + + # Run the optimizer step on this shard only + if gradients is None: + loss = ( + self.optim.step(**kwargs) + if closure is None + else self.optim.step(closure=closure, **kwargs) + ) + else: + assert self._overlap_with_ddp, ( + "Specifying `gradients` should not " + "be used when `overlap_with_ddp=False`" + ) + assert closure is None, ( + "`closure` is not supported when using a local functional optimizer" + ) + loss = self.optim.step(gradients=gradients) + + # Sync any updated attributes in the local optimizer to the exposed + # `param_groups` + self._sync_param_groups(self.optim.param_groups, self.param_groups) + + return loss + + def step( + self, + closure: Optional[Callable[[], float]] = None, + **kwargs: Any, + ) -> Optional[float]: + r""" + Perform a single optimizer step and syncs parameters across all ranks. + + Arguments: + closure (Callable): a closure that re-evaluates the model and + returns the loss; optional for most optimizers. + Returns: + Optional loss depending on the underlying local optimizer. + + .. note: Any extra parameters are passed to the base optimizer as-is. + """ + if self._overlap_with_ddp: + logger.warning( + "`step()` should not be included in the training loop when " + "`overlap_with_ddp=True`" + ) + return None + + # Perform the local optimizer step + loss = self._local_step(closure=closure, **kwargs) + + # Sync all of the updated parameter shards across the ranks + self._sync_params() + + return loss + + def join_hook(self, **kwargs): + r""" + Return the ZeRO join hook. + + It enables training on uneven inputs by + shadowing the collective communications in the optimizer step. + + Gradients must be properly set before this hook is called. 
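        Example (a hypothetical sketch of the intended usage with the generic
        join context manager; ``ddp_model``, ``zero_optim``, and ``inputs`` are
        placeholders)::

            >>> # xdoctest: +SKIP
            >>> from torch.distributed.algorithms.join import Join
            >>> # Both DDP and ZeroRedundancyOptimizer are `Joinable`, so their
            >>> # join hooks shadow the collective communication of ranks that
            >>> # have exhausted their (possibly uneven) inputs
            >>> with Join([ddp_model, zero_optim]):
            >>>     for input in inputs:
            >>>         loss = ddp_model(input).sum()
            >>>         loss.backward()
            >>>         zero_optim.step()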
+ + Arguments: + kwargs (dict): a :class:`dict` containing any keyword arguments + to modify the behavior of the join hook at run time; all + :class:`Joinable` instances sharing the same join context + manager are forwarded the same value for ``kwargs``. + + This hook does not support any keyword arguments; i.e. ``kwargs`` is + unused. + """ + return _ZeROJoinHook(self) + + @property + def join_device(self) -> torch.device: + r"""Return default device.""" + return self._default_device + + @property + def join_process_group(self) -> Any: + r"""Return process group.""" + return self.process_group + + def load_state_dict(self, state_dict: Dict[str, Any]) -> None: + r""" + Load the state pertaining to the given rank from the input ``state_dict``, updating the local optimizer as needed. + + Arguments: + state_dict (dict): optimizer state; should be an object returned + from a call to :meth:`state_dict`. + + Raises: + RuntimeError: if ``overlap_with_ddp=True`` and this method is + called before this :class:`ZeroRedundancyOptimizer` instance + has been fully initialized, which happens once + :class:`DistributedDataParallel` gradient buckets have been + rebuilt. + """ + self._check_overlap_initialized() + + for index, value in state_dict["state"].items(): + param = self._index_to_param[index] + if self._param_to_rank[param] != self.rank: + # Clear any state irrelevant to this rank + state_dict["state"][index] = None + else: + # Load the parameter state to the local optimizer + self.optim.state[param] = _recursive_copy_to_device( + value, non_blocking=True, device=param.device + ) + # Force zero-dimensional tensors (like Adam "step") on CPU + for state_name, state_value in self.optim.state[param].items(): + if torch.is_tensor(state_value) and state_value.dim() == 0: + self.optim.state[param][state_name] = state_value.cpu() + + super().load_state_dict(state_dict) + + # Sync the input state with the exposed and local optimizer states + self._sync_param_groups(state_dict["param_groups"], self.param_groups) + self._sync_param_groups(self.param_groups, self.optim.param_groups) + + def state_dict(self) -> Dict[str, Any]: + r""" + Return the last global optimizer state known to this rank. + + .. warning: + If the state has not been consolidated to this rank, this raises a + runtime error, and even if it has, the state may not be up-to-date, + depending on when :meth:`consolidate_state_dict` was last called. + + Raises: + RuntimeError: if ``overlap_with_ddp=True`` and this method is + called before this :class:`ZeroRedundancyOptimizer` instance + has been fully initialized, which happens once + :class:`DistributedDataParallel` gradient buckets have been + rebuilt; or if this method is called without a preceding call + to :meth:`consolidate_state_dict`. + """ + self._check_overlap_initialized() + + if len(self._all_state_dicts) == 0: + raise RuntimeError( + "Optimizer state has not been consolidated on this rank. " + f"Please call `consolidate_state_dict(to={self.rank})` on " + "all ranks beforehand if you meant to save the global state." 
+ ) + + # Get the possibly-stale global optimizer state that uses global + # parameter indexing + state_dict = super().state_dict() + + # Update the global optimizer state with local state information, + # factoring in the translation from local to global indexing + for rank, local_state_dict in enumerate(self._all_state_dicts): + local_param_groups = local_state_dict["param_groups"] + global_param_groups = self._partition_parameters()[rank] + assert len(local_param_groups) == len( + global_param_groups + ), "Mismatch between number of local and global parameter groups" + + for local_param_group, global_param_group in zip( + local_param_groups, global_param_groups + ): + # `local_param_group` stores local indices, while + # `global_param_group` stores the tensors directly + local_param_indices = local_param_group["params"] + global_params = global_param_group["params"] + + assert len(local_param_indices) == len( + global_params + ), "Mismatch between number of local and global parameters in parameter group" + for local_param_index, global_param in zip( + local_param_indices, global_params + ): + # Update the global parameter state, if any + if local_param_index in local_state_dict["state"]: + global_param_index = self._param_to_index[global_param] + state_dict["state"][global_param_index] = local_state_dict[ + "state" + ][local_param_index] + + # Sort the parameters in the state + state_dict["state"] = dict(sorted(state_dict["state"].items())) + return state_dict + + @staticmethod + def _sync_param_groups( + src_param_groups: List[Dict[Any, Any]], + dst_param_groups: List[Dict[Any, Any]], + ) -> None: + r""" + Sync the attributes from the source parameter groups to the destination parameter groups. + + Example attributes include learning rate or scheduler attributes. The + two parameter groups should have the same length (i.e. same number of + parameter groups). + + Arguments: + src_param_groups (list[dict]): parameter groups giving the + attribute settings to copy. + dst_param_groups (list[dict]): parameter groups giving the + attribute settings to set. + """ + assert len(src_param_groups) == len( + dst_param_groups + ), "Mismatch between number of source and destination parameter groups" + for src_param_group, dst_param_group in zip(src_param_groups, dst_param_groups): + # Sync all attributes except the parameters + for attr in filter(lambda x: x != "params", src_param_group.keys()): + dst_param_group[attr] = src_param_group[attr] + + def _build_param_buckets(self) -> None: + r""" + Build parameter buckets if ``parameters_as_bucket_view=True``. + + For each device that stores this rank's parameters, there is a + bucket (represented as a tensor) containing all of the parameters on + that device that are assigned to a given rank in the parameter update + partition. + + This method is called in the constructor and any time parameter + trainability is changed. + + .. warning:: + The current implementation assumes that all of the parameters in a + bucket are of the same dense type when allocating the bucket's + tensor. + + .. warning:: + If the model parameters are stored across more than one device, + then the storage partitioning must be the same across all + processes in order for parameter synchronization to work. 
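        Example (a minimal, self-contained sketch of the flattening scheme used
        below; the two tensors are hypothetical stand-ins for parameters)::

            >>> # xdoctest: +SKIP
            >>> import torch
            >>> p0 = torch.randn(3)
            >>> p1 = torch.randn(2, 2)
            >>> bucket = torch.empty(p0.numel() + p1.numel())
            >>> offset = 0
            >>> for p in (p0, p1):
            >>>     offset_next = offset + p.numel()
            >>>     bucket[offset:offset_next].copy_(p.data.flatten())
            >>>     p.data = bucket[offset:offset_next].view_as(p.data)
            >>>     offset = offset_next
            >>> # A single `broadcast()` of `bucket` now synchronizes both
            >>> # tensors, since their `data` fields are views into it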
+ """ + if not self.parameters_as_bucket_view or self._overlap_with_ddp: + return + + # `self._buckets[i][j]` are the parameters stored on device i and + # assigned to rank j + num_devices = len(self._device_to_params_per_rank) + self._buckets = [[] for _ in range(num_devices)] # type: ignore[assignment] + + for dev_i, (device, params_per_rank) in enumerate( + self._device_to_params_per_rank.items() + ): + for params in params_per_rank: + bucket_size = 0 + dtype = None + trainable_params = [] + for param in params: + if not _is_trainable(param): + # Clone in case the parameter was previously part of + # a bucket to avoid the data from being destroyed + param.data = param.data.detach().clone() + else: + bucket_size += param.numel() + trainable_params.append(param) + dtype = param.dtype # assumes all same dtype + + if bucket_size == 0: + # Create a dummy bucket if there are no parameters + bucket = torch.zeros(1, device=device) + else: + # Construct the bucket (assuming all dense and same dtype) + bucket = torch.empty(bucket_size, dtype=dtype, device=device) + offset = 0 + for param in trainable_params: + offset_next = offset + param.numel() + bucket[offset:offset_next].copy_(param.data.flatten()) + param.data = bucket[offset:offset_next].view_as(param.data) + offset = offset_next + self._buckets[dev_i].append(bucket) # type: ignore[arg-type] + + def _build_ddp_param_buckets(self) -> None: + r""" + Build the DDP bucket with parameters assigned to this rank. + + For each DDP bucket with parameters assigned to this rank, flattens the + data of those parameters into a single tensor and saves the tensor to + the ``tensor`` attribute in the corresponding + :class:`_DDPBucketAssignment` instance stored in + ``self._bucket_assignments_per_rank``. + + :class:`DistributedDataParallel` guarantees that the parameters + corresponding to a gradient bucket have the same device and the same + dtype. + """ + for bucket_assignments in self._bucket_assignments_per_rank: + for bucket_assignment in bucket_assignments.values(): + params = bucket_assignment.parameters + bucket_size = 0 + dtype = None + for param in params: + assert _is_trainable(param), ( + "Model parameter " + "corresponding to a gradient in a DDP bucket should " + "require a gradient" + ) + bucket_size += param.numel() + dtype = param.dtype # assumes all same dtype + assert bucket_size > 0, "Empty bucket" + + # Construct the bucket tensor (assuming all dense and same dtype) + tensor = torch.empty( + bucket_size, dtype=dtype, device=bucket_assignment.device + ) + offset = 0 + for param in params: + offset_next = offset + param.numel() + tensor[offset:offset_next].copy_(param.data.flatten()) + param.data = tensor[offset:offset_next].view_as(param.data) + offset = offset_next + bucket_assignment.tensor = tensor + + def _verify_and_init_params( + self, + params: Any, + ) -> Union[List[torch.Tensor], List[dict]]: + r""" + Verify the type of ``params`` and initializes ``self._all_params`` as a :class:`list` of all parameters. + + The initializagtion will first make sure that provided ``params`` is valid. + + Arguments: + params (Any): Candidate parameter list or parameter groups to verify. + + Raises: + TypeError: ``params`` has an invalid type. + ValueError: ``params`` is empty. + + Returns: + The persistent form of ``params`` to be passed into the parent + :class:`Optimizer` constructor -- i.e. returns ``params`` as a + :class:`list` to ensure that it can be iterated over again. 
+ """ + if isinstance(params, torch.Tensor): + raise TypeError( + "`params` argument should be an iterable of " + f"Tensors, but got {torch.typename(params)}" + ) + try: + all_params = list(params) + except TypeError as e: + raise TypeError( + "`params` argument should be an iterable of Tensors" + f" or dicts, but got {torch.typename(params)}" + ) from e + if len(all_params) == 0: + raise ValueError("ZeroRedundancyOptimizer got an empty parameter list") + all_tensors = True + all_dicts = True + for param in all_params: + all_tensors &= isinstance(param, torch.Tensor) + all_dicts &= isinstance(param, dict) + if not all_tensors and not all_dicts: + raise TypeError( + "`params` argument should be an iterable of Tensors or dicts" + ) + # Ensure that `self._all_params` contains a list of all parameters + if all_tensors: + self._all_params = all_params + elif all_dicts: + self._all_params = [] + # `all_params` contains parameter groups (not parameters) + for param_group in all_params: + if "params" not in param_group: + raise ValueError( + "Each parameter group passed-in via `params` must " + "have a 'params' key mapping to the parameters in " + "the group" + ) + self._all_params.extend(param_group["params"]) + return all_params + + def _verify_same_dense_param_type(self) -> None: + r""" + Verify that all parameters are of the same dense type. + + The method assumes that ``self._all_params`` has been initialized + and is non-empty. + + Raises: + ValueError: ``params`` contains sparse parameters or parameters + of varying dense types. + + NOTE: This method can be removed once support for sparse parameters + and varying parameter types is added. + """ + typename = torch.typename(self._all_params[0]) + if self._all_params[0].is_sparse: + raise ValueError( + "ZeroRedundancyOptimizer only supports using " + "the same dense type for all parameters but got " + f"{typename}" + ) + for param in self._all_params[1:]: + other_typename = torch.typename(param) + if other_typename != typename: + raise ValueError( + "ZeroRedundancyOptimizer only supports " + "using the same dense type for all " + f"parameters but got both {typename} and " + f"{other_typename}" + ) + + def _get_is_trainable_mask(self) -> List[bool]: + r"""Return a boolean mask indicating if each parameter is trainable (``requires_grad``) or not.""" + return list(map(_is_trainable, self._all_params)) + + def _init_local_optimizer(self) -> None: + r""" + Initialize this rank's local optimizer, responsible for its subset of the parameters. + + The local optimizer is saved in ``self.optim``. 
+ """ + assert ( + self._optim_constructor is not None + ), "The local optimizer class has not been set" + + param_groups = self._partition_parameters()[self.rank] + # `overlap_with_ddp=True` requires a local functional optimizer + if self._overlap_with_ddp: + # Functional optimizers only support a single parameter group and + # require passing in the parameters as a list + assert len(param_groups) == 1, ( + "Initializing the local " + "functional optimizer with more than one parameter group" + ) + params = param_groups[0]["params"] + # Try to pass `_allow_empty_param_list=True` to avoid erroring + if ( + "_allow_empty_param_list" + in inspect.signature(self._optim_constructor).parameters + ): + self.optim: Any = self._optim_constructor( + params, **self._optim_defaults, _allow_empty_param_list=True + ) + else: + logger.warning( + "%s does not support the argument " + "`_allow_empty_param_list`; ZeroRedundancyOptimizer may " + "error due to an empty parameter list", + self._optim_constructor + ) + self.optim: Any = self._optim_constructor(params, **self._optim_defaults) # type: ignore[no-redef] + + # Log information about the DDP and ZeRO bucketing + if dist.get_debug_level() != dist.DebugLevel.OFF: + local_numel = sum(p.numel() for p in params) + num_assigned_buckets = len( + self._bucket_assignments_per_rank[self.global_rank] + ) + logger.info( + "rank %s with %s parameters " + "across %s buckets", + self.global_rank, local_numel, num_assigned_buckets + ) + if self.global_rank == 0: + logger.info( + "%s DDP " + "buckets and " + "%s bucket " + "assignments", + len(self._overlap_info.params_per_bucket), self._overlap_info.num_bucket_assignments + ) + else: + # NOTE: Passing `param_groups` into the local optimizer constructor + # bypasses the empty parameter list check + self.optim: Optimizer = self._optim_constructor(param_groups, **self._optim_defaults) # type: ignore[no-redef] + + # TODO: Manually add `self.param_groups` if using a functional + # optimizer; remove this if/when the functional optimizers support + # multiple parameter groups + if self._overlap_with_ddp and not hasattr(self.optim, "param_groups"): + assert hasattr(self.optim, "param_group"), ( + "The functional optimizer should set at least one of the " + "attributes `param_group` or `param_groups`" + ) + self.optim.param_groups = [self.optim.param_group] # type: ignore[attr-defined] + + self._sync_param_groups(self.optim.param_groups, self.param_groups) + + def _init_zero_for_overlap(self) -> None: + r"""Perform a delayed initialization of the local optimizer and the supporting data structures.""" + assert self._overlap_with_ddp, ( + "`_init_zero_for_overlap()` should only be called when " + "`overlap_with_ddp=True`" + ) + self._overlap_info.status = _OverlapStatus.INITIALIZED + self._clear_cache() + self._partition_parameters(self._overlap_info.params_per_rank) + self._build_ddp_param_buckets() + self._init_local_optimizer() + + def _get_assigned_rank(self, bucket_index: int) -> int: + r""" + Return the single rank assigned to a :class:`DistributedDataParallel` gradient bucket. + + Arguments: + bucket_index (int): index of the :class:`DistributedDataParallel` + bucket for which to get the assigned rank. 
+ """ + assert not self._overlap_info.shard_buckets, ( + "The bucket assignment requires global bucket information and " + "will be computed later; there should be no need to use this " + "method" + ) + return bucket_index % self.world_size + + def _check_overlap_initialized(self): + r""" + Check the delayed initialization depending on the value of ``overlap_with_ddp``. + + The delayed initialization has occurred (see + :meth:`_init_zero_for_overlap`) if ``overlap_with_ddp=True``, and + raises a ``RuntimeError`` if not. This should preface methods that + should not be run before that delayed initialization. + + Raises: + RuntimeError: if ``overlap_with_ddp=True`` and + :meth:`_init_zero_for_overlap` has not been called. + """ + if ( + self._overlap_with_ddp + and self._overlap_info.status != _OverlapStatus.INITIALIZED + ): + raise RuntimeError( + "This method should not be called until this " + "ZeroRedundancyOptimizer instance has been fully " + "initialized" + ) + + def _get_optimizer_constructor(self, optimizer_class: Any) -> Any: + r""" + Return the optimizer constructor using validation and transformation depending on ``overlap_with_ddp``. + + Returns: + - ``optimizer_class`` if ``overlap_with_ddp=False`` and + ``optimizer_class`` is not a functional optimizer. + - ``optimizer_class`` if ``overlap_with_ddp=True`` and + ``optimizer_class`` is already a functional optimizer. + - The functional equivalent of ``optimizer_class`` if + ``overlap_with_ddp=True`` and ``optimizer_class`` is not + already a functional optimizer (assuming the equivalent + exists). + + Raises: + ValueError: + + - if ``overlap_with_ddp=True`` but ``optimizer_class`` is + neither a functional optimizer nor translatable to a + functional optimizer. + - if ``overlap_with_ddp=False`` and ``optimizer_class`` is a + functional optimizer. + """ + functional_optims = functional_optim_map.values() + if not self._overlap_with_ddp: + if optimizer_class in functional_optims: + # Using a functional optimizer is only supported when + # `overlap_with_ddp=True` + raise ValueError( + f"Passing in a functional optimizer {optimizer_class} " + "when `overlap_with_ddp=False`" + ) + else: + return optimizer_class + else: + if optimizer_class in functional_optims: + # Already a functional optimizer + return optimizer_class + elif optimizer_class in functional_optim_map: + # Translate the passed-in optimizer class to its functional + # equivalent if `overlap_with_ddp=True` + optim_constructor = functional_optim_map[optimizer_class] + logger.info( + "Using the functional optimizer %s " + "instead of %s since " + "`overlap_with_ddp=True`", + optim_constructor, optimizer_class + ) + return optim_constructor + else: + raise ValueError( + "Using `ddp_with_overlap=True` requires using a " + "functional optimizer, but there is no supported functional " + f"optimizer equivalent for {optimizer_class}" + ) diff --git a/llmeval-env/lib/python3.10/site-packages/torch/distributed/pipeline/__init__.py b/llmeval-env/lib/python3.10/site-packages/torch/distributed/pipeline/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..5bc82f0692c15eb4fd77a2ddfcb551021300006c --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/distributed/pipeline/__init__.py @@ -0,0 +1,7 @@ +import warnings +warnings.warn( + "torch.distributed.pipeline is deprecated. 
For up-to-date pipeline parallel " + "implementation, please refer to the PiPPy library under the PyTorch " + "organization (Pipeline Parallelism for PyTorch): " + "https://github.com/pytorch/PiPPy" +) diff --git a/llmeval-env/lib/python3.10/site-packages/torch/distributed/pipeline/__pycache__/__init__.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/torch/distributed/pipeline/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..a16b185b4292da5d1063661920440bc788b8400f Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/torch/distributed/pipeline/__pycache__/__init__.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/torch/distributed/pipeline/sync/__init__.py b/llmeval-env/lib/python3.10/site-packages/torch/distributed/pipeline/sync/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..75a80c5db0f9f5d622d58950d09cc2a14f6779db --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/distributed/pipeline/sync/__init__.py @@ -0,0 +1,12 @@ +# Copyright 2019 Kakao Brain +# +# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved. +# +# This source code is licensed under the BSD license found in the +# LICENSE file in the root directory of this source tree. +"""A Pipe implementation in PyTorch.""" +from .checkpoint import is_checkpointing, is_recomputing +from .pipe import Pipe, WithDevice +from .microbatch import NoChunk + +__all__ = ["Pipe", "is_checkpointing", "is_recomputing"] diff --git a/llmeval-env/lib/python3.10/site-packages/torch/distributed/pipeline/sync/_balance/__pycache__/__init__.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/torch/distributed/pipeline/sync/_balance/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..8fb2621131787240b6bf36223502197c948a1901 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/torch/distributed/pipeline/sync/_balance/__pycache__/__init__.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/torch/distributed/pipeline/sync/_balance/__pycache__/blockpartition.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/torch/distributed/pipeline/sync/_balance/__pycache__/blockpartition.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..9685d8ee90ef6dfe2f72216020713793498b6220 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/torch/distributed/pipeline/sync/_balance/__pycache__/blockpartition.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/torch/distributed/pipeline/sync/_balance/__pycache__/profile.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/torch/distributed/pipeline/sync/_balance/__pycache__/profile.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..ada481036949f04c26af370cc90561e59db30bd0 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/torch/distributed/pipeline/sync/_balance/__pycache__/profile.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/torch/distributed/pipeline/sync/checkpoint.py b/llmeval-env/lib/python3.10/site-packages/torch/distributed/pipeline/sync/checkpoint.py new file mode 100644 index 0000000000000000000000000000000000000000..e67da2499d573e9e796a9b5241187e8b0fe6d0c3 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/distributed/pipeline/sync/checkpoint.py @@ -0,0 +1,364 @@ +# 
Copyright 2019 Kakao Brain +# +# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved. +# +# This source code is licensed under the BSD license found in the +# LICENSE file in the root directory of this source tree. +"""Checkpointing with preceding recomputation. + +PyTorch already provides the official checkpointing utilities in +:mod:`torch.utils.checkpoint`. The official checkpointing combines +recomputation and recursive backpropagation into one autograd function named +``CheckpointFunction``. Hence, the recomputation can be started only when the +gradients arrive to the function. In Pipe, the recomputation needs to precede +the gradient arrival to minimize the GPU idle time. + +We solve this problem by introducing separate autograd functions named +:class:`Recompute` and :class:`Checkpoint`. Each function represents +recomputation and recursive backpropagation, respectively. We can manipulate +the control flow in aspect of both the autograd engine and CUDA with a pair of +the functions. + +Specifically, we place CUDA stream synchronization between :class:`Recompute` +and :class:`Checkpoint` to delay only :class:`Checkpoint` until the gradient is +copied entirely. + +""" +from collections import deque +from contextlib import contextmanager +import threading +from typing import ( + Any, + Deque, + Generator, + List, + Optional, + Protocol, + Union, + Sequence, + Tuple +) + +import torch +from torch import Tensor +import torch.autograd + +from .dependency import fork, join +from .microbatch import Batch +from .phony import get_phony + +__all__ = ["Function", "checkpoint", "Checkpointing", "ThreadLocal", "enable_checkpointing", + "enable_recomputing", "is_checkpointing", "is_recomputing", "Context", "save_rng_states", + "restore_rng_states", "Checkpoint", "Recompute"] + + +Tensors = Sequence[Tensor] +TensorOrTensors = Union[Tensor, Tensors] + +# Types for shared memory between Checkpoint and Recompute. +Recomputed = Tuple[TensorOrTensors, Tensors] # (output, input_leaf) +RNGStates = Tuple[Tensor, Optional[Tensor]] # (cpu_rng_state, gpu_rng_state) + + +# Protocol with __call__ instead of Callable can be used as an attribute type. +# See: https://github.com/python/mypy/issues/708#issuecomment-561735949 +class Function(Protocol): + def __call__(self, input: TensorOrTensors) -> TensorOrTensors: + ... + + +def checkpoint(function: Function, input): + """Make a checkpoint with a simple interface like + :func:`torch.utils.checkpoint.checkpoint`. It's only used to test or debug + :class:`Checkpoint` and :class:`Recompute` without boilerplate. + """ + batch = Batch(input) + + chk = Checkpointing(function, batch) + batch = chk.checkpoint() + chk.recompute(batch) + + return batch.values + + +class Checkpointing: + """Generates a pair of :class:`Checkpoint` and :class:`Recompute`.""" + + def __init__(self, function: Function, batch: Batch) -> None: + self.function = function + self.batch = batch + + # Shared memory between Checkpoint and Recompute. 1-length deque is + # used for mutability and length limitation. + self.recomputed: Deque[Recomputed] = deque(maxlen=1) + self.rng_states: Deque[RNGStates] = deque(maxlen=1) + + def checkpoint(self) -> Batch: + """Return a batch applied by :class:`Checkpoint`.""" + input_atomic = self.batch.atomic + inputs = tuple(self.batch) + + # Use a phony which requires grad to ensure that Checkpoint can be + # tracked by the autograd engine even when none of the input tensors + # require grad. 
+ phony = get_phony(self.batch.get_device(), requires_grad=True) + + output = Checkpoint.apply(phony, self.recomputed, self.rng_states, self.function, input_atomic, *inputs) + + # Gradients are only supported for float Tensors. + if isinstance(output, tuple): + output = tuple([x.detach() if torch.is_tensor(x) and not x.is_floating_point() else x for x in output]) + + return Batch(output) + + def recompute(self, batch: Batch) -> None: + """Apply :class:`Recompute` to the batch in place.""" + input_atomic = self.batch.atomic + inputs = tuple(self.batch) + + # Use a tensor in the batch to tie together fork-join + tensor_idx = batch.find_tensor_idx() + # batch[tensor_idx] is always requiring grad, because it has been passed + # checkpoint with a phony requiring grad. + batch[tensor_idx], phony = fork(batch[tensor_idx]) + phony = Recompute.apply(phony, self.recomputed, self.rng_states, self.function, input_atomic, *inputs) + batch[tensor_idx] = join(batch[tensor_idx], phony) + + +class ThreadLocal(threading.local): + def __init__(self) -> None: + self.is_checkpointing = False + self.is_recomputing = False + + +thread_local = ThreadLocal() + + +@contextmanager +def enable_checkpointing() -> Generator[None, None, None]: + """Make :func:`is_checkpointing` return :data:`True` within a context.""" + orig = thread_local.is_checkpointing + thread_local.is_checkpointing = True + try: + yield + finally: + thread_local.is_checkpointing = orig + + +@contextmanager +def enable_recomputing() -> Generator[None, None, None]: + """Makes :func:`is_recomputing` return :data:`True` within a context.""" + orig = thread_local.is_recomputing + thread_local.is_recomputing = True + try: + yield + finally: + thread_local.is_recomputing = orig + + +def is_checkpointing() -> bool: + """Whether the current forward propagation is under checkpointing. + + Returns: + bool: :data:`True` if it's under checkpointing. + + """ + return thread_local.is_checkpointing + + +def is_recomputing() -> bool: + """Whether the current forward propagation is under checkpoint recomputation. + + Use this to prevent duplicated side-effects at forward + propagation:: + + class Counter(nn.Module): + def __init__(self): + super().__init__() + self.counter = 0 + + def forward(self, input): + if not is_recomputing(): + self.counter += 1 + return input + + Returns: + bool: :data:`True` if it's under checkpoint recomputation. + + .. seealso:: :ref:`Detecting Recomputation` + + """ + return thread_local.is_recomputing + + +class Context: + """The common interface between the :class:`Checkpoint` and :class:`Recompute` context.""" + + recomputed: Deque[Recomputed] + rng_states: Deque[RNGStates] + function: Function + input_atomic: bool + inputs: Sequence[Any] + + saved_tensors: Tuple[Tensor, ...] + + def save_for_backward(self, *tensors: Tensor) -> None: # pragma: no cover + pass + + +def save_rng_states(device: torch.device, rng_states: Deque[RNGStates],) -> None: + """: + Capture the current random number generator states. + + meth:`Checkpoint.forward` captures the current PyTorch's random number + generator states at CPU and GPU to reuse in :meth:`Recompute.backward`. + + .. 
seealso:: :ref:`Referential Transparency` + + """ + cpu_rng_state = torch.get_rng_state() + + gpu_rng_state: Optional[Tensor] + if device.type == "cuda": + gpu_rng_state = torch.cuda.get_rng_state(device) + else: + gpu_rng_state = None + + rng_states.append((cpu_rng_state, gpu_rng_state)) + + +@contextmanager +def restore_rng_states(device: torch.device, rng_states: Deque[RNGStates],) -> Generator[None, None, None]: + """: + Restore the random number generator state. + + meth:`Recompute.backward` restores the random number generator states + captured by :func:`save_rng_states` within its context. + + .. seealso:: :ref:`Referential Transparency` + + """ + cpu_rng_state, gpu_rng_state = rng_states.pop() + + gpu_devices: List[torch.device] = [] + if device.type == "cuda": + gpu_devices.append(device) + + with torch.random.fork_rng(gpu_devices): + torch.set_rng_state(cpu_rng_state) + if gpu_rng_state is not None: + torch.cuda.set_rng_state(gpu_rng_state, device) + yield + + +class Checkpoint(torch.autograd.Function): + @staticmethod + # type: ignore[override] + def forward( + ctx: Context, + phony: Tensor, + recomputed: Deque[Recomputed], + rng_states: Deque[RNGStates], + function: Function, + input_atomic: bool, + *inputs, + ): + ctx.recomputed = recomputed + ctx.rng_states = rng_states + + save_rng_states(phony.device, ctx.rng_states) + + ctx.function = function + ctx.input_atomic = input_atomic + if input_atomic: + tensors = [inputs[0]] + else: + tensors = [] + for input in inputs: + if torch.is_tensor(input): + tensors.append(input) + + ctx.save_for_backward(*tensors) + + with torch.no_grad(), enable_checkpointing(): + if input_atomic: + assert len(inputs) == 1 + output = function(inputs[0]) + else: + output = function(*inputs) + return output + + @staticmethod + def backward(ctx: Context, *grad_output: Tensor,) -> Tuple[Optional[Tensor], ...]: # pragma: no cover + output, input_leaf = ctx.recomputed.pop() + + if isinstance(output, tuple): + outputs = output + else: + outputs = (output,) + if any(torch.is_tensor(y) and y.requires_grad for y in outputs): + tensors = tuple([x for x in outputs if torch.is_tensor(x) and x.requires_grad]) + torch.autograd.backward(tensors, grad_output) + + grad_input: List[Optional[Tensor]] = [None, None, None, None, None] + grad_input.extend(x.grad if torch.is_tensor(x) else None for x in input_leaf) + return tuple(grad_input) + + +class Recompute(torch.autograd.Function): + @staticmethod + # type: ignore[override] + def forward( + ctx: Context, + phony: Tensor, + recomputed: Deque[Recomputed], + rng_states: Deque[RNGStates], + function: Function, + input_atomic: bool, + *inputs, + ) -> Tensor: + ctx.recomputed = recomputed + ctx.rng_states = rng_states + + ctx.function = function + ctx.input_atomic = input_atomic + ctx.inputs = inputs + if input_atomic: + tensors = [inputs[0]] + else: + tensors = [] + for input in inputs: + if torch.is_tensor(input): + tensors.append(input) + ctx.save_for_backward(*tensors) + + return phony + + @staticmethod + def backward(ctx: Context, *grad_output: Tensor) -> Tuple[None, ...]: # pragma: no cover + inputs = ctx.inputs + inputs_leaf = tuple(x.detach().requires_grad_(x.requires_grad) if torch.is_tensor(x) else x for x in inputs) + + # Get the device for the inputs from a tensor + device = None + for input in inputs: + if torch.is_tensor(input): + device = input.device + break + + if device is None: + raise RuntimeError(f'No tensors found in {inputs}') + + with restore_rng_states(device, ctx.rng_states): + with 
torch.enable_grad(), enable_recomputing(): + if ctx.input_atomic: + assert len(inputs_leaf) == 1 + output = ctx.function(inputs_leaf[0]) + else: + output = ctx.function(*inputs_leaf) + + ctx.recomputed.append((output, inputs_leaf)) + + grad_input: List[None] = [None, None, None, None, None] + grad_input.extend(None for _ in ctx.inputs) + return tuple(grad_input) diff --git a/llmeval-env/lib/python3.10/site-packages/torch/distributed/pipeline/sync/dependency.py b/llmeval-env/lib/python3.10/site-packages/torch/distributed/pipeline/sync/dependency.py new file mode 100644 index 0000000000000000000000000000000000000000..ca5c69e388fe4412a13c5ac3b1850ef13087e6e5 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/distributed/pipeline/sync/dependency.py @@ -0,0 +1,54 @@ +# Copyright 2019 Kakao Brain +# +# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved. +# +# This source code is licensed under the BSD license found in the +# LICENSE file in the root directory of this source tree. +"""Arbitrary dependency between two autograd lanes.""" +from typing import List, Tuple + +import torch +from torch import Tensor + +from .phony import get_phony + +__all__: List[str] = ["fork", "Fork", "join", "Join"] + + +def fork(input: Tensor) -> Tuple[Tensor, Tensor]: + """Branches out from an autograd lane of the given tensor.""" + if torch.is_grad_enabled() and input.requires_grad: + input, phony = Fork.apply(input) + else: + phony = get_phony(input.device, requires_grad=False) + + return input, phony + + +class Fork(torch.autograd.Function): + @staticmethod + def forward(ctx: "Fork", input: Tensor) -> Tuple[Tensor, Tensor]: # type: ignore[override] + phony = get_phony(input.device, requires_grad=False) + return input.detach(), phony.detach() + + @staticmethod + def backward(ctx: "Fork", grad_input: Tensor, grad_grad: Tensor) -> Tensor: # type: ignore[override] + return grad_input + + +def join(input: Tensor, phony: Tensor) -> Tensor: + """Merge two autograd lanes.""" + if torch.is_grad_enabled() and (input.requires_grad or phony.requires_grad): + input = Join.apply(input, phony) + + return input + + +class Join(torch.autograd.Function): + @staticmethod + def forward(ctx: "Join", input: Tensor, phony: Tensor) -> Tensor: # type: ignore[override] + return input.detach() + + @staticmethod + def backward(ctx: "Join", grad_input: Tensor) -> Tuple[Tensor, None]: # type: ignore[override] + return grad_input, None diff --git a/llmeval-env/lib/python3.10/site-packages/torch/distributed/pipeline/sync/microbatch.py b/llmeval-env/lib/python3.10/site-packages/torch/distributed/pipeline/sync/microbatch.py new file mode 100644 index 0000000000000000000000000000000000000000..5b8aca25754808eb586a65543153bef0cba877c6 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/distributed/pipeline/sync/microbatch.py @@ -0,0 +1,234 @@ +# Copyright 2019 Kakao Brain +# +# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved. +# +# This source code is licensed under the BSD license found in the +# LICENSE file in the root directory of this source tree. 
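For reference, a minimal sketch of how the ``fork``/``join`` pair defined in ``dependency.py`` above can be used to add an explicit ordering edge between two otherwise independent autograd branches. The tensors here are hypothetical; in the pipeline this pattern is what orders ``Recompute``'s backward before ``Checkpoint``'s in ``checkpoint.py``:

    import torch
    from torch.distributed.pipeline.sync.dependency import fork, join

    a = torch.randn(3, requires_grad=True)
    b = torch.randn(3, requires_grad=True)

    # Branch a phony off of `a` and join it into `b`: the gradient for `a` can
    # then only be accumulated after the join on `b`'s branch has been
    # processed in the backward pass.
    a, phony = fork(a)
    b = join(b, phony)
    (a.sum() + b.sum()).backward()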
+"""Manipulation of micro-batches.""" +import typing +from typing import Any, Callable, List, Union, cast, Sequence + +import torch +from torch import Tensor +import torch.cuda.comm + +__all__: List[str] = ["NoChunk", "Batch", "check", "scatter", "gather"] + + +Tensors = Sequence[Tensor] +TensorOrTensors = Union[Tensor, Tensors] +Function = Callable[[TensorOrTensors], Union[List[Any], Tensor]] + + +class NoChunk: + """ + Wrapper for a Tensor in :meth:`Pipe.forward` indicating that the tensor + should not be chunked on the batch dimension and instead be replicated + as-is across all micro-batches. This is useful for tensors which might + not have any 'batch' semantics for the model. + """ + def __init__(self, inp: Tensor): + if not torch.is_tensor(inp): + raise TypeError(f'NoChunk only supported for tensors, found: {inp}') + self._tensor = inp + + @property + def tensor(self): + return self._tensor + + +class Batch: + """ + An abstraction representing a microbatch in the pipeline. + """ + + def __init__(self, values: Union[List[Any], Tensor]) -> None: + self._values = values + self.atomic = torch.is_tensor(values) + + # Verify at least on tensor + if not self.atomic: + if not any(torch.is_tensor(value) for value in self._values): + raise TypeError(f'No tensors found in batch: {self._values}') + + @property + def tensor(self) -> Tensor: + """Retrieves the underlying tensor.""" + if not self.atomic: + raise AttributeError("not atomic batch") + return cast(Tensor, self._values) + + @property + def values(self): + """Retrieves the underlying values for the batch""" + return self._values + + def find_tensor_idx(self): + """ + Retrieves the index of first tensor found. + """ + if self.atomic: + return 0 + for i, value in enumerate(self._values): + if torch.is_tensor(value): + return i + + raise TypeError("No tensor found!") + + def get_device(self): + """ + Retrieves the device for this microbatch. + """ + if self.atomic: + return self._values.device # type: ignore[union-attr] + + for value in self._values: + if torch.is_tensor(value): + return value.device + + def call(self, function: Function) -> "Batch": + """Calls a function on the microbatch. It also wraps + the output with :class:`Batch`. + """ + if self.atomic: + return Batch(function(self._values)) + else: + return Batch(function(*self._values)) + + def __repr__(self) -> str: + return f"Batch[atomic={self.atomic!r}]({self._values!r})" + + def __iter__(self): + if self.atomic: + yield self._values + else: + yield from self._values + + def __len__(self) -> int: + return 1 if self.atomic else len(self._values) + + def __getitem__(self, index: int): + if not self.atomic: + return self._values[index] + + if index != 0: + raise IndexError("atomic batch allows index 0 only") + + return self._values + + # NOTE(sublee): pyflakes can't detect "overload" instead of "typing.overload". + @typing.overload + def __setitem__(self, index: int, value: Tensor) -> None: + ... + + @typing.overload + def __setitem__(self, index: slice, value: Tensors) -> None: + ... 
+ + def __setitem__(self, index: Union[int, slice], value) -> None: + if isinstance(index, int): + self._setitem_by_index(index, value) + else: + self._setitem_by_slice(index, value) + + def _setitem_by_index(self, index: int, value) -> None: + if not self.atomic: + i = index + self._values = self._values[:i] + (value,) + self._values[i + 1 :] # type: ignore[operator] + return + + if index != 0: + raise IndexError("atomic batch allows index 0 only") + + self._values = value + + def _setitem_by_slice(self, index: slice, value) -> None: + if not (index.start is index.stop is index.step is None): # noqa: E714 + raise NotImplementedError("only slice [:] supported") + + if not self.atomic: + self._values = value + return + + if len(value) != 1: + raise IndexError("atomic batch cannot be replaced with multiple tensors") + + self._values = value[0] + + +def check(first_device, *inputs) -> None: + """ + Checks whether the input contains at least one tensor and each tensor is + on the same device as the first partition. + + Raises: + ValueError: input does not contain at least one tensor + + """ + + if not any(torch.is_tensor(input) for input in inputs): + raise TypeError(f'inputs do not have any tensors: {inputs}') + if any(torch.is_tensor(input) and input.device != first_device for input in inputs): + raise ValueError('All inputs should be on the same device as the first partition') + + +def scatter(*inputs, chunks: int) -> List[Batch]: + """Splits an input mini-batch into multiple micro-batches.""" + if len(inputs) == 1 and isinstance(inputs[0], Tensor): + return [Batch(x) for x in inputs[0].chunk(chunks)] + + batches: List[Any] = [[] for _ in range(chunks)] + # Actual number of chunks produced + num_chunks = -1 + for input in inputs: + if torch.is_tensor(input): + # Chunk only tensors. + tensors = input.chunk(chunks) + + # Validate number of chunks equal across all inputs. + if num_chunks != -1 and num_chunks != len(tensors): + raise RuntimeError(f'Found different number of chunks produced for inputs: {num_chunks} and {len(tensors)}') + num_chunks = len(tensors) + + for i, tensor in enumerate(tensors): + batches[i].append(tensor) + else: + # Replicate non-tensors or tensors wrapped with 'NoChunk'. + for i in range(chunks): + if isinstance(input, NoChunk): + # Extract the tensor out. 
+ batches[i].append(input.tensor) + else: + batches[i].append(input) + + # Truncate to actual number of chunks + batches = batches[:num_chunks] + + return [Batch(x) for x in batches] + + +def gather(outputs: List[Batch]): + """Concatenates output micro-batches into a mini-batch.""" + output: Any + + if outputs[0].atomic: + tensors = tuple(b.tensor for b in outputs) + output = torch.cat(tensors) + else: + output_buf: List[Any] = [] + for i in range(len(outputs[0])): + output_type = type(outputs[0][i]) + current_outputs = [] + for batch in outputs: + if output_type != type(batch[i]): + raise TypeError(f'Types for microbatch outputs do not match, found: {output_type} and {type(batch[i])}') + current_outputs.append(batch[i]) + + if torch.is_tensor(outputs[0][i]): + output_buf.append(torch.cat(current_outputs)) + else: + output_buf.append(current_outputs) + + output = tuple(output_buf) + + return output diff --git a/llmeval-env/lib/python3.10/site-packages/torch/distributed/pipeline/sync/phony.py b/llmeval-env/lib/python3.10/site-packages/torch/distributed/pipeline/sync/phony.py new file mode 100644 index 0000000000000000000000000000000000000000..012926699cfbc53d85b4dd8e2bdeb14658506cb3 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/distributed/pipeline/sync/phony.py @@ -0,0 +1,50 @@ +# Copyright 2019 Kakao Brain +# +# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved. +# +# This source code is licensed under the BSD license found in the +# LICENSE file in the root directory of this source tree. +"""Provides a phony for arbitrary dependencies in an autograd graph.""" +from typing import Dict, List, Tuple + +import torch +from torch import Tensor + +from .stream import default_stream, use_stream + +__all__: List[str] = ["get_phony"] + + +_phonies: Dict[Tuple[torch.device, bool], Tensor] = {} + + +def get_phony(device: torch.device, *, requires_grad: bool) -> Tensor: + """Get a phony. A phony is an empty tensor that holds no data. + + It is useful for expressing an arbitrary dependency in an autograd graph because it doesn't require any + gradient accumulation. + + .. note:: + + Phonies for each device are cached. If an autograd function gets a phony + internally, the phony must be detached to be returned. Otherwise, the + autograd engine will mutate the cached phony in-place:: + + class Phonify(torch.autograd.Function): + @staticmethod + def forward(ctx, input): + phony = get_phony(input.device, requires_grad=False) + return phony.detach() # detach() is necessary. + + """ + key = (device, requires_grad) + + try: + phony = _phonies[key] + except KeyError: + with use_stream(default_stream(device)): + phony = torch.empty(0, device=device, requires_grad=requires_grad) + + _phonies[key] = phony + + return phony diff --git a/llmeval-env/lib/python3.10/site-packages/torch/distributed/pipeline/sync/pipeline.py b/llmeval-env/lib/python3.10/site-packages/torch/distributed/pipeline/sync/pipeline.py new file mode 100644 index 0000000000000000000000000000000000000000..8eccc68183fa947248baaa30dc7d2fa722789157 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/distributed/pipeline/sync/pipeline.py @@ -0,0 +1,255 @@ +# Copyright 2019 Kakao Brain +# +# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved. +# +# This source code is licensed under the BSD license found in the +# LICENSE file in the root directory of this source tree.
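Before pipeline.py below, a small, hypothetical round trip through scatter(), Batch.call(), and gather() from microbatch.py above; it is not part of the patch, and the shapes and the replicated scale tensor are invented for illustration.

import torch
from torch.distributed.pipeline.sync.microbatch import NoChunk, gather, scatter

x = torch.randn(8, 4)        # mini-batch with batch dimension 8
scale = torch.tensor(2.0)    # no batch semantics, so replicate it as-is
batches = scatter(x, NoChunk(scale), chunks=4)

assert len(batches) == 4
assert batches[0][0].shape == (2, 4)   # each micro-batch holds a chunk of x
assert batches[0][1] is scale          # ...plus the replicated scale tensor

# Apply a function per micro-batch, then merge the outputs back together.
outputs = [batch.call(lambda chunk, s: chunk * s) for batch in batches]
merged = gather(outputs)
assert merged.shape == (8, 4)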
+"""The pipeline parallelism of Pipe.""" +from queue import Queue +from types import TracebackType +from typing import TYPE_CHECKING, Iterable, List, Optional, Tuple, Type, Union, cast, Sequence + +import torch +from torch import Tensor, nn +from torch.autograd.profiler import record_function + +from .checkpoint import Checkpointing +from .copy import Copy, Wait +from .dependency import fork, join +from .microbatch import Batch +from .skip.layout import SkipLayout +from .skip.tracker import SkipTrackerThroughPotals, use_skip_tracker +from .stream import AbstractStream, current_stream, use_device +from .worker import Task, create_workers + +__all__: List[str] = ["Pipeline"] + + +Tensors = Sequence[Tensor] +TensorOrTensors = Union[Tensor, Tensors] + +ExcInfo = Tuple[Type[BaseException], BaseException, TracebackType] + +# Queue is generic only in stubs. +# https://mypy.readthedocs.io/en/latest/common_issues.html#using-classes-that-are-generic-in-stubs-but-not-at-runtime +if TYPE_CHECKING: + InQueue = Queue[Optional["Task"]] + OutQueue = Queue[Tuple[bool, Union[Tuple["Task", Batch], ExcInfo, None]]] +else: + InQueue = Queue + OutQueue = Queue + + +def _depend(fork_from: Batch, join_to: Batch) -> None: + fork_from_idx = fork_from.find_tensor_idx() + join_to_idx = join_to.find_tensor_idx() + + fork_from[fork_from_idx], phony = fork(fork_from[fork_from_idx]) + join_to[join_to_idx] = join(join_to[join_to_idx], phony) + + +def _copy(batch: Batch, prev_stream: AbstractStream, next_stream: AbstractStream) -> None: + batch[:] = Copy.apply(prev_stream, next_stream, *batch) + # Gradients are only supported for float Tensors. + batch[:] = tuple([x.detach() if torch.is_tensor(x) and not x.is_floating_point() else x for x in batch]) + + +def _wait(batch: Batch, prev_stream: AbstractStream, next_stream: AbstractStream) -> None: + batch[:] = Wait.apply(prev_stream, next_stream, *batch) + # Gradients are only supported for float Tensors. + batch[:] = tuple([x.detach() if torch.is_tensor(x) and not x.is_floating_point() else x for x in batch]) + + +def _clock_cycles(m: int, n: int) -> Iterable[List[Tuple[int, int]]]: + """Generate schedules for each clock cycle.""" + # m: number of micro-batches + # n: number of partitions + # i: index of micro-batch + # j: index of partition + # k: clock number + # + # k (i,j) (i,j) (i,j) + # - ----- ----- ----- + # 0 (0,0) + # 1 (1,0) (0,1) + # 2 (2,0) (1,1) (0,2) + # 3 (2,1) (1,2) + # 4 (2,2) + for k in range(m + n - 1): + yield [(k - j, j) for j in range(max(1 + k - m, 0), min(1 + k, n))] + + +class Pipeline: + """The pipeline parallelism for Pipe.""" + + def __init__( + self, + partitions: List[nn.Sequential], + devices: List[torch.device], + copy_streams: List[List[AbstractStream]], + skip_layout: SkipLayout, + checkpoint_stop: int, + ) -> None: + self.partitions = partitions + self.devices = devices + self.copy_streams = copy_streams + self.skip_layout = skip_layout + self.checkpoint_stop = checkpoint_stop + (self.in_queues, self.out_queues) = create_workers(devices) + + def run(self, batches: List[Batch]) -> None: + """Runs pipeline parallelism. + + It modifies the given batches in place. 
+ + """ + partitions = self.partitions + devices = self.devices + skip_layout = self.skip_layout + + m = len(batches) + n = len(partitions) + + skip_trackers = [SkipTrackerThroughPotals(skip_layout) for _ in batches] + + for schedule in _clock_cycles(m, n): + self.fence(batches, schedule, skip_trackers) + self.compute(batches, schedule, skip_trackers) + + def fence( + self, batches: List[Batch], schedule: List[Tuple[int, int]], skip_trackers: List[SkipTrackerThroughPotals], + ) -> None: + """Copy micro-batches after computation for the previous micro-batches.""" + copy_streams = self.copy_streams + skip_layout = self.skip_layout + + for i, j in schedule: + # Ensure that batches[i-1] is executed after batches[i] in + # backpropagation by an explicit dependency. + if i != 0 and j != 0: + _depend(batches[i - 1], batches[i]) + + next_stream = copy_streams[j][i] + + for prev_j, ns, name in skip_layout.copy_policy(j): + prev_stream = copy_streams[prev_j][i] + skip_trackers[i].copy(batches[i], prev_stream, next_stream, ns, name) + + if j != 0: + prev_stream = copy_streams[j - 1][i] + _copy(batches[i], prev_stream, next_stream) + + def compute( + self, batches: List[Batch], schedule: List[Tuple[int, int]], skip_trackers: List[SkipTrackerThroughPotals], + ) -> None: + """Run tasks with synchronization to copy streams.""" + partitions = self.partitions + devices = self.devices + copy_streams = self.copy_streams + checkpoint_stop = self.checkpoint_stop + + # Disable checkpointing if in eval mode. + if not self.partitions[0].training: + checkpoint_stop = 0 + + n = len(partitions) + streams = [current_stream(d) for d in devices] + exc_info: Optional[ExcInfo] = None + + # With checkpointing, the autograd graph looks like this diagram: + # ┌─────┸──────┐ + # │ Copy │ + # └─────┰──────┘ (fence) + # ─ ─ ─ ╂ ─ ─ ─ ─ ─ ─ ─ ─ ─ + # ┃ (compute) + # ┌─────┸──────┐ + # │ Wait │ [1] Synchronize the current stream with the copy stream. + # └─────┰──────┘ + # ┌─────┸──────┐ + # │ Checkpoint │ [2] Compute a partition within checkpointing. + # └─────┰──────┘ + # ┌─────┸──────┐ + # │ Wait │ [3] Synchronize the copy stream with the current stream. + # └─────┰──────┘ + # ┠ ─ ─ ─ ┐ + # ┃ ┌─────┴─────┐ + # ┃ │ Recompute │ [4] Schedule the recomputation at backpropagation. + # ┃ └─────┬─────┘ + # ┠ ─ ─ ─ ┘ + # ┃ + # ─ ─ ─ ╂ ─ ─ ─ ─ ─ ─ ─ ─ ─ + # ┌─────┸──────┐ (fence) + # │ Copy │ + # └─────┰──────┘ + for i, j in schedule: + batch = batches[i] + partition = partitions[j] + + # Synchronize with the copied input. ([1] in the diagram) + if j != 0: + _wait(batch, copy_streams[j][i], streams[j]) + + # Determine whether checkpointing or not. 
+ checkpoint = i < checkpoint_stop + if checkpoint: + + def function( + *inputs, + partition: nn.Module = partition, + skip_tracker: SkipTrackerThroughPotals = skip_trackers[i], + chunk_id: int = i, + part_id: int = j, + ) -> TensorOrTensors: + with use_skip_tracker(skip_tracker), record_function("chunk%d-part%d" % (chunk_id, part_id)): + return partition(*inputs) + + chk = Checkpointing(function, batch) # type: ignore[arg-type] + task = Task(streams[j], compute=chk.checkpoint, finalize=chk.recompute) + del function, chk + + else: + + def compute( + batch: Batch = batch, + partition: nn.Module = partition, + skip_tracker: SkipTrackerThroughPotals = skip_trackers[i], + chunk_id: int = i, + part_id: int = j, + ) -> Batch: + with use_skip_tracker(skip_tracker), record_function("chunk%d-part%d" % (chunk_id, part_id)): + return batch.call(partition) + + task = Task(streams[j], compute=compute, finalize=None) + del compute + + # Compute tasks in parallel. ([2] in the diagram) + self.in_queues[j].put(task) + + for i, j in schedule: + ok, payload = self.out_queues[j].get() + + # Hold the first exception. + if exc_info is not None: + continue + elif not ok: + exc_info = cast(ExcInfo, payload) + continue + + task, batch = cast(Tuple[Task, Batch], payload) + + # The copy stream synchronizes to copy the output. ([3] in the + # diagram) + if j != n - 1: + _wait(batch, streams[j], copy_streams[j][i]) + + # Finalize tasks. If checkpointing is enabled, here the + # recomputation is scheduled at backpropagation. ([4] in the + # diagram) + with use_device(devices[j]): + task.finalize(batch) + + batches[i] = batch + + # Fail at the first exception. + if exc_info is not None: + raise exc_info[0].with_traceback(exc_info[1], exc_info[2]) diff --git a/llmeval-env/lib/python3.10/site-packages/torch/distributed/pipeline/sync/stream.py b/llmeval-env/lib/python3.10/site-packages/torch/distributed/pipeline/sync/stream.py new file mode 100644 index 0000000000000000000000000000000000000000..59fedf865a42bec31072d531cfb24f285499ac7e --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/distributed/pipeline/sync/stream.py @@ -0,0 +1,120 @@ +# Copyright 2019 Kakao Brain +# +# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved. +# +# This source code is licensed under the BSD license found in the +# LICENSE file in the root directory of this source tree. +"""Utilities for eliminating boilerplate code to handle abstract streams with +CPU device. +""" +from contextlib import contextmanager +from typing import Generator, List, Union, cast + +import torch + +__all__: List[str] = ["CPUStreamType", "new_stream", "current_stream", "default_stream", + "use_device", "use_stream", "get_device", "wait_stream", "record_stream", + "is_cuda", "as_cuda"] + + +class CPUStreamType: + pass + + +# The placeholder on place of streams for the CPU device instead of CUDA. +CPUStream = CPUStreamType() + +# It represents both CUDA streams and the CPU stream. 
+AbstractStream = Union[torch.cuda.Stream, CPUStreamType] + + +def new_stream(device: torch.device) -> AbstractStream: + """Creates a new stream for either CPU or CUDA device.""" + if device.type != "cuda": + return CPUStream + return torch.cuda.Stream(device) + + +def current_stream(device: torch.device) -> AbstractStream: + """:func:`torch.cuda.current_stream` for either CPU or CUDA device.""" + if device.type != "cuda": + return CPUStream + return torch.cuda.current_stream(device) + + +def default_stream(device: torch.device) -> AbstractStream: + """:func:`torch.cuda.default_stream` for either CPU or CUDA device.""" + if device.type != "cuda": + return CPUStream + return torch.cuda.default_stream(device) + + +@contextmanager +def use_device(device: torch.device) -> Generator[None, None, None]: + """:func:`torch.cuda.device` for either CPU or CUDA device.""" + if device.type != "cuda": + yield + return + + with torch.cuda.device(device): + yield + + +@contextmanager +def use_stream(stream: AbstractStream) -> Generator[None, None, None]: + """:func:`torch.cuda.stream` for either CPU or CUDA stream.""" + if not is_cuda(stream): + yield + return + + with torch.cuda.stream(as_cuda(stream)): + yield + + +def get_device(stream: AbstractStream) -> torch.device: + """Gets the device from CPU or CUDA stream.""" + if is_cuda(stream): + return as_cuda(stream).device + return torch.device("cpu") + + +def wait_stream(source: AbstractStream, target: AbstractStream) -> None: + """:meth:`torch.cuda.Stream.wait_stream` for either CPU or CUDA stream. It + makes the source stream wait until the target stream completes work queued. + """ + if is_cuda(target): + if is_cuda(source): + # A CUDA stream waits another CUDA stream. + as_cuda(source).wait_stream(as_cuda(target)) + else: + # CPU waits a CUDA stream. + as_cuda(target).synchronize() + + # If the target is CPU, synchronization is not required. + + +def record_stream(tensor: torch.Tensor, stream: AbstractStream) -> None: + """:meth:`torch.Tensor.record_stream` for either CPU or CUDA stream.""" + if is_cuda(stream): + # NOTE(sublee): record_stream() on a shifted view tensor throws + # RuntimeError in PyTorch 1.1.0, and does nothing in 1.2.0. To safely + # protect the tensor against unexpected reallocation, here we use a + # temporal tensor associated with the same storage without shifting as + # a workaround. 
+ # + # Issue: https://github.com/pytorch/pytorch/issues/27366 + # + tensor = tensor.new_empty([0]).set_(tensor._typed_storage()) + + # Typechecking: torch.cuda.Stream is incompatible with torch._C.Stream + tensor.record_stream(as_cuda(stream)) # type: ignore[arg-type] + + +def is_cuda(stream: AbstractStream) -> bool: + """Returns ``True`` if the given stream is a valid CUDA stream.""" + return stream is not CPUStream + + +def as_cuda(stream: AbstractStream) -> torch.cuda.Stream: + """Casts the given stream as :class:`torch.cuda.Stream`.""" + return cast(torch.cuda.Stream, stream) diff --git a/llmeval-env/lib/python3.10/site-packages/torch/distributed/pipeline/sync/utils.py b/llmeval-env/lib/python3.10/site-packages/torch/distributed/pipeline/sync/utils.py new file mode 100644 index 0000000000000000000000000000000000000000..210c475317e2cf695071d25ec14a3127acb3bb4d --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/distributed/pipeline/sync/utils.py @@ -0,0 +1,38 @@ +from torch import nn +from typing import List, Optional + +__all__ = ["partition_model"] + +def partition_model( + module: nn.Sequential, + balance: List[int], + devices: Optional[List[int]] = None): + """ + Partitions the model across multiple GPU devices. + + Given an :class:`nn.Sequential <torch.nn.Sequential>` module, partitions + the model across multiple GPU devices according to the provided ``balance`` + and ``devices``. + + Args: + module (:class:`nn.Sequential <torch.nn.Sequential>`): + Sequential model representing the pipe. + balance (List[int]): + List indicating the number of layers in each partition. + devices (List[int], optional): + List indicating the device to use for each partition. Defaults to + ``range(len(balance))``. + """ + device_idx = 0 + pipe_idx = 0 + balanced_pipe = [] + for num_layers in balance: + layers = [] + for i in range(num_layers): + layers.append(module[pipe_idx]) + pipe_idx += 1 + device = device_idx if devices is None else devices[device_idx] + balanced_pipe.append(nn.Sequential(*layers).to(device)) + device_idx += 1 + + return nn.Sequential(*balanced_pipe) diff --git a/llmeval-env/lib/python3.10/site-packages/torch/distributed/pipeline/sync/worker.py b/llmeval-env/lib/python3.10/site-packages/torch/distributed/pipeline/sync/worker.py new file mode 100644 index 0000000000000000000000000000000000000000..87b20c4a5551917e89f8aac0559081f9db513e1b --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/distributed/pipeline/sync/worker.py @@ -0,0 +1,132 @@ +# Copyright 2019 Kakao Brain +# +# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved. +# +# This source code is licensed under the BSD license found in the +# LICENSE file in the root directory of this source tree. +"""Multithreading in pipeline parallelism.""" +from contextlib import contextmanager +from queue import Queue +import sys +from threading import Thread +from types import TracebackType +from typing import TYPE_CHECKING, Callable, Dict, Generator, List, Optional, Tuple, Type, Union, cast + +import torch + +from .microbatch import Batch +from .stream import AbstractStream, use_device, use_stream + +__all__: List[str] = ["Task", "worker", "create_workers", "spawn_workers"] + + +ExcInfo = Tuple[Type[BaseException], BaseException, TracebackType] + +# Queue is generic only in stubs.
+# https://mypy.readthedocs.io/en/latest/common_issues.html#using-classes-that-are-generic-in-stubs-but-not-at-runtime +if TYPE_CHECKING: + InQueue = Queue[Optional["Task"]] + OutQueue = Queue[Tuple[bool, Union[Tuple["Task", Batch], ExcInfo, None]]] +else: + InQueue = Queue + OutQueue = Queue + + +class Task: + """A task represents how to compute a micro-batch on a partition. + + It consists of two parts: :meth:`compute` and :meth:`finalize`. + :meth:`compute` should be executed in worker threads concurrently. + :meth:`finalize` should be executed after the worker threads have finished + executing :meth:`compute`. + + :meth:`compute` benefits from running in worker threads because the user + code inside it issues several CUDA API calls, and in PyTorch parallel CUDA + API calls are not serialized through the GIL. So more than one CUDA API + call can be issued at the same time. + + """ + + def __init__( + self, stream: AbstractStream, *, compute: Callable[[], Batch], finalize: Optional[Callable[[Batch], None]], + ) -> None: + self.stream = stream + self._compute = compute + self._finalize = finalize + self._grad_enabled = torch.is_grad_enabled() + + def compute(self) -> Batch: + with use_stream(self.stream), torch.set_grad_enabled(self._grad_enabled): + return self._compute() + + def finalize(self, batch: Batch) -> None: + if self._finalize is None: + return + with use_stream(self.stream), torch.set_grad_enabled(self._grad_enabled): + self._finalize(batch) + + +def worker(in_queue: InQueue, out_queue: OutQueue, device: torch.device) -> None: + """Main loop of a worker thread.""" + with use_device(device): + while True: + task = in_queue.get() + + if task is None: + break + + try: + batch = task.compute() + except Exception: + exc_info = cast(ExcInfo, sys.exc_info()) + out_queue.put((False, exc_info)) + continue + + out_queue.put((True, (task, batch))) + + done = (False, None) + out_queue.put(done) + + +def create_workers(devices: List[torch.device],) -> Tuple[List[InQueue], List[OutQueue]]: + """Spawns worker threads. A worker thread is bound to a device.""" + in_queues: List[InQueue] = [] + out_queues: List[OutQueue] = [] + + # Spawn workers. + workers: Dict[torch.device, Tuple[InQueue, OutQueue]] = {} + + def normalize_device(device: torch.device) -> torch.device: + if device.type == "cuda" and device.index is None: + return torch.device("cuda", index=torch.cuda.current_device()) + + if device.type == "cpu" and device.index is not None: + return torch.device("cpu") + + return device + + for device in devices: + device = normalize_device(device) + + try: + in_queue, out_queue = workers[device] + except KeyError: + in_queue = Queue() + out_queue = Queue() + workers[device] = (in_queue, out_queue) + + t = Thread(target=worker, args=(in_queue, out_queue, device), daemon=True,) + t.start() + + in_queues.append(in_queue) + out_queues.append(out_queue) + + return (in_queues, out_queues) + +@contextmanager +def spawn_workers(devices: List[torch.device],) -> Generator[Tuple[List[InQueue], List[OutQueue]], None, None]: + try: + (in_queues, out_queues) = create_workers(devices) + yield (in_queues, out_queues) + finally: + pass
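Finally, a minimal sketch that drives one CPU worker by hand to make the queue protocol above concrete; it is not part of the patch (Pipeline.compute() is the real caller of these queues), and the 2x2 tensor of threes is an arbitrary stand-in for a partition's output.

import torch
from torch.distributed.pipeline.sync.microbatch import Batch
from torch.distributed.pipeline.sync.stream import current_stream
from torch.distributed.pipeline.sync.worker import Task, create_workers

device = torch.device("cpu")
in_queues, out_queues = create_workers([device])

# compute() runs on the worker thread bound to `device`; finalize is optional.
task = Task(
    current_stream(device),
    compute=lambda: Batch(torch.full((2, 2), 3.0)),
    finalize=None,
)
in_queues[0].put(task)

ok, payload = out_queues[0].get()
assert ok, payload
_task, batch = payload
assert torch.equal(batch.tensor, torch.full((2, 2), 3.0))

in_queues[0].put(None)   # ask the worker loop to exit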